-#!/bin/sh
+#!/bin/bash
set -e
+#set -v
#
# This test needs to be run on the client
#
-
-LUSTRE=${LUSTRE:-`dirname $0`/..}
+SAVE_PWD=$PWD
+LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
+SETUP=${SETUP:-}
+CLEANUP=${CLEANUP:-}
. $LUSTRE/tests/test-framework.sh
-
init_test_env $@
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+CHECK_GRANT=${CHECK_GRANT:-"yes"}
+GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""}
-. ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
-
-build_test_filter
-
-assert_env MDSCOUNT
+remote_mds_nodsh && log "SKIP: remote MDS with nodsh" && exit 0
# Skip these tests
-ALWAYS_EXCEPT=""
-
-if [ `using_krb5_sec $SECURITY` == 'n' ] ; then
- ALWAYS_EXCEPT="0c $ALWAYS_EXCEPT"
+# bug number: 17466 15962
+ALWAYS_EXCEPT="61d 33b $REPLAY_SINGLE_EXCEPT"
+
+if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then
+ CONFIG_EXCEPTIONS="0b 42 47 61a 61c"
+    echo -n "Several OST services on one OST node are used with FAILURE_MODE=$FAILURE_MODE. "
+    echo "Excluding tests: $CONFIG_EXCEPTIONS"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
fi
-
-gen_config() {
- rm -f $XMLCONFIG
-
- if [ "$MDSCOUNT" -gt 1 ]; then
- add_lmv lmv1_svc
- for mds in `mds_list`; do
- MDSDEV=$TMP/${mds}-`hostname`
- add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1_svc
- done
- add_lov_to_lmv lov1 lmv1_svc --stripe_sz $STRIPE_BYTES \
- --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
- MDS=lmv1
- else
- add_mds mds1 --dev $MDSDEV --size $MDSSIZE
- if [ ! -z "$mds1failover_HOST" ]; then
- add_mdsfailover mds1 --dev $MDSDEV --size $MDSSIZE
- fi
- add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \
- --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
- MDS=mds1_svc
- fi
-
- add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
- add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
- add_client client $MDS --lov lov1 --path $MOUNT
-}
+# 63 min  7 min  AT AT AT AT
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 6 12 16 44a 44b 65 66 67 68"
build_test_filter
-cleanup() {
- # make sure we are using the primary MDS, so the config log will
- # be able to clean up properly.
- activemds=`facet_active mds1`
- if [ $activemds != "mds1" ]; then
- fail mds1
- fi
- zconf_umount `hostname` $MOUNT
- for mds in `mds_list`; do
- stop $mds ${FORCE} $MDSLCONFARGS
- done
- stop_lgssd
- stop_lsvcgssd
- stop ost2 ${FORCE} --dump cleanup.log
- stop ost ${FORCE} --dump cleanup.log
-}
-
-if [ "$ONLY" == "cleanup" ]; then
- sysctl -w portals.debug=0 || true
- cleanup
- exit
-fi
-
-SETUP=${SETUP:-"setup"}
-CLEANUP=${CLEANUP:-"cleanup"}
-
-setup() {
- gen_config
-
- start_krb5_kdc || exit 1
- start ost --reformat $OSTLCONFARGS
- start ost2 --reformat $OSTLCONFARGS
- start_lsvcgssd || exit 2
- start_lgssd || exit 3
- [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
- for mds in `mds_list`; do
- start $mds --reformat $MDSLCONFARGS
- done
- grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT
-}
-
-$SETUP
-
-if [ "$ONLY" == "setup" ]; then
- exit 0
-fi
+check_and_setup_lustre
mkdir -p $DIR
-test_0() {
- replay_barrier mds1
- fail mds1
+assert_DIR
+rm -rf $DIR/[df][0-9]*
+
+test_0a() { # was test_0
+ sleep 10
+ mkdir $DIR/$tfile
+ replay_barrier $SINGLEMDS
+ fail $SINGLEMDS
+ rmdir $DIR/$tfile
}
-run_test 0 "empty replay"
+run_test 0a "empty replay"
test_0b() {
- # this test attempts to trigger a race in the precreation code,
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ # this test attempts to trigger a race in the precreation code,
# and must run before any other objects are created on the filesystem
- fail ost
+ fail ost1
createmany -o $DIR/$tfile 20 || return 1
unlinkmany $DIR/$tfile 20 || return 2
}
run_test 0b "ensure object created after recover exists. (3284)"
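+
+# A note on the helpers above: createmany -o BASE N open/creates N files
+# (BASE with an index appended, or a printf-style %d format when one is
+# given) and unlinkmany removes the same range. A minimal sketch:
+#   createmany -o $DIR/$tfile 20 || return 1   # creates $tfile0..$tfile19
+#   unlinkmany $DIR/$tfile 20 || return 2      # removes them again
+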
+seq_set_width()
+{
+ local mds=$1
+ local width=$2
+ lctl set_param -n seq.cli-srv-$mds-mdc-*.width=$width
+}
+
+seq_get_width()
+{
+ local mds=$1
+ lctl get_param -n seq.cli-srv-$mds-mdc-*.width
+}
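+
+# Example use of the two helpers above (mirrors test_0c below): narrow the
+# sequence width so every new fid forces a sequence switch, then restore it.
+#   sw=$(seq_get_width $label)
+#   seq_set_width $label 1
+#   touch $DIR/$tfile        # consumes a fid, switches to a new sequence
+#   seq_set_width $label $sw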
+
+# This test should pass for single-mds and multi-mds configs.
+# But for different configurations it tests different things.
+#
+# single-mds
+# ----------
+# (1) fld_create replay should happen;
+#
+# (2) fld_create replay should not return -EEXIST; if it does,
+# the sequence manager recovery code is buggy and allocated the
+# same sequence twice after recovery.
+#
+# multi-mds
+# ---------
+# (1) fld_create replay may not happen, because its home MDS is
+# MDS2, which is not involved in recovery;
+#
+# (2) as fld_create does not happen on MDS1, it does not cause
+# any problem.
test_0c() {
- # drop gss error notification
- replay_barrier mds1
- fail_drop mds1 0x760
+ local label=`mdsdevlabel 1`
+ [ -z "$label" ] && echo "No label for mds1" && return 1
- # drop gss init request
- replay_barrier mds1
- fail_drop mds1 0x780
+ replay_barrier $SINGLEMDS
+ local sw=`seq_get_width $label`
+
+    # make the seq manager switch to the next sequence each
+    # time a new fid is needed.
+ seq_set_width $label 1
+
+ # make sure that fld has created at least one new
+ # entry on server
+ touch $DIR/$tfile || return 2
+ seq_set_width $label $sw
+
+ # fail $SINGLEMDS and start recovery, replay RPCs, etc.
+ fail $SINGLEMDS
+
+ # wait for recovery finish
+ sleep 10
+ df $MOUNT
+
+    # flush the fld cache and dentry cache to force a lookup of the
+    # created entry instead of revalidating the existing one
+ umount $MOUNT
+ zconf_mount `hostname` $MOUNT
+
+    # issue a lookup, which triggers an fld lookup; this fails if the
+    # client did not replay the fld create correctly and the server
+    # has no fld entry
+ touch $DIR/$tfile || return 3
+ rm $DIR/$tfile || return 4
}
-run_test 0c "empty replay with gss init failures"
+run_test 0c "fld create"
test_1() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mcreate $DIR/$tfile
- fail mds1
+ fail $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile || return 1
rm $DIR/$tfile
}
run_test 1 "simple create"
test_2a() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
touch $DIR/$tfile
- fail mds1
+ fail $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile || return 1
rm $DIR/$tfile
}
run_test 2a "touch"
test_2b() {
- ./mcreate $DIR/$tfile
- replay_barrier mds1
+ mcreate $DIR/$tfile
+ replay_barrier $SINGLEMDS
touch $DIR/$tfile
- fail mds1
+ fail $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile || return 1
rm $DIR/$tfile
}
run_test 2b "touch"
test_3a() {
- replay_barrier mds1
- mcreate $DIR/$tfile
- o_directory $DIR/$tfile
- fail mds1
- $CHECKSTAT -t file $DIR/$tfile || return 2
- rm $DIR/$tfile
+ local file=$DIR/$tfile
+ replay_barrier $SINGLEMDS
+ mcreate $file
+ openfile -f O_DIRECTORY $file
+ fail $SINGLEMDS
+ $CHECKSTAT -t file $file || return 2
+ rm $file
}
run_test 3a "replay failed open(O_DIRECTORY)"
test_3b() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
#define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
- do_facet mds "sysctl -w lustre.fail_loc=0x80000114"
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000114"
touch $DIR/$tfile
- do_facet mds "sysctl -w lustre.fail_loc=0"
- fail mds1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ fail $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile && return 2
return 0
}
run_test 3b "replay failed open -ENOMEM"
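+
+# Note on the fail_loc values used here and below: 0x80000000 is the
+# OBD_FAIL_ONCE flag OR-ed into the failure-site id, so 0x80000114 trips
+# OBD_FAIL_MDS_OPEN_PACK exactly once and then disarms itself. The usual
+# arm/trigger/disarm pattern:
+#   do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000114"   # arm once
+#   touch $DIR/$tfile                                          # trigger
+#   do_facet $SINGLEMDS "lctl set_param fail_loc=0"            # disarm
+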
test_3c() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
#define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
- do_facet mds "sysctl -w lustre.fail_loc=0x80000128"
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000128"
touch $DIR/$tfile
- do_facet mds "sysctl -w lustre.fail_loc=0"
- fail mds1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ fail $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile && return 2
return 0
}
run_test 3c "replay failed open -ENOMEM"
-test_4() {
- replay_barrier mds1
+test_4a() { # was test_4
+ replay_barrier $SINGLEMDS
for i in `seq 10`; do
echo "tag-$i" > $DIR/$tfile-$i
- done
- fail mds1
+ done
+ fail $SINGLEMDS
for i in `seq 10`; do
grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
- done
+ done
}
-run_test 4 "|x| 10 open(O_CREAT)s"
+run_test 4a "|x| 10 open(O_CREAT)s"
test_4b() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
rm -rf $DIR/$tfile-*
- fail mds1
+ fail $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
}
run_test 4b "|x| rm 10 files"
-# The idea is to get past the first block of precreated files on both
+# The idea is to get past the first block of precreated files on both
# osts, and then replay.
test_5() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
for i in `seq 220`; do
echo "tag-$i" > $DIR/$tfile-$i
- done
- fail mds1
+ done
+ fail $SINGLEMDS
for i in `seq 220`; do
- grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i"
- done
+ grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
+ done
rm -rf $DIR/$tfile-*
sleep 3
# waiting for commitment of removal
run_test 5 "|x| 220 open(O_CREAT)"
-test_6() {
- replay_barrier mds1
- mkdir $DIR/$tdir
+test_6a() { # was test_6
+ mkdir -p $DIR/$tdir
+ replay_barrier $SINGLEMDS
mcreate $DIR/$tdir/$tfile
- fail mds1
+ fail $SINGLEMDS
$CHECKSTAT -t dir $DIR/$tdir || return 1
$CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
sleep 2
# waiting for log process thread
}
-run_test 6 "mkdir + contained create"
+run_test 6a "mkdir + contained create"
test_6b() {
- replay_barrier mds1
+ mkdir -p $DIR/$tdir
+ replay_barrier $SINGLEMDS
rm -rf $DIR/$tdir
- fail mds1
- $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
+ fail $SINGLEMDS
+ $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
}
run_test 6b "|X| rmdir"
test_7() {
- mkdir $DIR/$tdir
- replay_barrier mds1
+ mkdir -p $DIR/$tdir
+ replay_barrier $SINGLEMDS
mcreate $DIR/$tdir/$tfile
- fail mds1
+ fail $SINGLEMDS
$CHECKSTAT -t dir $DIR/$tdir || return 1
$CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
rm -fr $DIR/$tdir
run_test 7 "mkdir |X| contained create"
test_8() {
- replay_barrier mds1
- multiop $DIR/$tfile mo_c &
+ # make sure no side-effect from previous test.
+ rm -f $DIR/$tfile
+ replay_barrier $SINGLEMDS
+ multiop_bg_pause $DIR/$tfile mo_c || return 4
MULTIPID=$!
- sleep 1
- fail mds1
+ fail $SINGLEMDS
ls $DIR/$tfile
$CHECKSTAT -t file $DIR/$tfile || return 1
kill -USR1 $MULTIPID || return 2
run_test 8 "creat open |X| close"
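+
+# multiop_bg_pause (from test-framework.sh) starts multiop in the background
+# and only returns once the file is actually open, replacing the old racy
+# "multiop ... & ; sleep 1" idiom. The pattern used throughout this script:
+#   multiop_bg_pause $DIR/$tfile O_tSc || return 1   # open and pause
+#   pid=$!
+#   fail $SINGLEMDS                                  # recover with file open
+#   kill -USR1 $pid && wait $pid                     # resume, close, exit
+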
test_9() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mcreate $DIR/$tfile
local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
- fail mds1
+ fail $SINGLEMDS
local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
echo " old_inum == $old_inum, new_inum == $new_inum"
test_10() {
mcreate $DIR/$tfile
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mv $DIR/$tfile $DIR/$tfile-2
rm -f $DIR/$tfile
- fail mds1
-
+ fail $SINGLEMDS
$CHECKSTAT $DIR/$tfile && return 1
- $CHECKSTAT $DIR/$tfile-2 || return 2
+    $CHECKSTAT $DIR/$tfile-2 || return 2
rm $DIR/$tfile-2
return 0
}
mcreate $DIR/$tfile
echo "old" > $DIR/$tfile
mv $DIR/$tfile $DIR/$tfile-2
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
echo "new" > $DIR/$tfile
- grep new $DIR/$tfile
+ grep new $DIR/$tfile
grep old $DIR/$tfile-2
- fail mds1
+ fail $SINGLEMDS
grep new $DIR/$tfile || return 1
grep old $DIR/$tfile-2 || return 2
}
run_test 11 "create open write rename |X| create-old-name read"
test_12() {
- mcreate $DIR/$tfile
- multiop $DIR/$tfile o_tSc &
+ mcreate $DIR/$tfile
+ multiop_bg_pause $DIR/$tfile o_tSc || return 3
pid=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
kill -USR1 $pid
wait $pid || return 1
- fail mds1
+ fail $SINGLEMDS
[ -e $DIR/$tfile ] && return 2
return 0
}
# 1777 - replay open after committed chmod that would make
-# a regular open a failure
+# a regular open a failure
test_13() {
- mcreate $DIR/$tfile
- multiop $DIR/$tfile O_wc &
+ mcreate $DIR/$tfile
+ multiop_bg_pause $DIR/$tfile O_wc || return 3
pid=$!
- # give multiop a chance to open
- sleep 1
chmod 0 $DIR/$tfile
$CHECKSTAT -p 0 $DIR/$tfile
- replay_barrier mds1
- fail mds1
+ replay_barrier $SINGLEMDS
+ fail $SINGLEMDS
kill -USR1 $pid
wait $pid || return 1
run_test 13 "open chmod 0 |x| write close"
test_14() {
- multiop $DIR/$tfile O_tSc &
+ multiop_bg_pause $DIR/$tfile O_tSc || return 4
pid=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
kill -USR1 $pid || return 1
wait $pid || return 2
- fail mds1
+ fail $SINGLEMDS
[ -e $DIR/$tfile ] && return 3
return 0
}
run_test 14 "open(O_CREAT), unlink |X| close"
test_15() {
- multiop $DIR/$tfile O_tSc &
+ multiop_bg_pause $DIR/$tfile O_tSc || return 5
pid=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
touch $DIR/g11 || return 1
kill -USR1 $pid
wait $pid || return 2
- fail mds1
+ fail $SINGLEMDS
[ -e $DIR/$tfile ] && return 3
touch $DIR/h11 || return 4
return 0
test_16() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mcreate $DIR/$tfile
munlink $DIR/$tfile
mcreate $DIR/$tfile-2
- fail mds1
+ fail $SINGLEMDS
[ -e $DIR/$tfile ] && return 1
[ -e $DIR/$tfile-2 ] || return 2
munlink $DIR/$tfile-2 || return 3
run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
test_17() {
- replay_barrier mds1
- multiop $DIR/$tfile O_c &
+ replay_barrier $SINGLEMDS
+ multiop_bg_pause $DIR/$tfile O_c || return 4
pid=$!
- # give multiop a chance to open
- sleep 1
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid || return 1
wait $pid || return 2
$CHECKSTAT -t file $DIR/$tfile || return 3
run_test 17 "|X| open(O_CREAT), |replay| close"
test_18() {
- replay_barrier mds1
- multiop $DIR/$tfile O_tSc &
+ replay_barrier $SINGLEMDS
+ multiop_bg_pause $DIR/$tfile O_tSc || return 8
pid=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile
touch $DIR/$tfile-2 || return 1
echo "pid: $pid will close"
kill -USR1 $pid
wait $pid || return 2
- fail mds1
+ fail $SINGLEMDS
[ -e $DIR/$tfile ] && return 3
[ -e $DIR/$tfile-2 ] || return 4
# this touch frequently fails
# bug 1855 (a simpler form of test_11 above)
test_19() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mcreate $DIR/$tfile
echo "old" > $DIR/$tfile
mv $DIR/$tfile $DIR/$tfile-2
grep old $DIR/$tfile-2
- fail mds1
+ fail $SINGLEMDS
grep old $DIR/$tfile-2 || return 2
}
run_test 19 "|X| mcreate, open, write, rename "
-test_20() {
- replay_barrier mds1
- multiop $DIR/$tfile O_tSc &
+test_20a() { # was test_20
+ replay_barrier $SINGLEMDS
+ multiop_bg_pause $DIR/$tfile O_tSc || return 3
pid=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid
wait $pid || return 1
[ -e $DIR/$tfile ] && return 2
return 0
}
-run_test 20 "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
+run_test 20a "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
+
+test_20b() { # bug 10480
+ BEFOREUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
+
+ dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000 &
+ pid=$!
+ while [ ! -e $DIR/$tfile ] ; do
+ usleep 60 # give dd a chance to start
+ done
+
+ lfs getstripe $DIR/$tfile || return 1
+ rm -f $DIR/$tfile || return 2 # make it an orphan
+ mds_evict_client
+ df -P $DIR || df -P $DIR || true # reconnect
+
+ fail $SINGLEMDS # start orphan recovery
+ df -P $DIR || df -P $DIR || true # reconnect
+ wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
+
+ # just because recovery is done doesn't mean we've finished
+ # orphan cleanup. Wait for llogs to get synchronized.
+ echo waiting for orphan cleanup...
+    while true; do
+        local -a sync=($(do_facet ost1 "$LCTL get_param obdfilter.*.mds_sync" | awk -F= ' {print $2}'))
+ local con=1
+ for ((i=0; i<${#sync[@]}; i++)); do
+ [ ${sync[$i]} -eq 0 ] && continue
+ # there is a not finished MDS-OST synchronization
+ con=0
+ break;
+ done
+ [ ${con} -eq 1 ] && break
+ sleep 1
+ done
+
+    # let the statfs cache get old enough.
+ sleep 1
+
+ AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
+ log "before $BEFOREUSED, after $AFTERUSED"
+ [ $AFTERUSED -gt $((BEFOREUSED + 20)) ] && \
+ error "after $AFTERUSED > before $BEFOREUSED"
+ return 0
+}
+run_test 20b "write, unlink, eviction, replay, (test mds_cleanup_orphans)"
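+
+# The obdfilter.*.mds_sync values polled above count unfinished MDS->OST
+# llog synchronizations per OST target; orphan cleanup is only finished
+# once every counter reads 0. A quick manual check (single OST assumed):
+#   do_facet ost1 "$LCTL get_param obdfilter.*.mds_sync"   # 0 == synced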
+
+test_20c() { # bug 10480
+ multiop_bg_pause $DIR/$tfile Ow_c || return 1
+ pid=$!
+
+ ls -la $DIR/$tfile
+
+ mds_evict_client
+
+ df -P $DIR || df -P $DIR || true # reconnect
+
+ kill -USR1 $pid
+ test -s $DIR/$tfile || error "File was truncated"
+
+ wait $pid || return 1
+ return 0
+}
+run_test 20c "check that client eviction does not affect file content"
test_21() {
- replay_barrier mds1
- multiop $DIR/$tfile O_tSc &
+ replay_barrier $SINGLEMDS
+ multiop_bg_pause $DIR/$tfile O_tSc || return 5
pid=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile
touch $DIR/g11 || return 1
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid
wait $pid || return 2
[ -e $DIR/$tfile ] && return 3
run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
test_22() {
- multiop $DIR/$tfile O_tSc &
+ multiop_bg_pause $DIR/$tfile O_tSc || return 3
pid=$!
- # give multiop a chance to open
- sleep 1
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
rm -f $DIR/$tfile
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid
wait $pid || return 1
[ -e $DIR/$tfile ] && return 2
run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
test_23() {
- multiop $DIR/$tfile O_tSc &
+ multiop_bg_pause $DIR/$tfile O_tSc || return 5
pid=$!
- # give multiop a chance to open
- sleep 1
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
rm -f $DIR/$tfile
touch $DIR/g11 || return 1
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid
wait $pid || return 2
[ -e $DIR/$tfile ] && return 3
run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
test_24() {
- multiop $DIR/$tfile O_tSc &
+ multiop_bg_pause $DIR/$tfile O_tSc || return 3
pid=$!
- # give multiop a chance to open
- sleep 1
- replay_barrier mds1
- fail mds1
+ replay_barrier $SINGLEMDS
+ fail $SINGLEMDS
rm -f $DIR/$tfile
kill -USR1 $pid
wait $pid || return 1
run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
test_25() {
- multiop $DIR/$tfile O_tSc &
+ multiop_bg_pause $DIR/$tfile O_tSc || return 3
pid=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile
- replay_barrier mds1
- fail mds1
+ replay_barrier $SINGLEMDS
+ fail $SINGLEMDS
kill -USR1 $pid
wait $pid || return 1
[ -e $DIR/$tfile ] && return 2
run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
test_26() {
- replay_barrier mds1
- multiop $DIR/$tfile-1 O_tSc &
+ replay_barrier $SINGLEMDS
+ multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
pid1=$!
- multiop $DIR/$tfile-2 O_tSc &
+ multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
pid2=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile-1
rm -f $DIR/$tfile-2
kill -USR1 $pid2
wait $pid2 || return 1
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid1
wait $pid1 || return 2
[ -e $DIR/$tfile-1 ] && return 3
run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
test_27() {
- replay_barrier mds1
- multiop $DIR/$tfile-1 O_tSc &
+ replay_barrier $SINGLEMDS
+ multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
pid1=$!
- multiop $DIR/$tfile-2 O_tSc &
+ multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
pid2=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile-1
rm -f $DIR/$tfile-2
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid1
wait $pid1 || return 1
kill -USR1 $pid2
run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
test_28() {
- multiop $DIR/$tfile-1 O_tSc &
+ multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
pid1=$!
- multiop $DIR/$tfile-2 O_tSc &
+ multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
pid2=$!
- # give multiop a chance to open
- sleep 1
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
rm -f $DIR/$tfile-1
rm -f $DIR/$tfile-2
kill -USR1 $pid2
wait $pid2 || return 1
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid1
wait $pid1 || return 2
[ -e $DIR/$tfile-1 ] && return 3
run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
test_29() {
- multiop $DIR/$tfile-1 O_tSc &
+ multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
pid1=$!
- multiop $DIR/$tfile-2 O_tSc &
+ multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
pid2=$!
- # give multiop a chance to open
- sleep 1
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
rm -f $DIR/$tfile-1
rm -f $DIR/$tfile-2
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid1
wait $pid1 || return 1
kill -USR1 $pid2
run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
test_30() {
- multiop $DIR/$tfile-1 O_tSc &
+ multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
pid1=$!
- multiop $DIR/$tfile-2 O_tSc &
+ multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
pid2=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile-1
rm -f $DIR/$tfile-2
- replay_barrier mds1
- fail mds1
+ replay_barrier $SINGLEMDS
+ fail $SINGLEMDS
kill -USR1 $pid1
wait $pid1 || return 1
kill -USR1 $pid2
run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
test_31() {
- multiop $DIR/$tfile-1 O_tSc &
+ multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
pid1=$!
- multiop $DIR/$tfile-2 O_tSc &
+ multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
pid2=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile-1
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
rm -f $DIR/$tfile-2
- fail mds1
+ fail $SINGLEMDS
kill -USR1 $pid1
wait $pid1 || return 1
kill -USR1 $pid2
# tests for bug 2104; completion without crashing is success. The close is
# stale, but we always return 0 for close, so the app never sees it.
test_32() {
- multiop $DIR/$tfile O_c &
+ multiop_bg_pause $DIR/$tfile O_c || return 2
pid1=$!
- multiop $DIR/$tfile O_c &
+ multiop_bg_pause $DIR/$tfile O_c || return 3
pid2=$!
- # give multiop a chance to open
- sleep 1
mds_evict_client
df $MOUNT || sleep 1 && df $MOUNT || return 1
kill -USR1 $pid1
kill -USR1 $pid2
- sleep 1
+ wait $pid1 || return 4
+ wait $pid2 || return 5
return 0
}
run_test 32 "close() notices client eviction; close() after client eviction"
# Abort recovery before client complete
-test_33() {
- replay_barrier mds1
- touch $DIR/$tfile
- fail_abort mds1
+test_33a() { # was test_33
+ replay_barrier $SINGLEMDS
+ createmany -o $DIR/$tfile-%d 100
+ fail_abort $SINGLEMDS
# this file should be gone, because the replay was aborted
- $CHECKSTAT -t file $DIR/$tfile && return 1
+ $CHECKSTAT -t file $DIR/$tfile-* && return 3
+ unlinkmany $DIR/$tfile-%d 0 100
+ return 0
+}
+run_test 33a "abort recovery before client does replay"
+
+# Stale FID sequence bug 15962
+test_33b() { # was test_33a
+ replay_barrier $SINGLEMDS
+ createmany -o $DIR/$tfile-%d 10
+ fail_abort $SINGLEMDS
+ unlinkmany $DIR/$tfile-%d 0 10
+ # recreate shouldn't fail
+ createmany -o $DIR/$tfile-%d 10 || return 3
+ unlinkmany $DIR/$tfile-%d 0 10
return 0
}
-run_test 33 "abort recovery before client does replay"
+run_test 33b "fid shouldn't be reused after abort recovery"
test_34() {
- multiop $DIR/$tfile O_c &
+ multiop_bg_pause $DIR/$tfile O_c || return 2
pid=$!
- # give multiop a chance to open
- sleep 1
rm -f $DIR/$tfile
- replay_barrier mds1
- fail_abort mds1
+ replay_barrier $SINGLEMDS
+ fail_abort $SINGLEMDS
kill -USR1 $pid
+ wait $pid || return 3
[ -e $DIR/$tfile ] && return 1
sync
return 0
}
run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
-# bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
+# bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
test_35() {
touch $DIR/$tfile
#define OBD_FAIL_MDS_REINT_NET_REP 0x119
- do_facet mds "sysctl -w lustre.fail_loc=0x80000119"
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
rm -f $DIR/$tfile &
sleep 1
sync
sleep 1
# give a chance to remove from MDS
- fail_abort mds1
+ fail_abort $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile && return 1 || true
}
run_test 35 "test recovery from llog for unlink op"
# b=2432 resent cancel after replay uses wrong cookie,
# so don't resend cancels
test_36() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
touch $DIR/$tfile
checkstat $DIR/$tfile
- facet_failover mds1
- cancel_lru_locks MDC
- if dmesg | grep "unknown lock cookie"; then
+ facet_failover $SINGLEMDS
+ cancel_lru_locks mdc
+ if dmesg | grep "unknown lock cookie"; then
echo "cancel after replay failed"
return 1
fi
# directory orphans can't be unlinked from PENDING directory
test_37() {
rmdir $DIR/$tfile 2>/dev/null
- multiop $DIR/$tfile dD_c &
+ multiop_bg_pause $DIR/$tfile dD_c || return 2
pid=$!
- # give multiop a chance to open
- sleep 1
rmdir $DIR/$tfile
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
# clear the dmesg buffer so we only see errors from this recovery
dmesg -c >/dev/null
- fail_abort mds1
+ fail_abort $SINGLEMDS
kill -USR1 $pid
dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
+ wait $pid || return 3
sync
return 0
}
test_38() {
createmany -o $DIR/$tfile-%d 800
unlinkmany $DIR/$tfile-%d 0 400
- replay_barrier mds1
- fail mds1
+ replay_barrier $SINGLEMDS
+ fail $SINGLEMDS
unlinkmany $DIR/$tfile-%d 400 400
sleep 2
$CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
}
run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
-test_39() {
+test_39() { # bug 4176
createmany -o $DIR/$tfile-%d 800
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
unlinkmany $DIR/$tfile-%d 0 400
- fail mds1
+ fail $SINGLEMDS
unlinkmany $DIR/$tfile-%d 400 400
sleep 2
$CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
count_ost_writes() {
- cat /proc/fs/lustre/osc/*/stats |
- awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
+ lctl get_param -n osc.*.stats | awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
}
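+
+# Usage sketch for count_ost_writes (as in test_40 below): sample the
+# counter before and after a window to see whether writes kept flowing:
+#   stat1=`count_ost_writes`
+#   sleep $TIMEOUT
+#   stat2=`count_ost_writes`
+#   [ $stat2 -gt $stat1 ] && echo "writes continued during the window"
+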
#b=2477,2532
test_40(){
- $LCTL mark multiop $MOUNT/$tfile OS_c
+ $LCTL mark multiop $MOUNT/$tfile OS_c
multiop $MOUNT/$tfile OS_c &
PID=$!
writeme -s $MOUNT/${tfile}-2 &
WRITE_PID=$!
sleep 1
- facet_failover mds1
+ facet_failover $SINGLEMDS
#define OBD_FAIL_MDS_CONNECT_NET 0x117
- do_facet mds "sysctl -w lustre.fail_loc=0x80000117"
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117"
kill -USR1 $PID
stat1=`count_ost_writes`
sleep $TIMEOUT
stat2=`count_ost_writes`
echo "$stat1, $stat2"
- if [ $stat1 -lt $stat2 ]; then
+ if [ $stat1 -lt $stat2 ]; then
echo "writes continuing during recovery"
RC=0
else
fi
echo "waiting for writeme $WRITE_PID"
kill $WRITE_PID
- wait $WRITE_PID
+ wait $WRITE_PID
echo "waiting for multiop $PID"
wait $PID || return 2
# the page, guaranteeing that the unlock from the RPC completion would
# assert on trying to unlock the unlocked page.
test_41() {
+ [ $OSTCOUNT -lt 2 ] && \
+ skip "skipping test 41: we don't have a second OST to test with" && \
+ return
+
local f=$MOUNT/$tfile
# make sure the start of the file is ost1
- lfs setstripe $f $((128 * 1024)) 0 0
+ lfs setstripe $f -s $((128 * 1024)) -i 0
do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
- cancel_lru_locks OSC
+ cancel_lru_locks osc
# fail ost2 and read from ost1
- local osc2_dev=`$LCTL device_list | \
- awk '(/ost2.*client_facet/){print $4}' `
- $LCTL --device %$osc2_dev deactivate
+ local osc2dev=`do_facet $SINGLEMDS "lctl get_param -n devices | grep ${ost2_svc}-osc-MDT0000" | awk '{print $1}'`
+ [ -z "$osc2dev" ] && echo "OST: $ost2_svc" && lctl get_param -n devices && return 4
+ do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate || return 1
do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
- $LCTL --device %$osc2_dev activate
+ do_facet $SINGLEMDS $LCTL --device $osc2dev activate || return 2
return 0
}
run_test 41 "read from a valid osc while other oscs are invalid"
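+
+# "lctl --device <dev> deactivate" marks the MDS-side OSC import for that
+# OST inactive, so new requests steer around it; "activate" re-enables it.
+# The shape of the trick used in test_41:
+#   do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate
+#   dd if=$f of=/dev/null bs=4k count=1     # read served by the live OST
+#   do_facet $SINGLEMDS $LCTL --device $osc2dev activate
+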
# test MDS recovery after ost failure
test_42() {
- blocks=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
+ blocks=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
createmany -o $DIR/$tfile-%d 800
- replay_barrier ost
+ replay_barrier ost1
unlinkmany $DIR/$tfile-%d 0 400
- facet_failover ost
-
- # osc is evicted, fs is smaller
- blocks_after=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
- [ $blocks_after -lt $blocks ] || return 1
+ debugsave
+ lctl set_param debug=-1
+ facet_failover ost1
+
+    # osc is evicted, fs is smaller (but only with failout OSTs, bug 7287)
+ #blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
+ #[ $blocks_after -lt $blocks ] || return 1
echo wait for MDS to timeout and recover
sleep $((TIMEOUT * 2))
+ debugrestore
unlinkmany $DIR/$tfile-%d 400 400
$CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
}
run_test 42 "recovery after ost failure"
-# b=2530
# timeout in MDS/OST recovery RPC will LBUG MDS
-test_43() {
- replay_barrier mds1
+test_43() { # bug 2530
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ replay_barrier $SINGLEMDS
# OBD_FAIL_OST_CREATE_NET 0x204
- do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
- facet_failover mds1
- df $MOUNT || return 1
+ do_facet ost1 "lctl set_param fail_loc=0x80000204"
+ fail $SINGLEMDS
sleep 10
- do_facet ost "sysctl -w lustre.fail_loc=0"
+ do_facet ost1 "lctl set_param fail_loc=0"
return 0
}
run_test 43 "mds osc import failure during recovery; don't LBUG"
-test_44() {
- mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
- do_facet mds "sysctl -w lustre.fail_loc=0x80000701"
- $LCTL --device $mdcdev recover
- df $MOUNT
- do_facet mds "sysctl -w lustre.fail_loc=0"
+test_44a() { # was test_44
+ local at_max_saved=0
+
+ mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
+ [ "$mdcdev" ] || exit 2
+
+ # adaptive timeouts slow this way down
+ if at_is_enabled; then
+ at_max_saved=$(at_max_get mds)
+ at_max_set 40 mds
+ fi
+
+ for i in `seq 1 10`; do
+ echo "$i of 10 ($(date +%s))"
+ do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
+ #define OBD_FAIL_TGT_CONN_RACE 0x701
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
+ $LCTL --device $mdcdev recover
+ df $MOUNT
+ done
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
+ return 0
+}
+run_test 44a "race in target handle connect"
+
+test_44b() {
+ mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
+ [ "$mdcdev" ] || exit 2
+ for i in `seq 1 10`; do
+ echo "$i of 10 ($(date +%s))"
+ do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
+ #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
+ $LCTL --device $mdcdev recover
+ df $MOUNT
+ done
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
return 0
}
-run_test 44 "race in target handle connect"
+run_test 44b "race in target handle connect"
# Handle failed close
test_45() {
- mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
+ mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
+ [ "$mdcdev" ] || exit 2
$LCTL --device $mdcdev recover
- multiop $DIR/$tfile O_c &
+ multiop_bg_pause $DIR/$tfile O_c || return 1
pid=$!
- sleep 1
- # This will cause the CLOSE to fail before even
+ # This will cause the CLOSE to fail before even
# allocating a reply buffer
- $LCTL --device $mdcdev deactivate
+ $LCTL --device $mdcdev deactivate || return 4
# try the close
kill -USR1 $pid
wait $pid || return 1
- $LCTL --device $mdcdev activate
+ $LCTL --device $mdcdev activate || return 5
+ sleep 1
$CHECKSTAT -t file $DIR/$tfile || return 2
return 0
test_46() {
dmesg -c >/dev/null
drop_reply "touch $DIR/$tfile"
- fail mds1
+ fail $SINGLEMDS
# ironically, the previous test, 45, will cause a real forced close,
# so just look for one for this test
dmesg | grep -i "force closing client file handle for $tfile" && return 1
}
run_test 46 "Don't leak file handle after open resend (3325)"
-# b=2824
-test_47() {
+test_47() { # bug 2824
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
- # create some files to make sure precreate has been done on all
+ # create some files to make sure precreate has been done on all
# OSTs. (just in case this test is run independently)
createmany -o $DIR/$tfile 20 || return 1
# OBD_FAIL_OST_CREATE_NET 0x204
- fail ost
- do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
+ fail ost1
+ do_facet ost1 "lctl set_param fail_loc=0x80000204"
df $MOUNT || return 2
# let the MDS discover the OST failure, attempt to recover, fail
- # and recover again.
+ # and recover again.
sleep $((3 * TIMEOUT))
- # Without 2824, this createmany would hang
+ # Without 2824, this createmany would hang
createmany -o $DIR/$tfile 20 || return 3
unlinkmany $DIR/$tfile 20 || return 4
- do_facet ost "sysctl -w lustre.fail_loc=0"
+ do_facet ost1 "lctl set_param fail_loc=0"
return 0
}
run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
-
test_48() {
- createmany -o $DIR/${tfile}- 100
- $CHECKSTAT $DIR/${tfile}-99 || return 1
- mds_evict_client
- df $MOUNT || df $MOUNT || return 2
- sleep 1
- $CHECKSTAT $DIR/${tfile}-99 || return 3
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+ [ "$OSTCOUNT" -lt "2" ] && skip "$OSTCOUNT < 2 OSTs -- skipping" && return
- dmesg -c >/dev/null
- replay_barrier mds1
- fail mds1
- unlinkmany $DIR/${tfile}- 100 || return 4
- if dmesg | grep "back in time"; then
- echo "server went back in time!"
- return 5
- fi
- return 0
-}
-run_test 48 "Don't lose transno when client is evicted (2525)"
+ replay_barrier $SINGLEMDS
+ createmany -o $DIR/$tfile 20 || return 1
+ # OBD_FAIL_OST_EROFS 0x216
+ facet_failover $SINGLEMDS
+ do_facet ost1 "lctl set_param fail_loc=0x80000216"
+ df $MOUNT || return 2
-# b=3550 - replay of unlink
-test_49() {
- replay_barrier mds1
- createmany -o $DIR/$tfile-%d 400 || return 1
- unlinkmany $DIR/$tfile-%d 0 400 || return 2
- fail mds1
- $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
+ createmany -o $DIR/$tfile 20 20 || return 2
+ unlinkmany $DIR/$tfile 40 || return 3
+ return 0
}
-run_test 49 "re-write records to llog as written during fail"
+run_test 48 "MDS->OSC failure during precreate cleanup (2824)"
test_50() {
- local osc_dev=`$LCTL device_list | \
- awk '(/ost_svc_mds1_svc/){print $4}' `
- $LCTL --device %$osc_dev recover && $LCTL --device %$osc_dev recover
+ local oscdev=`do_facet $SINGLEMDS lctl get_param -n devices | grep ${ost1_svc}-osc-MDT0000 | awk '{print $1}'`
+ [ "$oscdev" ] || return 1
+ do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 2
+ do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 3
# give the mds_lov_sync threads a chance to run
sleep 5
}
# b3764 timed out lock replay
test_52() {
touch $DIR/$tfile
- cancel_lru_locks MDC
+ cancel_lru_locks mdc
- multiop $DIR/$tfile s
- replay_barrier mds1
- do_facet mds1 "sysctl -w lustre.fail_loc=0x8000030c"
- fail mds1
- do_facet mds1 "sysctl -w lustre.fail_loc=0x0"
+ multiop $DIR/$tfile s || return 1
+ replay_barrier $SINGLEMDS
+#define OBD_FAIL_LDLM_REPLY 0x30c
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
+ fail $SINGLEMDS || return 2
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
$CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
}
run_test 52 "time out lock replay (3764)"
-equals_msg test complete, cleaning up
-$CLEANUP
+# bug 3462 - simultaneous MDC requests
+test_53a() {
+ mkdir -p $DIR/${tdir}-1
+ mkdir -p $DIR/${tdir}-2
+ multiop $DIR/${tdir}-1/f O_c &
+ close_pid=$!
+    # give multiop a chance to open
+ sleep 1
+
+ #define OBD_FAIL_MDS_CLOSE_NET 0x115
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc # force the close
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ mcreate $DIR/${tdir}-2/f || return 1
+
+ # close should still be here
+ [ -d /proc/$close_pid ] || return 2
+
+ replay_barrier_nodf $SINGLEMDS
+ fail $SINGLEMDS
+ wait $close_pid || return 3
+
+ $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
+ $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
+ rm -rf $DIR/${tdir}-*
+}
+run_test 53a "|X| close request while two MDC requests in flight"
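+
+# Tests 53a-53h all share one scheme: get a close RPC and an open RPC in
+# flight at the same time, drop one of them (or its reply) with a fail_loc,
+# then replay. The _nodf helpers used below are the test-framework variants
+# of replay_barrier/fail that skip the extra "df", so the barrier itself
+# does not inject more RPCs into the race:
+#   replay_barrier_nodf $SINGLEMDS
+#   fail_nodf $SINGLEMDS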
+
+test_53b() {
+ rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
+
+ mkdir -p $DIR/${tdir}-1
+ mkdir -p $DIR/${tdir}-2
+ multiop $DIR/${tdir}-1/f O_c &
+ close_pid=$!
+
+ #define OBD_FAIL_MDS_REINT_NET 0x107
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
+ mcreate $DIR/${tdir}-2/f &
+ open_pid=$!
+ sleep 1
+
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc # force the close
+ wait $close_pid || return 1
+ # open should still be here
+ [ -d /proc/$open_pid ] || return 2
+
+ replay_barrier_nodf $SINGLEMDS
+ fail $SINGLEMDS
+ wait $open_pid || return 3
+
+ $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
+ $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
+ rm -rf $DIR/${tdir}-*
+}
+run_test 53b "|X| open request while two MDC requests in flight"
+
+test_53c() {
+ rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
+
+ mkdir -p $DIR/${tdir}-1
+ mkdir -p $DIR/${tdir}-2
+ multiop $DIR/${tdir}-1/f O_c &
+ close_pid=$!
+
+ #define OBD_FAIL_MDS_REINT_NET 0x107
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
+ mcreate $DIR/${tdir}-2/f &
+ open_pid=$!
+ sleep 1
+
+ #define OBD_FAIL_MDS_CLOSE_NET 0x115
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc # force the close
+
+ replay_barrier_nodf $SINGLEMDS
+ fail_nodf $SINGLEMDS
+ wait $open_pid || return 1
+ sleep 2
+ # close should be gone
+ [ -d /proc/$close_pid ] && return 2
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
+ $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
+ rm -rf $DIR/${tdir}-*
+}
+run_test 53c "|X| open request and close request while two MDC requests in flight"
+
+test_53d() {
+ rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
+
+ mkdir -p $DIR/${tdir}-1
+ mkdir -p $DIR/${tdir}-2
+ multiop $DIR/${tdir}-1/f O_c &
+ close_pid=$!
+ # give multiop a chance to open
+ sleep 1
+
+ #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc # force the close
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ mcreate $DIR/${tdir}-2/f || return 1
+
+ # close should still be here
+ [ -d /proc/$close_pid ] || return 2
+ fail $SINGLEMDS
+ wait $close_pid || return 3
+
+ $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
+ $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
+ rm -rf $DIR/${tdir}-*
+}
+run_test 53d "|X| close reply while two MDC requests in flight"
+
+test_53e() {
+ rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
+
+ mkdir -p $DIR/${tdir}-1
+ mkdir -p $DIR/${tdir}-2
+ multiop $DIR/${tdir}-1/f O_c &
+ close_pid=$!
+
+ #define OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
+ mcreate $DIR/${tdir}-2/f &
+ open_pid=$!
+ sleep 1
+
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc # force the close
+ wait $close_pid || return 1
+ # open should still be here
+ [ -d /proc/$open_pid ] || return 2
+
+ replay_barrier_nodf $SINGLEMDS
+ fail $SINGLEMDS
+ wait $open_pid || return 3
+
+ $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
+ $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
+ rm -rf $DIR/${tdir}-*
+}
+run_test 53e "|X| open reply while two MDC requests in flight"
+
+test_53f() {
+ rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
+
+ mkdir -p $DIR/${tdir}-1
+ mkdir -p $DIR/${tdir}-2
+ multiop $DIR/${tdir}-1/f O_c &
+ close_pid=$!
+
+ #define OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
+ mcreate $DIR/${tdir}-2/f &
+ open_pid=$!
+ sleep 1
+
+ #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc # force the close
+
+ replay_barrier_nodf $SINGLEMDS
+ fail_nodf $SINGLEMDS
+ wait $open_pid || return 1
+ sleep 2
+ # close should be gone
+ [ -d /proc/$close_pid ] && return 2
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
+ $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
+ rm -rf $DIR/${tdir}-*
+}
+run_test 53f "|X| open reply and close reply while two MDC requests in flight"
+
+test_53g() {
+ rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
+
+ mkdir -p $DIR/${tdir}-1
+ mkdir -p $DIR/${tdir}-2
+ multiop $DIR/${tdir}-1/f O_c &
+ close_pid=$!
+
+ #define OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
+ mcreate $DIR/${tdir}-2/f &
+ open_pid=$!
+ sleep 1
+
+ #define OBD_FAIL_MDS_CLOSE_NET 0x115
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc # force the close
+
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ replay_barrier_nodf $SINGLEMDS
+ fail_nodf $SINGLEMDS
+ wait $open_pid || return 1
+ sleep 2
+ # close should be gone
+ [ -d /proc/$close_pid ] && return 2
+
+ $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
+ $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
+ rm -rf $DIR/${tdir}-*
+}
+run_test 53g "|X| drop open reply and close request while close and open are both in flight"
+
+test_53h() {
+ rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
+
+ mkdir -p $DIR/${tdir}-1
+ mkdir -p $DIR/${tdir}-2
+ multiop $DIR/${tdir}-1/f O_c &
+ close_pid=$!
+
+ #define OBD_FAIL_MDS_REINT_NET 0x107
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
+ mcreate $DIR/${tdir}-2/f &
+ open_pid=$!
+ sleep 1
+
+ #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
+ kill -USR1 $close_pid
+ cancel_lru_locks mdc # force the close
+ sleep 1
+
+ replay_barrier_nodf $SINGLEMDS
+ fail_nodf $SINGLEMDS
+ wait $open_pid || return 1
+ sleep 2
+ # close should be gone
+ [ -d /proc/$close_pid ] && return 2
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+
+ $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
+ $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
+ rm -rf $DIR/${tdir}-*
+}
+run_test 53h "|X| open request and close reply while two MDC requests in flight"
+
+#b_cray 54 "|X| open request and close reply while two MDC requests in flight"
+
+#b3761 ASSERTION(hash != 0) failed
+test_55() {
+# OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012b"
+ touch $DIR/$tfile &
+ # give touch a chance to run
+ sleep 5
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
+ rm $DIR/$tfile
+ return 0
+}
+run_test 55 "let MDS_CHECK_RESENT return the original return code instead of 0"
+
+#b3440 ASSERTION(rec->ur_fid2->id) failed
+test_56() {
+ ln -s foo $DIR/$tfile
+ replay_barrier $SINGLEMDS
+ #drop_reply "cat $DIR/$tfile"
+ fail $SINGLEMDS
+ sleep 10
+}
+run_test 56 "don't replay a symlink open request (3440)"
+
+# recovery of one mds-ost setattr from llog
+test_57() {
+#define OBD_FAIL_MDS_OST_SETATTR 0x12c
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
+ touch $DIR/$tfile
+ replay_barrier $SINGLEMDS
+ fail $SINGLEMDS
+ sleep 1
+ $CHECKSTAT -t file $DIR/$tfile || return 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
+ rm $DIR/$tfile
+}
+run_test 57 "test recovery from llog for setattr op"
+
+# recovery of many mds-ost setattrs from llog
+test_58a() {
+ mkdir -p $DIR/$tdir
+#define OBD_FAIL_MDS_OST_SETATTR 0x12c
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
+ createmany -o $DIR/$tdir/$tfile-%d 2500
+ replay_barrier $SINGLEMDS
+ fail $SINGLEMDS
+ sleep 2
+ $CHECKSTAT -t file $DIR/$tdir/$tfile-* >/dev/null || return 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
+ unlinkmany $DIR/$tdir/$tfile-%d 2500
+ rmdir $DIR/$tdir
+}
+run_test 58a "test recovery from llog for setattr op (test llog_gen_rec)"
+
+test_58b() {
+ mount_client $MOUNT2
+ mkdir -p $DIR/$tdir
+ touch $DIR/$tdir/$tfile
+ replay_barrier $SINGLEMDS
+ setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile
+ fail $SINGLEMDS
+ VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
+ [ x$VAL = x"bar" ] || return 1
+ rm -f $DIR/$tdir/$tfile
+ rmdir $DIR/$tdir
+ zconf_umount `hostname` $MOUNT2
+}
+run_test 58b "test replay of setxattr op"
+
+test_58c() { # bug 16570
+ mount_client $MOUNT2
+ mkdir -p $DIR/$tdir
+ touch $DIR/$tdir/$tfile
+ drop_request "setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile" || \
+ return 1
+ VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
+ [ x$VAL = x"bar" ] || return 2
+ drop_reint_reply "setfattr -n trusted.foo1 -v bar1 $DIR/$tdir/$tfile" || \
+ return 3
+ VAL=`getfattr --absolute-names --only-value -n trusted.foo1 $MOUNT2/$tdir/$tfile`
+ [ x$VAL = x"bar1" ] || return 4
+ rm -f $DIR/$tdir/$tfile
+ rmdir $DIR/$tdir
+ zconf_umount `hostname` $MOUNT2
+}
+run_test 58c "resend/reconstruct setxattr op"
+
+# log_commit_thread vs filter_destroy race used to lead to import use after free
+# bug 11658
+test_59() {
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ mkdir -p $DIR/$tdir
+ createmany -o $DIR/$tdir/$tfile-%d 200
+ sync
+ unlinkmany $DIR/$tdir/$tfile-%d 200
+#define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
+ do_facet ost1 "lctl set_param fail_loc=0x507"
+ fail ost1
+ fail $SINGLEMDS
+ do_facet ost1 "lctl set_param fail_loc=0x0"
+ sleep 20
+ rmdir $DIR/$tdir
+}
+run_test 59 "test log_commit_thread vs filter_destroy race"
+
+# race between adding an unlink llog and cat-log init in post_recovery (only for b1_6)
+# bug 12086: this test should produce no oops and no "No ctxt" error
+test_60() {
+ mkdir -p $DIR/$tdir
+ createmany -o $DIR/$tdir/$tfile-%d 200
+ replay_barrier $SINGLEMDS
+ unlinkmany $DIR/$tdir/$tfile-%d 0 100
+ fail $SINGLEMDS
+ unlinkmany $DIR/$tdir/$tfile-%d 100 100
+ local no_ctxt=`dmesg | grep "No ctxt"`
+ [ -z "$no_ctxt" ] || error "ctxt is not initialized in recovery"
+}
+run_test 60 "test llog post recovery init vs llog unlink"
+
+#test race llog recovery thread vs llog cleanup
+test_61a() { # was test_61
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ mkdir -p $DIR/$tdir
+ createmany -o $DIR/$tdir/$tfile-%d 800
+ replay_barrier ost1
+# OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
+ unlinkmany $DIR/$tdir/$tfile-%d 800
+ set_nodes_failloc "$(osts_nodes)" 0x80000221
+ facet_failover ost1
+ sleep 10
+ fail ost1
+ sleep 30
+ set_nodes_failloc "$(osts_nodes)" 0x0
+
+ $CHECKSTAT -t file $DIR/$tdir/$tfile-* && return 1
+ rmdir $DIR/$tdir
+}
+run_test 61a "test race llog recovery vs llog cleanup"
+
+#test race mds llog sync vs llog cleanup
+test_61b() {
+# OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013a"
+ facet_failover $SINGLEMDS
+ sleep 10
+ fail $SINGLEMDS
+ do_facet client dd if=/dev/zero of=$DIR/$tfile bs=4k count=1 || return 1
+}
+run_test 61b "test race mds llog sync vs llog cleanup"
+
+#test race cancel cookie cb vs llog cleanup
+test_61c() {
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+# OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
+ touch $DIR/$tfile
+ set_nodes_failloc "$(osts_nodes)" 0x80000222
+ rm $DIR/$tfile
+ sleep 10
+ fail ost1
+ set_nodes_failloc "$(osts_nodes)" 0x0
+}
+run_test 61c "test race cancel cookie cb vs llog cleanup"
+
+test_61d() { # bug 16002 # bug 17466
+ shutdown_facet $SINGLEMDS
+#define OBD_FAIL_OBD_LLOG_SETUP 0x605
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x605"
+ start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS && error "mds start should have failed"
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS || error "cannot restart mds"
+}
+run_test 61d "error in llog_setup should cleanup the llog context correctly"
+
+test_62() { # Bug 15756 - don't mis-drop resent replay
+ mkdir -p $DIR/$tdir
+ replay_barrier $SINGLEMDS
+ createmany -o $DIR/$tdir/$tfile- 25
+#define OBD_FAIL_TGT_REPLAY_DROP 0x707
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000707"
+ facet_failover $SINGLEMDS
+ df $MOUNT || return 1
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ unlinkmany $DIR/$tdir/$tfile- 25 || return 2
+ return 0
+}
+run_test 62 "don't mis-drop resent replay"
+
+#Adaptive Timeouts (bug 3055)
+AT_MAX_SET=0
+
+at_cleanup () {
+ local var
+ local facet
+ local at_new
+
+ echo "Cleaning up AT ..."
+ if [ -n "$ATOLDBASE" ]; then
+        # restore the at_history value saved by at_start
+        do_facet mds "lctl set_param at_history=$ATOLDBASE" || true
+        do_facet ost1 "lctl set_param at_history=$ATOLDBASE" || true
+ fi
+
+ if [ $AT_MAX_SET -ne 0 ]; then
+ for facet in mds client ost; do
+ var=AT_MAX_SAVE_${facet}
+ echo restore AT on $facet to saved value ${!var}
+ at_max_set ${!var} $facet
+ at_new=$(at_max_get $facet)
+ echo Restored AT value on $facet $at_new
+ [ $at_new -eq ${!var} ] || \
+ error "$facet : AT value was not restored SAVED ${!var} NEW $at_new"
+ done
+ fi
+}
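+
+# ${!var} above is bash indirect expansion: with var=AT_MAX_SAVE_ost,
+# ${!var} expands to the value of $AT_MAX_SAVE_ost. A one-line illustration:
+#   AT_MAX_SAVE_ost=600; var=AT_MAX_SAVE_ost; echo ${!var}   # prints 600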
+
+at_start()
+{
+ local at_max_new=600
+
+ # Save at_max original values
+ local facet
+ if [ $AT_MAX_SET -eq 0 ]; then
+ # Suppose that all osts have the same at_max
+ for facet in mds client ost; do
+ eval AT_MAX_SAVE_${facet}=$(at_max_get $facet)
+ done
+ fi
+ local at_max
+ for facet in mds client ost; do
+ at_max=$(at_max_get $facet)
+ if [ $at_max -ne $at_max_new ]; then
+ echo "AT value on $facet is $at_max, set it by force temporarily to $at_max_new"
+ at_max_set $at_max_new $facet
+ AT_MAX_SET=1
+ fi
+ done
+
+ if [ -z "$ATOLDBASE" ]; then
+ ATOLDBASE=$(do_facet mds "lctl get_param -n at_history")
+ # speed up the timebase so we can check decreasing AT
+ do_facet mds "lctl set_param at_history=8" || true
+ do_facet ost1 "lctl set_param at_history=8" || true
+
+ # sleep for a while to cool down, should be > 8s and also allow
+ # at least one ping to be sent. simply use TIMEOUT to be safe.
+ sleep $TIMEOUT
+ fi
+}
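+
+# Each AT test opens with "at_start || return 0": it forces at_max to 600s
+# on mds, client and ost (saving the old values for at_cleanup) and shrinks
+# at_history to 8s so adaptive estimates decay quickly enough to observe:
+#   at_start || return 0
+#   at_max_get mds      # reports 600 while the AT tests run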
+
+test_65a() #bug 3055
+{
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ at_start || return 0
+ $LCTL dk > /dev/null
+ debugsave
+ sysctl -w lnet.debug="+other"
+ # Slow down a request to the current service time, this is critical
+ # because previous tests may have caused this value to increase.
+ REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts |
+ awk '/portal 12/ {print $5}'`
+ REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
+
+ do_facet mds lctl set_param fail_val=$((${REQ_DELAY} * 1000))
+#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
+ do_facet mds sysctl -w lustre.fail_loc=0x8000050a
+ createmany -o $DIR/$tfile 10 > /dev/null
+ unlinkmany $DIR/$tfile 10 > /dev/null
+ # check for log message
+ $LCTL dk | grep "Early reply #" || error "No early reply"
+ debugrestore
+ # client should show REQ_DELAY estimates
+ lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
+ sleep 9
+ lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
+}
+run_test 65a "AT: verify early replies"
+
+test_65b() #bug 3055
+{
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ at_start || return 0
+ # turn on D_ADAPTTO
+ debugsave
+ sysctl -w lnet.debug="other trace"
+ $LCTL dk > /dev/null
+ # Slow down a request to the current service time, this is critical
+ # because previous tests may have caused this value to increase.
+ REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
+ awk '/portal 6/ {print $5}'`
+ REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
+
+ do_facet ost1 lctl set_param fail_val=${REQ_DELAY}
+#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
+ do_facet ost1 sysctl -w lustre.fail_loc=0x224
+
+ rm -f $DIR/$tfile
+ lfs setstripe $DIR/$tfile --index=0 --count=1
+ # force some real bulk transfer
+ multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
+
+ do_facet ost1 sysctl -w lustre.fail_loc=0
+ # check for log message
+ $LCTL dk | grep "Early reply #" || error "No early reply"
+ debugrestore
+ # client should show REQ_DELAY estimates
+ lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal
+}
+run_test 65b "AT: verify early replies on packed reply / bulk"
+
+test_66a() #bug 3055
+{
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ at_start || return 0
+ lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
+ # adjust 5s at a time so no early reply is sent (within deadline)
+ do_facet mds "sysctl -w lustre.fail_val=5000"
+#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
+ do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
+ createmany -o $DIR/$tfile 20 > /dev/null
+ unlinkmany $DIR/$tfile 20 > /dev/null
+ lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
+ do_facet mds "sysctl -w lustre.fail_val=10000"
+ do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
+ createmany -o $DIR/$tfile 20 > /dev/null
+ unlinkmany $DIR/$tfile 20 > /dev/null
+ lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
+ do_facet mds "sysctl -w lustre.fail_loc=0"
+ sleep 9
+ createmany -o $DIR/$tfile 20 > /dev/null
+ unlinkmany $DIR/$tfile 20 > /dev/null
+ lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
+ CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $5}')
+ WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $7}')
+ echo "Current MDT timeout $CUR, worst $WORST"
+ [ $CUR -lt $WORST ] || error "Current $CUR should be less than worst $WORST"
+}
+run_test 66a "AT: verify MDT service time adjusts with no early replies"
+
+test_66b() #bug 3055
+{
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ at_start || return 0
+ ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
+ sysctl -w lustre.fail_val=$(($ORIG + 5))
+#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
+ sysctl -w lustre.fail_loc=0x50c
+ ls $DIR/$tfile > /dev/null 2>&1
+ sysctl -w lustre.fail_loc=0
+ CUR=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
+ WORST=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $6}')
+ echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
+ [ $WORST -gt $ORIG ] || error "Worst $WORST should be worse than orig $ORIG"
+}
+run_test 66b "AT: verify net latency adjusts"
+
+test_67a() #bug 3055
+{
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ at_start || return 0
+ CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
+ # sleeping threads may drive values above this
+ do_facet ost1 "sysctl -w lustre.fail_val=400"
+#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
+ do_facet ost1 "sysctl -w lustre.fail_loc=0x50a"
+ createmany -o $DIR/$tfile 20 > /dev/null
+ unlinkmany $DIR/$tfile 20 > /dev/null
+ do_facet ost1 "sysctl -w lustre.fail_loc=0"
+ CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
+ ATTEMPTS=$(($CONN2 - $CONN1))
+ echo "$ATTEMPTS osc reconnect attempts on gradual slow"
+ [ $ATTEMPTS -gt 0 ] && error_ignore 13721 "AT should have prevented reconnect"
+ return 0
+}
+run_test 67a "AT: verify slow request processing doesn't induce reconnects"
+
+test_67b() #bug 3055
+{
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ at_start || return 0
+ CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
+#define OBD_FAIL_OST_PAUSE_CREATE 0x223
+ do_facet ost1 "sysctl -w lustre.fail_val=20000"
+ do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
+ cp /etc/profile $DIR/$tfile || error "cp failed"
+ client_reconnect
+ do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
+ log "phase 2"
+ CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
+ ATTEMPTS=$(($CONN2 - $CONN1))
+ echo "$ATTEMPTS osc reconnect attempts on instant slow"
+ # do it again; should not timeout
+ do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
+ cp /etc/profile $DIR/$tfile || error "cp failed"
+ do_facet ost1 "sysctl -w lustre.fail_loc=0"
+ client_reconnect
+ do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
+ CONN3=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
+ ATTEMPTS=$(($CONN3 - $CONN2))
+ echo "$ATTEMPTS osc reconnect attempts on 2nd slow"
+ [ $ATTEMPTS -gt 0 ] && error "AT should have prevented reconnect"
+ return 0
+}
+run_test 67b "AT: verify instant slowdown doesn't induce reconnects"
+
+test_68 () #bug 13813
+{
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ at_start || return 0
+ local ldlm_enqueue_min=$(find /sys -name ldlm_enqueue_min)
+ [ -z "$ldlm_enqueue_min" ] && skip "missing /sys/.../ldlm_enqueue_min" && return 0
+ local ldlm_enqueue_min_r=$(do_facet ost1 "find /sys -name ldlm_enqueue_min")
+ [ -z "$ldlm_enqueue_min_r" ] && skip "missing /sys/.../ldlm_enqueue_min in the ost1" && return 0
+ local ENQ_MIN=$(cat $ldlm_enqueue_min)
+ local ENQ_MIN_R=$(do_facet ost1 "cat $ldlm_enqueue_min_r")
+ echo $TIMEOUT >> $ldlm_enqueue_min
+ do_facet ost1 "echo $TIMEOUT >> $ldlm_enqueue_min_r"
+
+ rm -rf $DIR/$tdir
+ mkdir -p $DIR/$tdir
+ lfs setstripe $DIR/$tdir --index=0 --count=1
+#define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
+ sysctl -w lustre.fail_val=$(($TIMEOUT - 1))
+ sysctl -w lustre.fail_loc=0x80000312
+ cp /etc/profile $DIR/$tdir/${tfile}_1 || error "1st cp failed $?"
+ sysctl -w lustre.fail_val=$((TIMEOUT * 5 / 4))
+ sysctl -w lustre.fail_loc=0x80000312
+ cp /etc/profile $DIR/$tdir/${tfile}_2 || error "2nd cp failed $?"
+ sysctl -w lustre.fail_loc=0
+
+ echo $ENQ_MIN >> $ldlm_enqueue_min
+ do_facet ost1 "echo $ENQ_MIN_R >> $ldlm_enqueue_min_r"
+ rm -rf $DIR/$tdir
+ return 0
+}
+run_test 68 "AT: verify slowing locks"
+
+at_cleanup
+# end of AT tests includes above lines
+
+
+# start multi-client tests
+test_70a () {
+ [ -z "$CLIENTS" ] && \
+ { skip "Need two or more clients." && return; }
+ [ $CLIENTCOUNT -lt 2 ] && \
+ { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
+
+ echo "mount clients $CLIENTS ..."
+ zconf_mount_clients $CLIENTS $DIR
+
+ local clients=${CLIENTS//,/ }
+ echo "Write/read files on $DIR ; clients $CLIENTS ... "
+ for CLIENT in $clients; do
+ do_node $CLIENT dd bs=1M count=10 if=/dev/zero \
+ of=$DIR/${tfile}_${CLIENT} 2>/dev/null || \
+ error "dd failed on $CLIENT"
+ done
+
+ local prev_client=$(echo $clients | sed 's/^.* \(.\+\)$/\1/')
+ for C in ${CLIENTS//,/ }; do
+ do_node $prev_client dd if=$DIR/${tfile}_${C} of=/dev/null 2>/dev/null || \
+ error "dd if=$DIR/${tfile}_${C} failed on $prev_client"
+ prev_client=$C
+ done
+
+ ls $DIR
+}
+run_test 70a "check multi client t-f"
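+
+# In test_70a each client writes its own file and the read loop then pairs
+# every file with a different reader: prev_client starts as the last client
+# in the list, so each file is read from a node other than the one that
+# wrote it, exposing cross-client coherency problems a local read would miss.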
+
+test_70b () {
+ local clients=${CLIENTS:-$HOSTNAME}
+
+ zconf_mount_clients $clients $DIR
+
+ local duration=300
+ [ "$SLOW" = "no" ] && duration=60
+ local cmd="rundbench 1 -t $duration"
+ local PID=""
+ do_nodes $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
+ PATH=:$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \
+ DBENCH_LIB=$DBENCH_LIB TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \
+ LCTL=$LCTL $cmd" &
+ PID=$!
+ log "Started rundbench load PID=$PID ..."
+ ELAPSED=0
+ NUM_FAILOVERS=0
+ START_TS=$(date +%s)
+ CURRENT_TS=$START_TS
+ while [ $ELAPSED -lt $duration ]; do
+ sleep 1
+ replay_barrier $SINGLEMDS
+ sleep 1 # give clients a time to do operations
+ # Increment the number of failovers
+ NUM_FAILOVERS=$((NUM_FAILOVERS+1))
+        log "$TESTNAME fail $SINGLEMDS $NUM_FAILOVERS times"
+ facet_failover $SINGLEMDS
+ CURRENT_TS=$(date +%s)
+ ELAPSED=$((CURRENT_TS - START_TS))
+ done
+ wait $PID || error "rundbench load on $CLIENTS failed!"
+}
+run_test 70b "mds recovery; $CLIENTCOUNT clients"
+# end multi-client tests
+
+test_73a() {
+ multiop_bg_pause $DIR/$tfile O_tSc || return 3
+ pid=$!
+ rm -f $DIR/$tfile
+
+ replay_barrier $SINGLEMDS
+#define OBD_FAIL_LDLM_ENQUEUE 0x302
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000302"
+ fail $SINGLEMDS
+ kill -USR1 $pid
+ wait $pid || return 1
+ [ -e $DIR/$tfile ] && return 2
+ return 0
+}
+run_test 73a "open(O_CREAT), unlink, replay, reconnect before open replay , close"
+
+test_73b() {
+ multiop_bg_pause $DIR/$tfile O_tSc || return 3
+ pid=$!
+ rm -f $DIR/$tfile
+
+ replay_barrier $SINGLEMDS
+#define OBD_FAIL_LDLM_REPLY 0x30c
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
+ fail $SINGLEMDS
+ kill -USR1 $pid
+ wait $pid || return 1
+ [ -e $DIR/$tfile ] && return 2
+ return 0
+}
+run_test 73b "open(O_CREAT), unlink, replay, reconnect at open_replay reply, close"
+
+test_73c() {
+ multiop_bg_pause $DIR/$tfile O_tSc || return 3
+ pid=$!
+ rm -f $DIR/$tfile
+
+ replay_barrier $SINGLEMDS
+#define OBD_FAIL_TGT_LAST_REPLAY 0x710
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000710"
+ fail $SINGLEMDS
+ kill -USR1 $pid
+ wait $pid || return 1
+ [ -e $DIR/$tfile ] && return 2
+ return 0
+}
+run_test 73c "open(O_CREAT), unlink, replay, reconnect at last_replay, close"
+
+# bug 18554
+test_74() {
+ stop ost1
+ zconf_umount $(hostname) $MOUNT
+ fail $SINGLEMDS
+ zconf_mount $(hostname) $MOUNT
+ mount_facet ost1
+ touch $DIR/$tfile || return 1
+ rm $DIR/$tfile || return 2
+ df $MOUNT || error "df failed: $?"
+ return 0
+}
+run_test 74 "Ensure applications don't fail waiting for OST recovery"
+
+test_80a() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+
+ mkdir -p $DIR/$tdir
+ replay_barrier mds2
+ $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
+ rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
+ fail mds2
+    stat $DIR/$tdir >/dev/null 2>&1 && error "$DIR/$tdir still exists after recovery!"
+ return 0
+}
+run_test 80a "CMD: unlink cross-node dir (fail mds with inode)"
+
+test_80b() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+
+ mkdir -p $DIR/$tdir
+ replay_barrier mds1
+ $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
+ rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
+ fail mds1
+    stat $DIR/$tdir >/dev/null 2>&1 && error "$DIR/$tdir still exists after recovery!"
+ return 0
+}
+run_test 80b "CMD: unlink cross-node dir (fail mds with name)"
+
+test_81a() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+
+ mkdir -p $DIR/$tdir
+ createmany -o $DIR/$tdir/f 3000 || error "createmany failed"
+ sleep 10
+ $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir failed"
+ $CHECKSTAT -t file $DIR/$tdir/f1002 || error "$CHECKSTAT -t file failed"
+ replay_barrier mds1
+ rm $DIR/$tdir/f1002 || error "rm $DIR/$tdir/f1002 failed"
+ fail mds1
+ stat $DIR/$tdir/f1002
+}
+run_test 81a "CMD: unlink cross-node file (fail mds with name)"
+
+test_82a() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+
+ local dir=$DIR/d82a
+ replay_barrier mds2
+ mkdir $dir || error "mkdir $dir failed"
+ log "FAILOVER mds2"
+ fail mds2
+ stat $DIR
+ $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
+}
+run_test 82a "CMD: mkdir cross-node dir (fail mds with inode)"
+
+test_82b() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+
+ local dir=$DIR/d82b
+ replay_barrier mds1
+ mkdir $dir || error "mkdir $dir failed"
+ log "FAILOVER mds1"
+ fail mds1
+ stat $DIR
+ $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
+}
+run_test 82b "CMD: mkdir cross-node dir (fail mds with name)"
+
+test_84() {
+#define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x143
+ do_facet mds "lctl set_param fail_loc=0x80000143"
+ createmany -o $DIR/$tfile- 1 &
+ PID=$!
+ mds_evict_client
+ wait $PID
+ df -P $DIR || df -P $DIR || true # reconnect
+}
+run_test 84 "stale open during export disconnect"
+equals_msg `basename $0`: test complete, cleaning up
+check_and_cleanup_lustre
+[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true