-#!/bin/sh
+#!/bin/bash
set -e
+#set -v
#
# This test needs to be run on the client
LUSTRE=${LUSTRE:-`dirname $0`/..}
. $LUSTRE/tests/test-framework.sh
-
init_test_env $@
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-. ${CONFIG:=$LUSTRE/tests/cfg/local.sh}
# Skip these tests
-ALWAYS_EXCEPT="35"
-
+# bug number: 4176
+ALWAYS_EXCEPT="39 $REPLAY_SINGLE_EXCEPT"
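+# build_test_filter skips every test number in ALWAYS_EXCEPT; set
+# REPLAY_SINGLE_EXCEPT in the environment to exclude more tests.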
gen_config() {
rm -f $XMLCONFIG
add_mdsfailover mds --dev $MDSDEV --size $MDSSIZE
fi
- add_lov lov1 mds --stripe_sz $STRIPE_BYTES\
+ add_lov lov1 mds --stripe_sz $STRIPE_BYTES \
--stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
build_test_filter
-cleanup() {
- # make sure we are using the primary MDS, so the config log will
- # be able to clean up properly.
- activemds=`facet_active mds`
- if [ $activemds != "mds" ]; then
- fail mds
- fi
- zconf_umount $MOUNT
- stop mds ${FORCE} $MDSLCONFARGS
- stop ost2 ${FORCE} --dump cleanup.log
- stop ost ${FORCE} --dump cleanup.log
-}
+SETUP=${SETUP:-"setup"}
+CLEANUP=${CLEANUP:-"cleanupall"}
if [ "$ONLY" == "cleanup" ]; then
- sysctl -w portals.debug=0 || true
- cleanup
- exit
+ sysctl -w lnet.debug=0 || true
+ $CLEANUP
+ exit 0
fi
-gen_config
+setup() {
+ formatall
+ setupall
+}
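+# SETUP/CLEANUP can be overridden to skip the reformat on repeated runs,
+# e.g. SETUP=: CLEANUP=: (":" is the shell no-op builtin).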
-start ost --reformat $OSTLCONFARGS
-start ost2 --reformat $OSTLCONFARGS
-[ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
-start mds $MDSLCONFARGS --reformat
-zconf_mount $MOUNT
+$SETUP
if [ "$ONLY" == "setup" ]; then
exit 0
}
run_test 0 "empty replay"
+test_0b() {
+ # this test attempts to trigger a race in the precreation code,
+ # and must run before any other objects are created on the filesystem
+ fail ost1
+ createmany -o $DIR/$tfile 20 || return 1
+ unlinkmany $DIR/$tfile 20 || return 2
+}
+run_test 0b "ensure objects created after recovery exist (3284)"
+
test_1() {
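+    # replay_barrier (test-framework.sh) syncs the MDS then marks its device
+    # read-only, so the ops that follow are uncommitted and must be replayed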
replay_barrier mds
mcreate $DIR/$tfile
}
run_test 1 "simple create"
+test_1a() {
+ do_facet ost1 "sysctl -w lustre.fail_loc=0"
+
+ rm -fr $DIR/$tfile
+ local old_last_id=`cat $LPROC/obdfilter/*/last_id`
+    touch $DIR/$tfile
+ sync
+ local new_last_id=`cat $LPROC/obdfilter/*/last_id`
+
+ test "$old_last_id" = "$new_last_id" || {
+        echo "touch created an OST object, but CROW should defer creation until first write"
+ return 1
+ }
+
+ old_last_id=`cat $LPROC/obdfilter/*/last_id`
+ echo "data" > $DIR/$tfile
+ sync
+ new_last_id=`cat $LPROC/obdfilter/*/last_id`
+    test "$old_last_id" = "$new_last_id" && {
+ echo "CROW does not work on write"
+ return 1
+ }
+
+ rm -fr $DIR/$tfile
+
+#define OBD_FAIL_OST_CROW_EIO | OBD_FAIL_ONCE
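+# (OBD_FAIL_ONCE is the 0x80000000 bit, so the injected EIO fires only once)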
+ do_facet ost1 "sysctl -w lustre.fail_loc=0x80000801"
+
+ rm -fr $DIR/1a1
+ old_last_id=`cat $LPROC/obdfilter/*/last_id`
+ echo "data" > $DIR/1a1
+ sync
+ new_last_id=`cat $LPROC/obdfilter/*/last_id`
+ test "$old_last_id" = "$new_last_id" || {
+        echo "CROW create succeeded despite fail_loc=0x80000801"
+ return 1
+ }
+
+ rm -fr $DIR/1a1
+
+ do_facet ost1 "sysctl -w lustre.fail_loc=0"
+}
+#CROW run_test 1a "CROW object create (check OST last_id)"
+
test_2a() {
replay_barrier mds
touch $DIR/$tfile
}
run_test 2b "touch"
-test_3() {
+test_3a() {
replay_barrier mds
mcreate $DIR/$tfile
o_directory $DIR/$tfile
$CHECKSTAT -t file $DIR/$tfile || return 2
rm $DIR/$tfile
}
-run_test 3 "replay failed open"
+run_test 3a "replay failed open(O_DIRECTORY)"
+
+test_3b() {
+ replay_barrier mds
+#define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
+ do_facet mds "sysctl -w lustre.fail_loc=0x80000114"
+ touch $DIR/$tfile
+ do_facet mds "sysctl -w lustre.fail_loc=0"
+ fail mds
+ $CHECKSTAT -t file $DIR/$tfile && return 2
+ return 0
+}
+run_test 3b "replay failed open -ENOMEM (OBD_FAIL_MDS_OPEN_PACK)"
+
+test_3c() {
+ replay_barrier mds
+#define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
+ do_facet mds "sysctl -w lustre.fail_loc=0x80000128"
+ touch $DIR/$tfile
+ do_facet mds "sysctl -w lustre.fail_loc=0"
+ fail mds
+
+ $CHECKSTAT -t file $DIR/$tfile && return 2
+ return 0
+}
+run_test 3c "replay failed open -ENOMEM (OBD_FAIL_MDS_ALLOC_OBDO)"
test_4() {
replay_barrier mds
done
fail mds
for i in `seq 10`; do
- grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i"
+ grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
done
}
run_test 4 "|x| 10 open(O_CREAT)s"
sleep 1
rm -f $DIR/$tfile
touch $DIR/$tfile-2 || return 1
+ echo "pid: $pid will close"
kill -USR1 $pid
wait $pid || return 2
}
run_test 20 "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
+test_20b() { # bug 10480
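+    # df -P (POSIX format) keeps each filesystem on one line; field 3 is KB used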
+ BEFOREUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
+
+ dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000 &
+ pid=$!
+ while [ ! -e $DIR/$tfile ] ; do
+ usleep 60 # give dd a chance to start
+ done
+
+ lfs getstripe $DIR/$tfile || return 1
+ rm -f $DIR/$tfile || return 2 # make it an orphan
+ mds_evict_client
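+    # df may fail with EIO right after the eviction; the retry reconnects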
+ df -P $DIR || df -P $DIR || true # reconnect
+
+ fail mds # start orphan recovery
+ df -P $DIR || df -P $DIR || true # reconnect
+ sleep 2
+
+ AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
+ log "before $BEFOREUSED, after $AFTERUSED"
+ [ $AFTERUSED -gt $((BEFOREUSED + 20)) ] && \
+ error "after $AFTERUSED > before $BEFOREUSED" && return 5
+ return 0
+}
+run_test 20b "write, unlink, eviction, replay (test mds_cleanup_orphans)"
+
+test_20c() { # bug 10480
+ dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000
+
+ exec 100< $DIR/$tfile
+
+ ls -la $DIR/$tfile
+
+ mds_evict_client
+
+ df -P $DIR || df -P $DIR || true # reconnect
+
+ exec 100<&-
+
+ test -s $DIR/$tfile || error "File was truncated"
+
+ return 0
+}
+run_test 20c "check that client eviction does not affect file content"
+
test_21() {
replay_barrier mds
multiop $DIR/$tfile O_tSc &
# give multiop a chance to open
sleep 1
mds_evict_client
- df $MOUNT || df $MOUNT || return 1
+    df $MOUNT || { sleep 1 && df $MOUNT; } || return 1
kill -USR1 $pid1
kill -USR1 $pid2
sleep 1
touch $DIR/$tfile
fail_abort mds
# this file should be gone, because the replay was aborted
- $CHECKSTAT -t file $DIR/$tfile && return 1
+ $CHECKSTAT -t file $DIR/$tfile && return 3
return 0
}
run_test 33 "abort recovery before client does replay"
fail_abort mds
kill -USR1 $pid
[ -e $DIR/$tfile ] && return 1
- sleep 3
- # wait for commitment of removal
+ sync
return 0
}
run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
test_35() {
touch $DIR/$tfile
- echo 0x80000119 > /proc/sys/lustre/fail_loc
+#define OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds "sysctl -w lustre.fail_loc=0x80000119"
rm -f $DIR/$tfile &
sleep 1
+ sync
+ sleep 1
# give a chance to remove from MDS
fail_abort mds
$CHECKSTAT -t file $DIR/$tfile && return 1 || true
}
run_test 35 "test recovery from llog for unlink op"
-equals_msg test complete, cleaning up
-cleanup
+# b=2432 resent cancel after replay uses wrong cookie,
+# so don't resend cancels
+test_36() {
+ replay_barrier mds
+ touch $DIR/$tfile
+ checkstat $DIR/$tfile
+ facet_failover mds
+ cancel_lru_locks mdc
+ if dmesg | grep "unknown lock cookie"; then
+ echo "cancel after replay failed"
+ return 1
+ fi
+}
+run_test 36 "don't resend cancel"
+
+# b=2368
+# directory orphans can't be unlinked from PENDING directory
+test_37() {
+ rmdir $DIR/$tfile 2>/dev/null
+ multiop $DIR/$tfile dD_c &
+ pid=$!
+ # give multiop a chance to open
+ sleep 1
+ rmdir $DIR/$tfile
+
+ replay_barrier mds
+ # clear the dmesg buffer so we only see errors from this recovery
+ dmesg -c >/dev/null
+ fail_abort mds
+ kill -USR1 $pid
+ dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
+ sync
+ return 0
+}
+run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
+
+test_38() {
+ createmany -o $DIR/$tfile-%d 800
+ unlinkmany $DIR/$tfile-%d 0 400
+ replay_barrier mds
+ fail mds
+ unlinkmany $DIR/$tfile-%d 400 400
+ sleep 2
+ $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
+}
+run_test 38 "test recovery from unlink llog (test llog_gen_rec)"
+
+test_39() { # bug 4176
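+    # like test_38, but here the first batch of unlinks happens after the
+    # replay barrier, so it must be recovered from the unlink llog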
+ createmany -o $DIR/$tfile-%d 800
+ replay_barrier mds
+ unlinkmany $DIR/$tfile-%d 0 400
+ fail mds
+ unlinkmany $DIR/$tfile-%d 400 400
+ sleep 2
+ ls -1f $DIR/$tfile-*
+ $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
+}
+run_test 39 "test recovery from unlink llog (test llog_gen_rec)"
+
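+# Sum the ost_write RPC counts from every OSC stats file under $LPROC/osc/.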
+count_ost_writes() {
+ awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }' $LPROC/osc/*/stats
+}
+
+#b=2477,2532
+test_40(){
+ $LCTL mark multiop $MOUNT/$tfile OS_c
+ multiop $MOUNT/$tfile OS_c &
+ PID=$!
+ writeme -s $MOUNT/${tfile}-2 &
+ WRITE_PID=$!
+ sleep 1
+ facet_failover mds
+#define OBD_FAIL_MDS_CONNECT_NET 0x117
+ do_facet mds "sysctl -w lustre.fail_loc=0x80000117"
+ kill -USR1 $PID
+ stat1=`count_ost_writes`
+ sleep $TIMEOUT
+ stat2=`count_ost_writes`
+ echo "$stat1, $stat2"
+ if [ $stat1 -lt $stat2 ]; then
+ echo "writes continuing during recovery"
+ RC=0
+ else
+ echo "writes not continuing during recovery, bug 2477"
+ RC=4
+ fi
+ echo "waiting for writeme $WRITE_PID"
+ kill $WRITE_PID
+ wait $WRITE_PID
+
+ echo "waiting for multiop $PID"
+ wait $PID || return 2
+ do_facet client munlink $MOUNT/$tfile || return 3
+ do_facet client munlink $MOUNT/${tfile}-2 || return 3
+ return $RC
+}
+run_test 40 "cause recovery in ptlrpc, ensure IO continues"
+
+
+#b=2814
+# make sure that a read to one osc doesn't try to double-unlock its page just
+# because another osc is invalid. trigger_group_io used to mistakenly return
+# an error if any oscs were invalid even after having successfully put rpcs
+# on valid oscs. This was fatal if the caller was ll_readpage who unlocked
+# the page, guaranteeing that the unlock from the RPC completion would
+# assert on trying to unlock the unlocked page.
+test_41() {
+ local f=$MOUNT/$tfile
+ # make sure the start of the file is ost1
+ lfs setstripe $f $((128 * 1024)) 0 0
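+    # old positional setstripe syntax: stripe size 128K, starting OST index 0,
+    # stripe count 0 (filesystem default)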
+ do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
+ cancel_lru_locks osc
+ # fail ost2 and read from ost1
+ local osc2dev=`grep ${ost2_svc}-osc- $LPROC/devices | awk '{print $1}'`
+ [ "$osc2dev" ] || return 4
+ $LCTL --device $osc2dev deactivate || return 1
+ do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
+ $LCTL --device $osc2dev activate || return 2
+ return 0
+}
+run_test 41 "read from a valid osc while other oscs are invalid"
+
+# test MDS recovery after ost failure
+test_42() {
+ blocks=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
+ createmany -o $DIR/$tfile-%d 800
+ replay_barrier ost1
+ unlinkmany $DIR/$tfile-%d 0 400
+ DEBUG42="`sysctl -n lnet.debug`"
+ sysctl -w lnet.debug=-1
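+    # lnet.debug=-1 enables all debug flags while the MDS times out and recovers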
+ facet_failover ost1
+
+    # osc is evicted, fs is smaller (but only with failout OSTs, bug 7287)
+ #blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
+ #[ $blocks_after -lt $blocks ] || return 1
+ echo wait for MDS to timeout and recover
+ sleep $((TIMEOUT * 2))
+ sysctl -w lnet.debug=$DEBUG42
+ unlinkmany $DIR/$tfile-%d 400 400
+ $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
+}
+run_test 42 "recovery after ost failure"
+
+# timeout in MDS/OST recovery RPC will LBUG MDS
+test_43() { # bug 2530
+ replay_barrier mds
+
+ # OBD_FAIL_OST_CREATE_NET 0x204
+ do_facet ost1 "sysctl -w lustre.fail_loc=0x80000204"
+ fail mds
+ sleep 10
+ do_facet ost1 "sysctl -w lustre.fail_loc=0"
+
+ return 0
+}
+run_test 43 "mds osc import failure during recovery; don't LBUG"
+
+test_44() {
+ mdcdev=`awk '/-mdc-/ {print $1}' $LPROC/devices`
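+    # the first field of $LPROC/devices is the device number lctl --device expects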
+    [ "$mdcdev" ] || return 2
+ for i in `seq 1 10`; do
+ #define OBD_FAIL_TGT_CONN_RACE 0x701
+ do_facet mds "sysctl -w lustre.fail_loc=0x80000701"
+ $LCTL --device $mdcdev recover
+ df $MOUNT
+ done
+ do_facet mds "sysctl -w lustre.fail_loc=0"
+ return 0
+}
+run_test 44 "race in target handle connect"
+
+test_44b() {
+ mdcdev=`awk '/-mdc-/ {print $1}' $LPROC/devices`
+    [ "$mdcdev" ] || return 2
+ for i in `seq 1 10`; do
+ #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
+ do_facet mds "sysctl -w lustre.fail_loc=0x80000704"
+ $LCTL --device $mdcdev recover
+ df $MOUNT
+ done
+ do_facet mds "sysctl -w lustre.fail_loc=0"
+ return 0
+}
+run_test 44b "race in target handle connect"
+
+# Handle failed close
+test_45() {
+ mdcdev=`awk '/-mdc-/ {print $1}' $LPROC/devices`
+    [ "$mdcdev" ] || return 2
+ $LCTL --device $mdcdev recover
+
+ multiop $DIR/$tfile O_c &
+ pid=$!
+ sleep 1
+
+ # This will cause the CLOSE to fail before even
+ # allocating a reply buffer
+ $LCTL --device $mdcdev deactivate || return 4
+
+ # try the close
+ kill -USR1 $pid
+ wait $pid || return 1
+
+ $LCTL --device $mdcdev activate || return 5
+ sleep 1
+
+ $CHECKSTAT -t file $DIR/$tfile || return 2
+ return 0
+}
+run_test 45 "Handle failed close"
+
+test_46() {
+ dmesg -c >/dev/null
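+    # drop_reply (test-framework.sh) drops the MDS reply so the open is resent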
+ drop_reply "touch $DIR/$tfile"
+ fail mds
+ # ironically, the previous test, 45, will cause a real forced close,
+ # so just look for one for this test
+ dmesg | grep -i "force closing client file handle for $tfile" && return 1
+ return 0
+}
+run_test 46 "Don't leak file handle after open resend (3325)"
+
+test_47() { # bug 2824
+ # create some files to make sure precreate has been done on all
+ # OSTs. (just in case this test is run independently)
+ createmany -o $DIR/$tfile 20 || return 1
+
+ # OBD_FAIL_OST_CREATE_NET 0x204
+ fail ost1
+ do_facet ost1 "sysctl -w lustre.fail_loc=0x80000204"
+ df $MOUNT || return 2
+
+ # let the MDS discover the OST failure, attempt to recover, fail
+ # and recover again.
+ sleep $((3 * TIMEOUT))
+
+ # Without 2824, this createmany would hang
+ createmany -o $DIR/$tfile 20 || return 3
+ unlinkmany $DIR/$tfile 20 || return 4
+
+ do_facet ost1 "sysctl -w lustre.fail_loc=0"
+ return 0
+}
+run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
+
+test_48() {
+ replay_barrier mds
+ createmany -o $DIR/$tfile 20 || return 1
+ # OBD_FAIL_OST_EROFS 0x216
+ fail mds
+ do_facet ost1 "sysctl -w lustre.fail_loc=0x80000216"
+ df $MOUNT || return 2
+
+    createmany -o $DIR/$tfile 20 20 || return 3
+    unlinkmany $DIR/$tfile 40 || return 4
+
+ do_facet ost1 "sysctl -w lustre.fail_loc=0"
+ return 0
+}
+run_test 48 "MDS->OSC failure during precreate cleanup (2824)"
+
+test_50() {
+ local oscdev=`grep ${ost1_svc}-osc- $LPROC/devices | awk '{print $1}'`
+ [ "$oscdev" ] || return 1
+ $LCTL --device $oscdev recover && $LCTL --device $oscdev recover
+ # give the mds_lov_sync threads a chance to run
+ sleep 5
+}
+run_test 50 "Double OSC recovery, don't LASSERT (3812)"
+
+# b3764 timed out lock replay
+test_52() {
+ touch $DIR/$tfile
+ cancel_lru_locks mdc
+
+ multiop $DIR/$tfile s || return 1
+ replay_barrier mds
+#define OBD_FAIL_LDLM_REPLY 0x30c
+ do_facet mds "sysctl -w lustre.fail_loc=0x8000030c"
+ fail mds || return 2
+ do_facet mds "sysctl -w lustre.fail_loc=0x0"
+
+ $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
+}
+run_test 52 "time out lock replay (3764)"
+
+#b_cray 53 "|X| open request and close reply while two MDC requests in flight"
+#b_cray 54 "|X| open request and close reply while two MDC requests in flight"
+
+#b3761 ASSERTION(hash != 0) failed
+test_55() {
+# OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
+ do_facet mds "sysctl -w lustre.fail_loc=0x8000012b"
+ touch $DIR/$tfile &
+ # give touch a chance to run
+ sleep 5
+ do_facet mds "sysctl -w lustre.fail_loc=0x0"
+ rm $DIR/$tfile
+ return 0
+}
+run_test 55 "let MDS_CHECK_RESENT return the original return code instead of 0"
+
+#b3440 ASSERTION(rec->ur_fid2->id) failed
+test_56() {
+ ln -s foo $DIR/$tfile
+ replay_barrier mds
+ #drop_reply "cat $DIR/$tfile"
+ fail mds
+ sleep 10
+}
+run_test 56 "don't replay a symlink open request (3440)"
+
+#recovery one mds-ost setattr from llog
+test_57() {
+#define OBD_FAIL_MDS_OST_SETATTR 0x12c
+ do_facet mds "sysctl -w lustre.fail_loc=0x8000012c"
+ touch $DIR/$tfile
+ replay_barrier mds
+ fail mds
+ sleep 1
+ $CHECKSTAT -t file $DIR/$tfile || return 1
+ do_facet mds "sysctl -w lustre.fail_loc=0x0"
+ rm $DIR/$tfile
+}
+run_test 57 "test recovery from llog for setattr op"
+
+#recovery many mds-ost setattr from llog
+test_58() {
+#define OBD_FAIL_MDS_OST_SETATTR 0x12c
+ do_facet mds "sysctl -w lustre.fail_loc=0x8000012c"
+ mkdir $DIR/$tdir
+ createmany -o $DIR/$tdir/$tfile-%d 2500
+ replay_barrier mds
+ fail mds
+ sleep 2
+ $CHECKSTAT -t file $DIR/$tdir/$tfile-* || return 1
+ do_facet mds "sysctl -w lustre.fail_loc=0x0"
+ unlinkmany $DIR/$tdir/$tfile-%d 2500
+ rmdir $DIR/$tdir
+}
+run_test 58 "test recovery from llog for setattr op (test llog_gen_rec)"
+
+equals_msg `basename $0`: test complete, cleaning up
+$CLEANUP