# This test needs to be run on the client
#
SAVE_PWD=$PWD
+export MULTIOP=${MULTIOP:-multiop}
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
SETUP=${SETUP:-}
CLEANUP=${CLEANUP:-}
require_dsh_mds || exit 0
# Skip these tests
-# bug number: 17466 18857
-ALWAYS_EXCEPT="61d 33a 33b $REPLAY_SINGLE_EXCEPT"
+# bug number: 17466 18857 LU-1867
+ALWAYS_EXCEPT="61d 33a 33b 89 $REPLAY_SINGLE_EXCEPT"
+
+[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+# bug number for skipped test: LU-2342 LU-951
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 20b 70a 73a"
# 63 min 7 min AT AT AT AT"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 6 12 16 44a 44b 65 66 67 68"
mkdir -p $DIR
assert_DIR
-rm -rf $DIR/[df][0-9]*
+rm -rf $DIR/[df][0-9]* $DIR/f.$TESTSUITE.*
+
+# LU-482 Avert LVM and VM inability to flush caches in pre .33 kernels
+if [ $LINUX_VERSION_CODE -lt $(version_code 2.6.33) ]; then
+ sync
+ do_facet $SINGLEMDS sync
+fi
test_0a() { # was test_0
- sleep 10
mkdir $DIR/$tfile
replay_barrier $SINGLEMDS
fail $SINGLEMDS
}
run_test 0b "ensure object created after recover exists. (3284)"
+test_0c() {
+ replay_barrier $SINGLEMDS
+ mcreate $DIR/$tfile
+ umount $MOUNT
+ facet_failover $SINGLEMDS
+ zconf_mount `hostname` $MOUNT || error "mount fails"
+ client_up || error "post-failover df failed"
+ # file shouldn't exist if replay-barrier works as expected
+ rm $DIR/$tfile && error "File exists and it shouldn't"
+ return 0
+}
+run_test 0c "check replay-barrier"
+
test_0d() {
replay_barrier $SINGLEMDS
umount $MOUNT
usleep 60 # give dd a chance to start
done
- lfs getstripe $DIR/$tfile || return 1
+ $GETSTRIPE $DIR/$tfile || return 1
rm -f $DIR/$tfile || return 2 # make it an orphan
mds_evict_client
client_up || client_up || true # reconnect
replay_barrier $SINGLEMDS
# clear the dmesg buffer so we only see errors from this recovery
- dmesg -c >/dev/null
+ do_facet $SINGLEMDS dmesg -c >/dev/null
fail_abort $SINGLEMDS
kill -USR1 $pid
- dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
+ do_facet $SINGLEMDS dmesg | grep "error .* unlinking .* from PENDING" &&
+ return 1
wait $pid || return 3
sync
return 0
# the page, guarnateeing that the unlock from the RPC completion would
# assert on trying to unlock the unlocked page.
test_41() {
- [ $OSTCOUNT -lt 2 ] && \
- skip_env "skipping test 41: we don't have a second OST to test with" && \
- return
+ [ $OSTCOUNT -lt 2 ] &&
+ skip_env "skipping test 41: we don't have a second OST to test with" &&
+ return
local f=$MOUNT/$tfile
# make sure the start of the file is ost1
- lfs setstripe $f -s $((128 * 1024)) -i 0
+ $SETSTRIPE -S $((128 * 1024)) -i 0 $f
do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
cancel_lru_locks osc
# fail ost2 and read from ost1
local mdtosc=$(get_mdtosc_proc_path $SINGLEMDS $ost2_svc)
local osc2dev=$(do_facet $SINGLEMDS "lctl get_param -n devices" | \
grep $mdtosc | awk '{print $1}')
- [ -z "$osc2dev" ] && echo "OST: $ost2_svc" && lctl get_param -n devices && return 4
+ [ -z "$osc2dev" ] && echo "OST: $ost2_svc" && lctl get_param -n devices &&
+ return 4
do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate || return 1
do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
do_facet $SINGLEMDS $LCTL --device $osc2dev activate || return 2
run_test 43 "mds osc import failure during recovery; don't LBUG"
test_44a() { # was test_44
- local at_max_saved=0
-
- mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
- [ "$mdcdev" ] || return 2
- [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; }
-
- # adaptive timeouts slow this way down
- if at_is_enabled; then
- at_max_saved=$(at_max_get mds)
- at_max_set 40 mds
- fi
-
- for i in `seq 1 10`; do
- echo "$i of 10 ($(date +%s))"
- do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
- #define OBD_FAIL_TGT_CONN_RACE 0x701
- do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
- # lctl below may fail, it is valid case
- $LCTL --device $mdcdev recover
- df $MOUNT
- done
- do_facet $SINGLEMDS "lctl set_param fail_loc=0"
- [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
- return 0
+ local at_max_saved=0
+
+ local mdcdev=$($LCTL get_param -n devices |
+ awk "/ ${FSNAME}-MDT0000-mdc-/ {print \$1}")
+ [ "$mdcdev" ] || return 2
+ [ $(echo $mdcdev | wc -w) -eq 1 ] ||
+ { echo mdcdev=$mdcdev; $LCTL dl; return 3; }
+
+ # adaptive timeouts slow this way down
+ if at_is_enabled; then
+ at_max_saved=$(at_max_get mds)
+ at_max_set 40 mds
+ fi
+
+ for i in `seq 1 10`; do
+ echo "$i of 10 ($(date +%s))"
+ do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
+#define OBD_FAIL_TGT_CONN_RACE 0x701
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
+ # lctl below may fail, it is valid case
+ $LCTL --device $mdcdev recover
+ df $MOUNT
+ done
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
+ return 0
}
run_test 44a "race in target handle connect"
test_44b() {
- local mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
- [ "$mdcdev" ] || return 2
- [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; }
-
- for i in `seq 1 10`; do
- echo "$i of 10 ($(date +%s))"
- do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
+ local mdcdev=$($LCTL get_param -n devices |
+ awk "/ ${FSNAME}-MDT0000-mdc-/ {print \$1}")
+ [ "$mdcdev" ] || return 2
+ [ $(echo $mdcdev | wc -w) -eq 1 ] ||
+ { echo mdcdev=$mdcdev; $LCTL dl; return 3; }
+
+ for i in `seq 1 10`; do
+ echo "$i of 10 ($(date +%s))"
+ do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
#define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
- do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
# lctl below may fail, it is valid case
- $LCTL --device $mdcdev recover
- df $MOUNT
- done
- do_facet $SINGLEMDS "lctl set_param fail_loc=0"
- return 0
+ $LCTL --device $mdcdev recover
+ df $MOUNT
+ done
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+ return 0
}
run_test 44b "race in target handle connect"
test_44c() {
- replay_barrier $SINGLEMDS
- createmany -m $DIR/$tfile-%d 100
+ replay_barrier $SINGLEMDS
+ createmany -m $DIR/$tfile-%d 100 || error "failed to create directories"
#define OBD_FAIL_TGT_RCVG_FLAG 0x712
- do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000712"
- fail_abort $SINGLEMDS
- unlinkmany $DIR/$tfile-%d 100 && return 1
- fail $SINGLEMDS
- unlinkmany $DIR/$tfile-%d 100 && return 1
- return 0
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000712"
+ fail_abort $SINGLEMDS
+	unlinkmany $DIR/$tfile-%d 100 && error "unlinked after fail abort"
+ fail $SINGLEMDS
+	unlinkmany $DIR/$tfile-%d 100 && error "unlinked after fail"
+ return 0
}
run_test 44c "race in target handle connect"
# Handle failed close
test_45() {
- mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
- [ "$mdcdev" ] || return 2
- [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; }
+ local mdcdev=$($LCTL get_param -n devices |
+ awk "/ ${FSNAME}-MDT0000-mdc-/ {print \$1}")
+ [ "$mdcdev" ] || return 2
+ [ $(echo $mdcdev | wc -w) -eq 1 ] ||
+ { echo mdcdev=$mdcdev; $LCTL dl; return 3; }
- $LCTL --device $mdcdev recover || return 6
+ $LCTL --device $mdcdev recover || return 6
- multiop_bg_pause $DIR/$tfile O_c || return 1
- pid=$!
+ multiop_bg_pause $DIR/$tfile O_c || return 1
+ pid=$!
- # This will cause the CLOSE to fail before even
- # allocating a reply buffer
- $LCTL --device $mdcdev deactivate || return 4
+ # This will cause the CLOSE to fail before even
+ # allocating a reply buffer
+ $LCTL --device $mdcdev deactivate || return 4
- # try the close
- kill -USR1 $pid
- wait $pid || return 1
+ # try the close
+ kill -USR1 $pid
+ wait $pid || return 1
- $LCTL --device $mdcdev activate || return 5
- sleep 1
+ $LCTL --device $mdcdev activate || return 5
+ sleep 1
- $CHECKSTAT -t file $DIR/$tfile || return 2
- return 0
+ $CHECKSTAT -t file $DIR/$tfile || return 2
+ return 0
}
run_test 45 "Handle failed close"
$LCTL dk > /dev/null
# Slow down a request to the current service time, this is critical
# because previous tests may have caused this value to increase.
- lfs setstripe $DIR/$tfile --index=0 --count=1
+ $SETSTRIPE --stripe-index=0 --count=1 $DIR/$tfile
multiop $DIR/$tfile Ow1yc
REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
awk '/portal 6/ {print $5}'`
do_facet ost1 $LCTL set_param fail_loc=0x224
rm -f $DIR/$tfile
- lfs setstripe $DIR/$tfile --index=0 --count=1
+ $SETSTRIPE --stripe-index=0 --count=1 $DIR/$tfile
# force some real bulk transfer
multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
test_66b() #bug 3055
{
- remote_ost_nodsh && skip "remote OST with nodsh" && return 0
-
- at_start || return 0
- ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
- $LCTL set_param fail_val=$(($ORIG + 5))
-#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
- $LCTL set_param fail_loc=0x50c
- ls $DIR/$tfile > /dev/null 2>&1
- $LCTL set_param fail_loc=0
- CUR=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
- WORST=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $6}')
- echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
- [ $WORST -gt $ORIG ] || error "Worst $WORST should be worse than orig $ORIG"
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ at_start || return 0
+ ORIG=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
+ awk '/network/ {print $4}')
+ $LCTL set_param fail_val=$(($ORIG + 5))
+ #define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
+ $LCTL set_param fail_loc=0x50c
+ ls $DIR/$tfile > /dev/null 2>&1
+ $LCTL set_param fail_loc=0
+ CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
+ awk '/network/ {print $4}')
+ WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
+ awk '/network/ {print $6}')
+ echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
+ [ $WORST -gt $ORIG ] ||
+ error "Worst $WORST should be worse than orig $ORIG"
}
run_test 66b "AT: verify net latency adjusts"
osc.$mdtosc.prealloc_next_id)
mkdir -p $DIR/$tdir/${OST}
- lfs setstripe $DIR/$tdir/${OST} -o 0 -c 1 || error "setstripe"
+ $SETSTRIPE -i 0 -c 1 $DIR/$tdir/${OST} || error "$SETSTRIPE"
echo "Creating to objid $last_id on ost $OST..."
#define OBD_FAIL_OST_PAUSE_CREATE 0x223
do_facet ost1 "$LCTL set_param fail_val=20000"
rm -rf $DIR/$tdir
mkdir -p $DIR/$tdir
- lfs setstripe $DIR/$tdir --index=0 --count=1
+ $SETSTRIPE --stripe-index=0 --count=1 $DIR/$tdir
#define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
$LCTL set_param fail_val=$(($TIMEOUT - 1))
$LCTL set_param fail_loc=0x80000312
local clients=${CLIENTS:-$HOSTNAME}
zconf_mount_clients $clients $MOUNT
-
+
local duration=300
- [ "$SLOW" = "no" ] && duration=60
+ [ "$SLOW" = "no" ] && duration=120
# set duration to 900 because it takes some time to boot node
[ "$FAILURE_MODE" = HARD ] && duration=900
+ local start_ts=$(date +%s)
local cmd="rundbench 1 -t $duration"
local pid=""
do_nodesv $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
- PATH=:$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \
+ PATH=\$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \
DBENCH_LIB=$DBENCH_LIB TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \
- LCTL=$LCTL $cmd" &
+ MOUNT=$MOUNT DIR=$DIR/$tdir/\\\$(hostname) LCTL=$LCTL $cmd" &
pid=$!
log "Started rundbench load pid=$pid ..."
# give rundbench a chance to start, bug 24118
- sleep 2
- local elapsed=0
+ sleep 12
+ local elapsed=$(($(date +%s) - start_ts))
local num_failovers=0
- local start_ts=$(date +%s)
while [ $elapsed -lt $duration ]; do
- if ! check_for_process $clients rundbench; then
- error_noexit "rundbench not found on some of $clients!"
+ if ! check_for_process $clients dbench; then
+ error_noexit "dbench not running on some of $clients!"
killall_process $clients dbench
break
fi
rm -f $DIR/$tfile
replay_barrier $SINGLEMDS
-#define OBD_FAIL_LDLM_ENQUEUE 0x302
+#define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000302"
fail $SINGLEMDS
kill -USR1 $pid
test_74() {
local clients=${CLIENTS:-$HOSTNAME}
- stop ost1
zconf_umount_clients $clients $MOUNT
+ stop ost1
facet_failover $SINGLEMDS
zconf_mount_clients $clients $MOUNT
mount_facet ost1
}
run_test 74 "Ensure applications don't fail waiting for OST recovery"
+remote_dir_check_80() {
+ local MDTIDX=1
+ local diridx=$($GETSTRIPE -M $remote_dir)
+ [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
+
+ createmany -o $remote_dir/f-%d 20 || error "creation failed"
+ local fileidx=$($GETSTRIPE -M $remote_dir/f-1)
+ [ $fileidx -eq $MDTIDX ] || error "$fileidx != $MDTIDX"
+
+ return 0
+}
+
test_80a() {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
- mkdir -p $DIR/$tdir
- replay_barrier mds2
- $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
- rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
- fail mds2
- stat $DIR/$tdir 2&>/dev/null && error "$DIR/$tdir still exist after recovery!"
- return 0
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ # OBD_FAIL_UPDATE_OBJ_NET 0x1500
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x1500
+ $LFS mkdir -i $MDTIDX $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "remote creation failed"
+
+ remote_dir_check_80 || error "remote dir check failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
}
-run_test 80a "CMD: unlink cross-node dir (fail mds with inode)"
+run_test 80a "DNE: create remote dir, drop update rep from MDT1, fail MDT1"
test_80b() {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
- mkdir -p $DIR/$tdir
- replay_barrier $SINGLEMDS
- $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
- rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
- fail $SINGLEMDS
- stat $DIR/$tdir 2&>/dev/null && error "$DIR/$tdir still exist after recovery!"
- return 0
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ # OBD_FAIL_UPDATE_OBJ_NET 0x1500
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x1500
+ $LFS mkdir -i $MDTIDX $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds${MDTIDX}
+
+ wait $CLIENT_PID || error "remote creation failed"
+
+ remote_dir_check_80 || error "remote dir check failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 80b "DNE: create remote dir, drop update rep from MDT1, fail MDT0"
+
+test_80c() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ # OBD_FAIL_UPDATE_OBJ_NET 0x1500
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x1500
+ $LFS mkdir -i $MDTIDX $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds${MDTIDX}
+ fail mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "remote creation failed"
+
+ remote_dir_check_80 || error "remote dir check failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 80c "DNE: create remote dir, drop update rep from MDT1, fail MDT[0,1]"
+
+test_80d() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ # OBD_FAIL_UPDATE_OBJ_NET 0x1500
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x1500
+ $LFS mkdir -i $MDTIDX $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds${MDTIDX},mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "remote creation failed"
+
+ remote_dir_check_80 || error "remote dir check failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 80d "DNE: create remote dir, drop update rep from MDT1, fail 2 MDTs"
+
+test_80e() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+ $LFS mkdir -i $MDTIDX $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+ fail mds${MDTIDX}
+
+ wait $CLIENT_PID || error "remote creation failed"
+
+ remote_dir_check_80 || error "remote dir check failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 80e "DNE: create remote dir, drop MDT0 rep, fail MDT0"
+
+test_80f() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+ $LFS mkdir -i $MDTIDX $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+ fail mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "remote creation failed"
+
+ remote_dir_check_80 || error "remote dir check failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
}
-run_test 80b "CMD: unlink cross-node dir (fail mds with name)"
+run_test 80f "DNE: create remote dir, drop MDT0 rep, fail MDT1"
+
+test_80g() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+ $LFS mkdir -i $MDTIDX $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+ fail mds${MDTIDX}
+ fail mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "remote creation failed"
+
+ remote_dir_check_80 || error "remote dir check failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 80g "DNE: create remote dir, drop MDT0 rep, fail MDT0, then MDT1"
+
+test_80h() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+ $LFS mkdir -i $MDTIDX $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+ fail mds${MDTIDX},mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || return 1
+
+ remote_dir_check_80 || error "remote dir check failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 80h "DNE: create remote dir, drop MDT0 rep, fail 2 MDTs"
test_81a() {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
- mkdir -p $DIR/$tdir
- createmany -o $DIR/$tdir/f 3000 || error "createmany failed"
- sleep 10
- $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir failed"
- $CHECKSTAT -t file $DIR/$tdir/f1002 || error "$CHECKSTAT -t file failed"
- replay_barrier $SINGLEMDS
- rm $DIR/$tdir/f1002 || error "rm $DIR/$tdir/f1002 failed"
- fail $SINGLEMDS
- stat $DIR/$tdir/f1002
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+ # OBD_FAIL_UPDATE_OBJ_NET 0x1500
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x1500
+ rmdir $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+ fail mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "rm remote dir failed"
+
+	stat $remote_dir >/dev/null 2>&1 && error "$remote_dir still exists!"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
}
-run_test 81a "CMD: unlink cross-node file (fail mds with name)"
+run_test 81a "DNE: unlink remote dir, drop MDT0 update rep, fail MDT1"
+
+test_81b() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+ # OBD_FAIL_UPDATE_OBJ_NET 0x1500
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x1500
+ rmdir $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
-test_82a() {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ fail mds${MDTIDX}
- local dir=$DIR/d82a
- replay_barrier mds2
- mkdir $dir || error "mkdir $dir failed"
- log "FAILOVER mds2"
- fail mds2
- stat $DIR
- $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
+ wait $CLIENT_PID || error "rm remote dir failed"
+
+	stat $remote_dir >/dev/null 2>&1 && error "$remote_dir still exists!"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
}
-run_test 82a "CMD: mkdir cross-node dir (fail mds with inode)"
+run_test 81b "DNE: unlink remote dir, drop MDT0 update reply, fail MDT0"
-test_82b() {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+test_81c() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
- local dir=$DIR/d82b
- replay_barrier $SINGLEMDS
- mkdir $dir || error "mkdir $dir failed"
- log "FAILOVER mds1"
- fail $SINGLEMDS
- stat $DIR
- $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+ # OBD_FAIL_UPDATE_OBJ_NET 0x1500
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x1500
+ rmdir $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+ fail mds${MDTIDX}
+ fail mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "rm remote dir failed"
+
+	stat $remote_dir >/dev/null 2>&1 && error "$remote_dir still exists!"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
}
-run_test 82b "CMD: mkdir cross-node dir (fail mds with name)"
+run_test 81c "DNE: unlink remote dir, drop MDT0 update reply, fail MDT0,MDT1"
+
+test_81d() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+ # OBD_FAIL_UPDATE_OBJ_NET 0x1500
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0x1500
+ rmdir $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+ fail mds${MDTIDX},mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "rm remote dir failed"
+
+	stat $remote_dir >/dev/null 2>&1 && error "$remote_dir still exists!"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 81d "DNE: unlink remote dir, drop MDT0 update reply, fail 2 MDTs"
+
+test_81e() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+ rmdir $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds${MDTIDX}
+
+ wait $CLIENT_PID || error "rm remote dir failed"
+
+	stat $remote_dir >/dev/null 2>&1 && error "$remote_dir still exists!"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 81e "DNE: unlink remote dir, drop MDT1 req reply, fail MDT0"
+
+test_81f() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+ rmdir $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "rm remote dir failed"
+
+	stat $remote_dir >/dev/null 2>&1 && error "$remote_dir still exists!"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 81f "DNE: unlink remote dir, drop MDT1 req reply, fail MDT1"
+
+test_81g() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+ rmdir $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds${MDTIDX}
+ fail mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "rm remote dir failed"
+
+	stat $remote_dir >/dev/null 2>&1 && error "$remote_dir still exists!"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 81g "DNE: unlink remote dir, drop req reply, fail M0, then M1"
+
+test_81h() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local MDTIDX=1
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+ # OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+ rmdir $remote_dir &
+ local CLIENT_PID=$!
+ do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+ fail mds${MDTIDX},mds$((MDTIDX + 1))
+
+ wait $CLIENT_PID || error "rm remote dir failed"
+
+	stat $remote_dir >/dev/null 2>&1 && error "$remote_dir still exists!"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 81h "DNE: unlink remote dir, drop request reply, fail 2 MDTs"
test_83a() {
mkdir -p $DIR/$tdir
test_85b() { #bug 16774
lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
- lfs setstripe -o 0 -c 1 $DIR
+ do_facet mgs $LCTL pool_new $FSNAME.$TESTNAME || return 1
+ do_facet mgs $LCTL pool_add $FSNAME.$TESTNAME $FSNAME-OST0000 || return 2
+
+ $SETSTRIPE -c 1 -p $FSNAME.$TESTNAME $DIR
for i in `seq 100`; do
dd if=/dev/urandom of=$DIR/$tfile-$i bs=4096 count=32 >/dev/null 2>&1
addr=`echo $lov_id | awk '{print $4}' | awk -F '-' '{print $3}'`
count=`lctl get_param -n ldlm.namespaces.*OST0000*$addr.lock_unused_count`
echo "before recovery: unused locks count = $count"
+ [ $count != 0 ] || return 3
fail ost1
count2=`lctl get_param -n ldlm.namespaces.*OST0000*$addr.lock_unused_count`
echo "after recovery: unused locks count = $count2"
+ do_facet mgs $LCTL pool_remove $FSNAME.$TESTNAME $FSNAME-OST0000 || return 4
+ do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME || return 5
+
if [ $count2 -ge $count ]; then
error "unused locks are not canceled"
fi
do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
replay_barrier ost1
- lfs setstripe -i 0 -c 1 $DIR/$tfile
+ $SETSTRIPE -i 0 -c 1 $DIR/$tfile
dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 || error "Cannot write"
cksum=`md5sum $DIR/$tfile | awk '{print $1}'`
cancel_lru_locks osc
do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
replay_barrier ost1
- lfs setstripe -i 0 -c 1 $DIR/$tfile
+ $SETSTRIPE -i 0 -c 1 $DIR/$tfile
dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 || error "Cannot write"
sleep 1 # Give it a chance to flush dirty data
echo TESTTEST | dd of=$DIR/$tfile bs=1 count=8 seek=64
mkdir -p $DIR/$tdir
mkdir -p $TMP/$tdir
- lfs setstripe $DIR/$tdir -o 0 -c 1 || error "setstripe"
+ $SETSTRIPE -i 0 -c 1 $DIR/$tdir || error "$SETSTRIPE"
replay_barrier ost1
replay_barrier $SINGLEMDS
mkdir -p $DIR/$tdir
rm -f $DIR/$tdir/$tfile
wait_mds_ost_sync
- wait_destroy_complete
+ wait_delete_completed
BLOCKS1=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
- lfs setstripe -i 0 -c 1 $DIR/$tdir/$tfile
+ $SETSTRIPE -i 0 -c 1 $DIR/$tdir/$tfile
dd if=/dev/zero bs=1M count=10 of=$DIR/$tdir/$tfile
sync
stop ost1
zconf_mount $(hostname) $MOUNT
client_up || return 1
wait_mds_ost_sync
+ wait_delete_completed
BLOCKS2=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
- [ "$BLOCKS1" == "$BLOCKS2" ] || error $((BLOCKS2 - BLOCKS1)) blocks leaked
+ [ $((BLOCKS2 - BLOCKS1)) -le 4 ] || \
+ error $((BLOCKS2 - BLOCKS1)) blocks leaked
}
run_test 89 "no disk space leak on late ost connection"
# file "f${index}" striped over 1 OST
# file "all" striped over all OSTs
- $LFS setstripe -c $OSTCOUNT $dir/all || error "setstripe failed to create $dir/all"
+ $SETSTRIPE -c $OSTCOUNT $dir/all ||
+ error "setstripe failed to create $dir/all"
for (( i=0; i<$OSTCOUNT; i++ )); do
local f=$dir/f$i
- $LFS setstripe -i $i -c 1 $f || error "setstripe failed to create $f"
+ $SETSTRIPE -i $i -c 1 $f || error "$SETSTRIPE failed to create $f"
- # confirm that setstripe actually created the stripe on the requested OST
+ # confirm setstripe actually created the stripe on the requested OST
local uuid=$(ostuuid_from_index $i)
for file in f$i all; do
if [[ $dir/$file != $($LFS find --obd $uuid --name $file $dir) ]]; then
- $LFS getstripe $dir/file
+ $GETSTRIPE $dir/file
error wrong stripe: $file, uuid: $uuid
fi
done
[[ $(echo $list | wc -w) -eq 2 ]] ||
error_noexit "lfs find reports the wrong list of affected files ${#list[@]}"
- echo "Check getstripe: lfs getstripe -r --obd $obd"
- list=$($LFS getstripe -r --obd $obd $dir)
+ echo "Check getstripe: $GETSTRIPE -r --obd $obd"
+ list=$($GETSTRIPE -r --obd $obd $dir)
echo "$list"
for file in all f$index; do
echo "$list" | grep $dir/$file ||
}
run_test 90 "lfs find identifies the missing striped file segments"
-complete $(basename $0) $SECONDS
+complete $SECONDS
check_and_cleanup_lustre
exit_status