# Skip these tests
# bug number for skipped tests:
-# LU-472
-ALWAYS_EXCEPT="$REPLAY_SINGLE_EXCEPT 61d"
+ALWAYS_EXCEPT="$REPLAY_SINGLE_EXCEPT "
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-case "$(lsb_release -sr)" in # only disable tests for el7
-7*) # bug number: LU-6455-----
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 28"
- ;;
-esac
-
-# 7.5 (min)"
+# time in minutes: 7.5"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="44b"
-[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
-# bug number for skipped test: LU-1867 LU-3127
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 89 73b"
+if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
+# bug number for skipped test: LU-11388
+	ALWAYS_EXCEPT+=" 131b"
+fi
+if ${SHARED_KEY:-false}; then
+# bug number for skipped tests: LU-9795 (all below)
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 0b 0c 0d 34 45"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 47 58b 58c 71a 85a"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 85b 86 88 89 90"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 93a 100a 100b 120"
+fi
build_test_filter
}
run_test 2b "touch"
+test_2c() {
+ replay_barrier $SINGLEMDS
+ $LFS setstripe -c $OSTCOUNT $DIR/$tfile
+ fail $SINGLEMDS
+ $CHECKSTAT -t file $DIR/$tfile ||
+ error "$CHECKSTAT $DIR/$tfile check failed"
+}
+run_test 2c "setstripe replay"
+
+test_2d() {
+ [[ $mds1_FSTYPE = "zfs" ]] &&
+ [[ $MDS1_VERSION -lt $(version_code 2.12.51) ]] &&
+ skip "requires LU-10143 fix on MDS"
+ replay_barrier $SINGLEMDS
+ $LFS setdirstripe -i 0 -c $MDSCOUNT $DIR/$tdir
+ fail $SINGLEMDS
+ $CHECKSTAT -t dir $DIR/$tdir ||
+ error "$CHECKSTAT $DIR/$tdir check failed"
+}
+run_test 2d "setdirstripe replay"
+
test_3a() {
local file=$DIR/$tfile
replay_barrier $SINGLEMDS
test_20b() { # bug 10480
local wait_timeout=$((TIMEOUT * 4))
- local BEFOREUSED
- local AFTERUSED
+ local extra=$(fs_log_size)
+ local n_attempts=1
+
+ sync_all_data
+ $LFS setstripe -i 0 -c 1 $DIR
+
+ local beforeused=$(df -P $DIR | tail -1 | awk '{ print $3 }')
- BEFOREUSED=$(df -P $DIR | tail -1 | awk '{ print $3 }')
dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000 &
while [ ! -e $DIR/$tfile ] ; do
- usleep 60 # give dd a chance to start
+ usleep 60 # give dd a chance to start
done
- $GETSTRIPE $DIR/$tfile || error "$GETSTRIPE $DIR/$tfile failed"
+ $LFS getstripe $DIR/$tfile || error "$LFS getstripe $DIR/$tfile failed"
# make it an orphan
rm -f $DIR/$tfile || error "rm -f $DIR/$tfile failed"
mds_evict_client
- client_up || client_up || true # reconnect
+ client_up || client_up || true # reconnect
- do_facet $SINGLEMDS "lctl set_param -n osd*.*MDT*.force_sync 1"
+ do_facet $SINGLEMDS "lctl set_param -n osd*.*MDT*.force_sync=1"
- fail $SINGLEMDS # start orphan recovery
+ fail $SINGLEMDS # start orphan recovery
wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
- wait_delete_completed_mds $wait_timeout ||
- error "delete did not complete"
+ wait_delete_completed $wait_timeout || error "delete did not finish"
+ sync_all_data
- AFTERUSED=$(df -P $DIR | tail -1 | awk '{ print $3 }')
- log "before $BEFOREUSED, after $AFTERUSED"
- (( $AFTERUSED > $BEFOREUSED + $(fs_log_size) )) &&
- error "after $AFTERUSED > before $BEFOREUSED"
- return 0
+ while true; do
+ local afterused=$(df -P $DIR | tail -1 | awk '{ print $3 }')
+ log "before $beforeused, after $afterused"
+
+ (( $beforeused + $extra >= $afterused )) && break
+ n_attempts=$((n_attempts + 1))
+ [ $n_attempts -gt 3 ] &&
+ error "after $afterused > before $beforeused + $extra"
+
+ wait_zfs_commit $SINGLEMDS 5
+ sync_all_data
+ done
}
-run_test 20b "write, unlink, eviction, replay, (test mds_cleanup_orphans)"
+
+run_test 20b "write, unlink, eviction, replay (test mds_cleanup_orphans)"
test_20c() { # bug 10480
multiop_bg_pause $DIR/$tfile Ow_c ||
# the page, guarnateeing that the unlock from the RPC completion would
# assert on trying to unlock the unlocked page.
test_41() {
- [ $OSTCOUNT -lt 2 ] &&
- skip_env "skipping test 41: we don't have a second OST to test with" &&
- return
+ [ $OSTCOUNT -lt 2 ] && skip_env "needs >= 2 OSTs" && return
local f=$MOUNT/$tfile
# make sure the start of the file is ost1
- $SETSTRIPE -S $((128 * 1024)) -i 0 $f
+ $LFS setstripe -S $((128 * 1024)) -i 0 $f
do_facet client dd if=/dev/zero of=$f bs=4k count=1 ||
error "dd on client failed"
cancel_lru_locks osc
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
# lctl below may fail, it is valid case
$LCTL --device $mdcdev recover
- df $MOUNT
+ $LFS df $MOUNT
done
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
[ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
test_48() {
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
- [ "$OSTCOUNT" -lt "2" ] &&
- skip_env "$OSTCOUNT < 2 OSTs -- skipping" && return
+ [ "$OSTCOUNT" -lt "2" ] && skip_env "needs >= 2 OSTs" && return
replay_barrier $SINGLEMDS
createmany -o $DIR/$tfile 20 ||
do_facet ost1 "lctl set_param fail_loc=0x80000216"
client_up || error "client_up failed"
+ # let the MDS discover the OST failure, attempt to recover, fail
+ # and recover again.
+ sleep $((3 * TIMEOUT))
+
createmany -o $DIR/$tfile 20 20 ||
error "createmany recraete $DIR/$tfile failed"
unlinkmany $DIR/$tfile 40 || error "unlinkmany $DIR/$tfile failed"
touch $DIR/$tfile || error "touch $DIR/$tfile failed"
replay_barrier $SINGLEMDS
fail $SINGLEMDS
- sleep 1
+ wait_recovery_complete $SINGLEMDS || error "MDS recovery is not done"
+ wait_mds_ost_sync || error "wait_mds_ost_sync failed"
$CHECKSTAT -t file $DIR/$tfile ||
error "$CHECKSTAT $DIR/$tfile attribute check failed"
do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
test_65b() #bug 3055
{
- remote_ost_nodsh && skip "remote OST with nodsh" && return 0
-
- at_start || return 0
- # turn on D_ADAPTTO
- debugsave
- $LCTL set_param debug="other trace"
- $LCTL dk > /dev/null
- # Slow down a request to the current service time, this is critical
- # because previous tests may have caused this value to increase.
- $SETSTRIPE --stripe-index=0 --count=1 $DIR/$tfile
- multiop $DIR/$tfile Ow1yc
- REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
- awk '/portal 6/ {print $5}'`
- REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
-
- do_facet ost1 lctl set_param fail_val=${REQ_DELAY}
-#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
- do_facet ost1 $LCTL set_param fail_loc=0x224
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
- rm -f $DIR/$tfile
- $SETSTRIPE --stripe-index=0 --count=1 $DIR/$tfile
- # force some real bulk transfer
- multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
+ at_start || return 0
+ # turn on D_ADAPTTO
+ debugsave
+ $LCTL set_param debug="other trace"
+ $LCTL dk > /dev/null
+ # Slow down a request to the current service time, this is critical
+ # because previous tests may have caused this value to increase.
+ $LFS setstripe --stripe-index=0 --stripe-count=1 $DIR/$tfile ||
+ error "$LFS setstripe failed for $DIR/$tfile"
+
+ multiop $DIR/$tfile Ow1yc
+ REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
+ awk '/portal 6/ {print $5}'`
+ REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
+
+ do_facet ost1 lctl set_param fail_val=${REQ_DELAY}
+ #define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
+ do_facet ost1 $LCTL set_param fail_loc=0x224
- do_facet ost1 $LCTL set_param fail_loc=0
- # check for log message
- $LCTL dk | grep "Early reply #" || error "No early reply"
- debugrestore
- # client should show REQ_DELAY estimates
- lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal
+ rm -f $DIR/$tfile
+ $LFS setstripe --stripe-index=0 --stripe-count=1 $DIR/$tfile ||
+ error "$LFS setstripe failed"
+ # force some real bulk transfer
+ multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
+
+ do_facet ost1 $LCTL set_param fail_loc=0
+ # check for log message
+ $LCTL dk | grep "Early reply #" || error "No early reply"
+ debugrestore
+ # client should show REQ_DELAY estimates
+ lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal
}
run_test 65b "AT: verify early replies on packed reply / bulk"
$LCTL set_param fail_val=$(($ORIG + 5))
#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
$LCTL set_param fail_loc=0x50c
- ls $DIR/$tfile > /dev/null 2>&1
+ touch $DIR/$tfile > /dev/null 2>&1
$LCTL set_param fail_loc=0
CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
awk '/network/ {print $4}')
osc.$mdtosc.prealloc_next_id)
mkdir -p $DIR/$tdir/${OST} || error "mkdir $DIR/$tdir/${OST} failed"
- $SETSTRIPE -i 0 -c 1 $DIR/$tdir/${OST} || error "$SETSTRIPE failed"
+ $LFS setstripe -i 0 -c 1 $DIR/$tdir/${OST} ||
+ error "$LFS setstripe failed"
echo "Creating to objid $last_id on ost $OST..."
#define OBD_FAIL_OST_PAUSE_CREATE 0x223
do_facet ost1 "$LCTL set_param fail_val=20000"
do_facet ost1 "echo $TIMEOUT >> $ldlm_enqueue_min_r"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
- $SETSTRIPE --stripe-index=0 --count=1 $DIR/$tdir
-#define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
- $LCTL set_param fail_val=$(($TIMEOUT - 1))
- $LCTL set_param fail_loc=0x80000312
- cp /etc/profile $DIR/$tdir/${tfile}_1 || error "1st cp failed $?"
- $LCTL set_param fail_val=$((TIMEOUT * 5 / 4))
- $LCTL set_param fail_loc=0x80000312
- cp /etc/profile $DIR/$tdir/${tfile}_2 || error "2nd cp failed $?"
- $LCTL set_param fail_loc=0
-
- echo $ENQ_MIN >> $ldlm_enqueue_min
- do_facet ost1 "echo $ENQ_MIN_R >> $ldlm_enqueue_min_r"
- rm -rf $DIR/$tdir
- return 0
+ $LFS setstripe --stripe-index=0 -c 1 $DIR/$tdir ||
+ error "$LFS setstripe failed for $DIR/$tdir"
+ #define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
+ $LCTL set_param fail_val=$(($TIMEOUT - 1))
+ $LCTL set_param fail_loc=0x80000312
+ cp /etc/profile $DIR/$tdir/${tfile}_1 || error "1st cp failed $?"
+ $LCTL set_param fail_val=$((TIMEOUT * 5 / 4))
+ $LCTL set_param fail_loc=0x80000312
+ cp /etc/profile $DIR/$tdir/${tfile}_2 || error "2nd cp failed $?"
+ $LCTL set_param fail_loc=0
+
+ echo $ENQ_MIN >> $ldlm_enqueue_min
+ do_facet ost1 "echo $ENQ_MIN_R >> $ldlm_enqueue_min_r"
+ rm -rf $DIR/$tdir
+ return 0
}
run_test 68 "AT: verify slowing locks"
test_70b () {
local clients=${CLIENTS:-$HOSTNAME}
- local mdscount=$MDSCOUNT
-
- # until LU-6844 is fixed, run on one MDT instead of disabling test
- mdscount=1
zconf_mount_clients $clients $MOUNT
local start_ts=$(date +%s)
local cmd="rundbench 1 -t $duration"
local pid=""
- if [ $mdscount -ge 2 ]; then
- test_mkdir -p -c$mdscount $DIR/$tdir
- $LFS setdirstripe -D -c$mdscount $DIR/$tdir
+ if [ $MDSCOUNT -ge 2 ]; then
+ test_mkdir -p -c$MDSCOUNT $DIR/$tdir
+ $LFS setdirstripe -D -c$MDSCOUNT $DIR/$tdir
fi
do_nodesv $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
PATH=\$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \
log "$TESTNAME fail mds$fail_index $num_failovers times"
fail mds$fail_index
elapsed=$(($(date +%s) - start_ts))
- if [ $fail_index -ge $mdscount ]; then
+ if [ $fail_index -ge $MDSCOUNT ]; then
fail_index=1
else
fail_index=$((fail_index+1))
}
rm -rf $DIR/$tdir/test || {
echo "rmdir fails"
+ ls -lR $DIR/$tdir
break
}
rm -rf $DIR/$tdir/test1 || {
echo "rmdir fails"
+ ls -lR $DIR/$tdir/test1
break
}
done
}
run_test 70d "mkdir/rmdir striped dir ${MDSCOUNT}mdts recovery"
-cleanup_70e() {
- trap 0
- kill -9 $rename_70e_pid
-}
-
test_70e () {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local clients=${CLIENTS:-$HOSTNAME}
local rc=0
- echo ha > /proc/sys/lnet/debug
+ lctl set_param debug=+ha
zconf_mount_clients $clients $MOUNT
local duration=300
$LFS mkdir -i0 $DIR/$tdir/test_1
touch $DIR/$tdir/test_0/a
touch $DIR/$tdir/test_1/b
- trap cleanup_70e EXIT
(
- while true; do
- mrename $DIR/$tdir/test_0/a $DIR/$tdir/test_1/b > \
- /dev/null || {
- echo "a->b fails"
- break;
- }
+ while true; do
+ mrename $DIR/$tdir/test_0/a $DIR/$tdir/test_1/b > /dev/null || {
+ echo "a->b fails"
+ break;
+ }
- checkstat $DIR/$tdir/test_0/a && {
- echo "a still exists"
- break
- }
+ checkstat $DIR/$tdir/test_0/a && {
+ echo "a still exists"
+ break
+ }
- checkstat $DIR/$tdir/test_1/b || {
- echo "b still exists"
- break
- }
+ checkstat $DIR/$tdir/test_1/b || {
+ echo "b still exists"
+ break
+ }
- touch $DIR/$tdir/test_0/a || {
- echo "touch a fails"
- break
- }
+ touch $DIR/$tdir/test_0/a || {
+ echo "touch a fails"
+ break
+ }
- mrename $DIR/$tdir/test_1/b $DIR/$tdir/test_0/a > \
- /dev/null || {
- echo "a->a fails"
- break;
- }
- done
+ mrename $DIR/$tdir/test_1/b $DIR/$tdir/test_0/a > /dev/null || {
+ echo "a->a fails"
+ break;
+ }
+ done
)&
rename_70e_pid=$!
- echo "Started $rename_70e_pid"
+ stack_trap "kill -9 $rename_70e_pid" EXIT
+ echo "Started PID=$rename_70e_pid"
random_fail_mdt 2 $duration $rename_70e_pid
kill -0 $rename_70e_pid || error "rename $rename_70e_pid stopped"
-
- cleanup_70e
- true
}
run_test 70e "rename cross-MDT with random fails"
+test_70f_write_and_read(){
+ local srcfile=$1
+ local stopflag=$2
+ local client
+
+ echo "Write/read files in: '$DIR/$tdir', clients: '$CLIENTS' ..."
+ for client in ${CLIENTS//,/ }; do
+ [ -f $stopflag ] || return
+
+ local tgtfile=$DIR/$tdir/$tfile.$client
+ do_node $client dd $DD_OPTS bs=1M count=10 if=$srcfile \
+ of=$tgtfile 2>/dev/null ||
+ error "dd $DD_OPTS bs=1M count=10 if=$srcfile " \
+ "of=$tgtfile failed on $client, rc=$?"
+ done
+
+ local prev_client=$(echo ${CLIENTS//,/ } | awk '{ print $NF }')
+ local index=0
+
+ for client in ${CLIENTS//,/ }; do
+ [ -f $stopflag ] || return
+
+ # flush client cache in case test is running on only one client
+ # do_node $client cancel_lru_locks osc
+ do_node $client $LCTL set_param ldlm.namespaces.*.lru_size=clear
+
+ tgtfile=$DIR/$tdir/$tfile.$client
+ local md5=$(do_node $prev_client "md5sum $tgtfile")
+		[ "${checksum[$index]// */}" = "${md5// */}" ] ||
+ error "$tgtfile: checksum doesn't match on $prev_client"
+ index=$((index + 1))
+ prev_client=$client
+ done
+}
+
+test_70f_loop(){
+ local srcfile=$1
+ local stopflag=$2
+ DD_OPTS=
+
+ mkdir -p $DIR/$tdir || error "cannot create $DIR/$tdir directory"
+ $LFS setstripe -c -1 $DIR/$tdir ||
+ error "cannot $LFS setstripe $DIR/$tdir"
+
+ touch $stopflag
+ while [ -f $stopflag ]; do
+ test_70f_write_and_read $srcfile $stopflag
+ # use direct IO and buffer cache in turns if loop
+ [ -n "$DD_OPTS" ] && DD_OPTS="" || DD_OPTS="oflag=direct"
+ done
+}
+
+test_70f_cleanup() {
+ trap 0
+ rm -f $TMP/$tfile.stop
+ do_nodes $CLIENTS rm -f $TMP/$tfile
+ rm -f $DIR/$tdir/$tfile.*
+}
+
+test_70f() {
+# [ x$ost1failover_HOST = x$ost_HOST ] &&
+# { skip "Failover host not defined" && return; }
+# [ -z "$CLIENTS" ] &&
+# { skip "CLIENTS are not specified." && return; }
+# [ $CLIENTCOUNT -lt 2 ] &&
+# { skip "Need 2 or more clients, have $CLIENTCOUNT" && return; }
+
+ [[ $(lustre_version_code ost1) -lt $(version_code 2.9.53) ]] &&
+ skip "Need server version at least 2.9.53" && return
+
+ echo "mount clients $CLIENTS ..."
+ zconf_mount_clients $CLIENTS $MOUNT
+
+ local srcfile=$TMP/$tfile
+ local client
+ local index=0
+
+ trap test_70f_cleanup EXIT
+ # create a different source file local to each client node so we can
+ # detect if the file wasn't written out properly after failover
+ do_nodes $CLIENTS dd bs=1M count=10 if=/dev/urandom of=$srcfile \
+ 2>/dev/null || error "can't create $srcfile on $CLIENTS"
+ for client in ${CLIENTS//,/ }; do
+ checksum[$index]=$(do_node $client "md5sum $srcfile")
+ index=$((index + 1))
+ done
+
+ local duration=120
+ [ "$SLOW" = "no" ] && duration=60
+ # set duration to 900 because it takes some time to boot node
+ [ "$FAILURE_MODE" = HARD ] && duration=900
+
+ local stopflag=$TMP/$tfile.stop
+ test_70f_loop $srcfile $stopflag &
+ local pid=$!
+
+ local elapsed=0
+ local num_failovers=0
+ local start_ts=$SECONDS
+ while [ $elapsed -lt $duration ]; do
+ sleep 3
+ replay_barrier ost1
+ sleep 1
+ num_failovers=$((num_failovers + 1))
+ log "$TESTNAME failing OST $num_failovers times"
+ fail ost1
+ sleep 2
+ elapsed=$((SECONDS - start_ts))
+ done
+
+ rm -f $stopflag
+ wait $pid
+ test_70f_cleanup
+}
+run_test 70f "OSS O_DIRECT recovery with $CLIENTCOUNT clients"
+
cleanup_71a() {
trap 0
kill -9 $mkdir_71a_pid
rm -f $DIR/$tfile
replay_barrier $SINGLEMDS
- #define OBD_FAIL_LDLM_REPLY 0x30c
- do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
+ #define OBD_FAIL_MDS_LDLM_REPLY_NET 0x157
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000157"
fail $SINGLEMDS
kill -USR1 $pid
wait $pid || error "multiop pid failed"
run_test 74 "Ensure applications don't fail waiting for OST recovery"
remote_dir_check_80() {
- local MDTIDX=1
+ local mdtidx=1
local diridx
local fileidx
- diridx=$($GETSTRIPE -M $remote_dir) ||
- error "$GETSTRIPE -M $remote_dir failed"
- [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
+ diridx=$($LFS getstripe -m $remote_dir) ||
+ error "$LFS getstripe -m $remote_dir failed"
+ [ $diridx -eq $mdtidx ] || error "$diridx != $mdtidx"
createmany -o $remote_dir/f-%d 20 || error "creation failed"
- fileidx=$($GETSTRIPE -M $remote_dir/f-1) ||
- error "$GETSTRIPE -M $remote_dir/f-1 failed"
- [ $fileidx -eq $MDTIDX ] || error "$fileidx != $MDTIDX"
+ fileidx=$($LFS getstripe -m $remote_dir/f-1) ||
+ error "$LFS getstripe -m $remote_dir/f-1 failed"
+ [ $fileidx -eq $mdtidx ] || error "$fileidx != $mdtidx"
return 0
}
local remote_dir=$DIR/$tdir/remote_dir
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
- #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+ #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
$LFS mkdir -i $MDTIDX $remote_dir &
local CLIENT_PID=$!
run_test 80b "DNE: create remote dir, drop update rep from MDT0, fail MDT1"
test_80c() {
+ [[ $mds1_FSTYPE = "zfs" ]] &&
+ [[ $MDS1_VERSION -lt $(version_code 2.12.51) ]] &&
+ skip "requires LU-10143 fix on MDS"
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
local remote_dir=$DIR/$tdir/remote_dir
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
- #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+ #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
$LFS mkdir -i $MDTIDX $remote_dir &
local CLIENT_PID=$!
run_test 80c "DNE: create remote dir, drop update rep from MDT1, fail MDT[0,1]"
test_80d() {
+ [[ $mds1_FSTYPE = "zfs" ]] &&
+ [[ $MDS1_VERSION -lt $(version_code 2.12.51) ]] &&
+ skip "requires LU-10143 fix on MDS"
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local MDTIDX=1
local remote_dir=$DIR/$tdir/remote_dir
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
- #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+ #define OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
$LFS mkdir -i $MDTIDX $remote_dir &
local CLIENT_PID=$!
$LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
touch $remote_dir || error "touch $remote_dir failed"
- # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
+ # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
rmdir $remote_dir &
local CLIENT_PID=$!
wait $CLIENT_PID || error "rm remote dir failed"
- stat $remote_dir 2&>/dev/null && error "$remote_dir still exist!"
+ stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
rm -rf $DIR/$tdir || error "rmdir failed"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
$LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
- # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
+ # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
rmdir $remote_dir &
local CLIENT_PID=$!
wait $CLIENT_PID || error "rm remote dir failed"
- stat $remote_dir 2&>/dev/null && error "$remote_dir still exist!"
+ stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
rm -rf $DIR/$tdir || error "rmdir failed"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
$LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
- # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
+ # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
rmdir $remote_dir &
local CLIENT_PID=$!
wait $CLIENT_PID || error "rm remote dir failed"
- stat $remote_dir 2&>/dev/null && error "$remote_dir still exist!"
+ stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
rm -rf $DIR/$tdir || error "rmdir failed"
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
$LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
- # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
+ # OBD_FAIL_OUT_UPDATE_NET_REP 0x1701
do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
rmdir $remote_dir &
local CLIENT_PID=$!
wait $CLIENT_PID || error "rm remote dir failed"
- stat $remote_dir 2&>/dev/null && error "$remote_dir still exist!"
+ stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
rm -rf $DIR/$tdir || error "rmdir failed"
wait $CLIENT_PID || error "rm remote dir failed"
- stat $remote_dir 2&>/dev/null && error "$remote_dir still exist!"
+ stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
rm -rf $DIR/$tdir || error "rmdir failed"
wait $CLIENT_PID || error "rm remote dir failed"
- stat $remote_dir 2&>/dev/null && error "$remote_dir still exist!"
+ stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
rm -rf $DIR/$tdir || error "rmdir failed"
wait $CLIENT_PID || error "rm remote dir failed"
- stat $remote_dir 2&>/dev/null && error "$remote_dir still exist!"
+ stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
rm -rf $DIR/$tdir || error "rmdir failed"
wait $CLIENT_PID || error "rm remote dir failed"
- stat $remote_dir 2&>/dev/null && error "$remote_dir still exist!"
+ stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
rm -rf $DIR/$tdir || error "rmdir failed"
done
lov_id=$(lctl dl | grep "clilov")
- addr=$(echo $lov_id | awk '{print $4}' | awk -F '-' '{print $3}')
+ addr=$(echo $lov_id | awk '{print $4}' | awk -F '-' '{print $NF}')
count=$(lctl get_param -n \
ldlm.namespaces.*MDT0000*$addr.lock_unused_count)
echo "before recovery: unused locks count = $count"
run_test 85a "check the cancellation of unused locks during recovery(IBITS)"
test_85b() { #bug 16774
+ rm -rf $DIR/$tdir
+ mkdir $DIR/$tdir
+
lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
- do_facet mgs $LCTL pool_new $FSNAME.$TESTNAME ||
- error "unable to create pool $TESTNAME"
- do_facet mgs $LCTL pool_add $FSNAME.$TESTNAME $FSNAME-OST0000 ||
- error "unable to add pool $TESTNAME"
+ if ! combined_mgs_mds ; then
+ mount_mgs_client
+ fi
- $SETSTRIPE -c 1 -p $FSNAME.$TESTNAME $DIR
+ $LFS setstripe -c 1 -i 0 $DIR/$tdir
for i in $(seq 100); do
- dd if=/dev/urandom of=$DIR/$tfile-$i bs=4096 \
+ dd if=/dev/urandom of=$DIR/$tdir/$tfile-$i bs=4096 \
count=32 >/dev/null 2>&1
done
cancel_lru_locks osc
for i in $(seq 100); do
- dd if=$DIR/$tfile-$i of=/dev/null bs=4096 \
+ dd if=$DIR/$tdir/$tfile-$i of=/dev/null bs=4096 \
count=32 >/dev/null 2>&1
done
lov_id=$(lctl dl | grep "clilov")
- addr=$(echo $lov_id | awk '{print $4}' | awk -F '-' '{print $3}')
- count=$(lctl get_param \
- -n ldlm.namespaces.*OST0000*$addr.lock_unused_count)
+ addr=$(echo $lov_id | awk '{print $4}' | awk -F '-' '{print $NF}')
+ count=$(lctl get_param -n \
+ ldlm.namespaces.*OST0000*$addr.lock_unused_count)
echo "before recovery: unused locks count = $count"
- [ $count != 0 ] || error "unused locks ($count) should be zero"
+ [ $count -ne 0 ] || error "unused locks ($count) should be zero"
fail ost1
-n ldlm.namespaces.*OST0000*$addr.lock_unused_count)
echo "after recovery: unused locks count = $count2"
- do_facet mgs $LCTL pool_remove $FSNAME.$TESTNAME $FSNAME-OST0000 ||
- error "unable to remove pool $TESTNAME"
- do_facet mgs $LCTL pool_destroy $FSNAME.$TESTNAME ||
- error "unable to destroy the pool $TESTNAME"
+ if ! combined_mgs_mds ; then
+ umount_mgs_client
+ fi
if [ $count2 -ge $count ]; then
error "unused locks are not canceled"
fi
+
+ rm -rf $DIR/$tdir
}
run_test 85b "check the cancellation of unused locks during recovery(EXTENT)"
}
run_test 86 "umount server after clear nid_stats should not hit LBUG"
-test_87() {
+test_87a() {
do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
replay_barrier ost1
- $SETSTRIPE -i 0 -c 1 $DIR/$tfile
+ $LFS setstripe -i 0 -c 1 $DIR/$tfile
dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 ||
error "dd to $DIR/$tfile failed"
cksum=$(md5sum $DIR/$tfile | awk '{print $1}')
error "New checksum $cksum2 does not match original $cksum"
fi
}
-run_test 87 "write replay"
+run_test 87a "write replay"
test_87b() {
do_facet ost1 "lctl set_param -n obdfilter.${ost1_svc}.sync_journal 0"
replay_barrier ost1
- $SETSTRIPE -i 0 -c 1 $DIR/$tfile
+ $LFS setstripe -i 0 -c 1 $DIR/$tfile
dd if=/dev/urandom of=$DIR/$tfile bs=1024k count=8 ||
error "dd to $DIR/$tfile failed"
sleep 1 # Give it a chance to flush dirty data
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
mkdir -p $TMP/$tdir || error "mkdir $TMP/$tdir failed"
- $SETSTRIPE -i 0 -c 1 $DIR/$tdir || error "$SETSTRIPE"
+ $LFS setstripe -i 0 -c 1 $DIR/$tdir || error "$LFS setstripe failed"
replay_barrier ost1
replay_barrier $SINGLEMDS
}
run_test 88 "MDS should not assign same objid to different files "
+function calc_osc_kbytes_used() {
+ local kbtotal=$(calc_osc_kbytes kbytestotal)
+ local kbfree=$(calc_osc_kbytes kbytesfree)
+ echo $((kbtotal-kbfree))
+}
+
test_89() {
cancel_lru_locks osc
mkdir $DIR/$tdir || error "mkdir $DIR/$tdir failed"
rm -f $DIR/$tdir/$tfile
- wait_mds_ost_sync
- wait_delete_completed
- BLOCKS1=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
- $SETSTRIPE -i 0 -c 1 $DIR/$tdir/$tfile
- dd if=/dev/zero bs=1M count=10 of=$DIR/$tdir/$tfile
+ wait_mds_ost_sync || error "initial MDS-OST sync timed out"
+ wait_delete_completed || error "initial wait delete timed out"
+ local blocks1=$(calc_osc_kbytes_used)
+ local write_size=$(fs_log_size)
+
+ $LFS setstripe -i 0 -c 1 $DIR/$tdir/$tfile
+ [ $write_size -lt 1024 ] && write_size=1024
+ dd if=/dev/zero bs=${write_size}k count=10 of=$DIR/$tdir/$tfile
sync
stop ost1
facet_failover $SINGLEMDS
mount_facet ost1
zconf_mount $(hostname) $MOUNT || error "mount fails"
client_up || error "client_up failed"
- wait_mds_ost_sync
- wait_delete_completed
- BLOCKS2=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
- [ $((BLOCKS2 - BLOCKS1)) -le 4 ] ||
- error $((BLOCKS2 - BLOCKS1)) blocks leaked
-}
+ # wait for the remounted client to connect to ost1
+ local target=$(get_osc_import_name client ost1)
+ wait_import_state "FULL" "osc.${target}.ost_server_uuid" \
+ $(max_recovery_time)
+
+ wait_mds_ost_sync || error "MDS-OST sync timed out"
+ wait_delete_completed || error "wait delete timed out"
+ local blocks2=$(calc_osc_kbytes_used)
+
+ [ $((blocks2 - blocks1)) -le $(fs_log_size) ] ||
+ error $((blocks2 - blocks1)) blocks leaked
+}
run_test 89 "no disk space leak on late ost connection"
cleanup_90 () {
- local facet=$1
- trap 0
- reboot_facet $facet
- change_active $facet
- wait_for_facet $facet
- mount_facet $facet || error "Restart of $facet failed"
- clients_up
+ local facet=$1
+
+ trap 0
+ reboot_facet $facet
+ change_active $facet
+ wait_for_facet $facet
+ mount_facet $facet || error "Restart of $facet failed"
+ clients_up
}
test_90() { # bug 19494
return 0
fi
fi
+ # ensure all OSTs are active to allow allocations
+ wait_osts_up
mkdir $dir || error "mkdir $dir failed"
echo "Create the files"
- # file "f${index}" striped over 1 OST
- # file "all" striped over all OSTs
+ # file "f${index}" striped over 1 OST
+ # file "all" striped over all OSTs
- $SETSTRIPE -c $OSTCOUNT $dir/all ||
- error "setstripe failed to create $dir/all"
+ $LFS setstripe -c $OSTCOUNT $dir/all ||
+ error "setstripe failed to create $dir/all"
- for (( i=0; i<$OSTCOUNT; i++ )); do
- local f=$dir/f$i
- $SETSTRIPE -i $i -c 1 $f || error "$SETSTRIPE failed to create $f"
+ for ((i = 0; i < $OSTCOUNT; i++)); do
+ local f=$dir/f$i
- # confirm setstripe actually created the stripe on the requested OST
- local uuid=$(ostuuid_from_index $i)
- for file in f$i all; do
- if [[ $dir/$file != $($LFS find --obd $uuid --name $file $dir) ]]; then
- $GETSTRIPE $dir/$file
- error wrong stripe: $file, uuid: $uuid
- fi
- done
- done
+ $LFS setstripe -i $i -c 1 $f ||
+ error "$LFS setstripe failed to create $f"
+
+ # confirm setstripe actually created stripe on requested OST
+ local uuid=$(ostuuid_from_index $i)
+
+ for file in f$i all; do
+ local found=$($LFS find --obd $uuid --name $file $dir)
+
+ if [[ $dir/$file != $found ]]; then
+ $LFS getstripe $dir/$file
+ error "wrong stripe: $file, uuid: $uuid"
+ fi
+ done
+ done
# Before failing an OST, get its obd name and index
local varsvc=${ostfail}_svc
[[ $(echo $list | wc -w) -eq 2 ]] ||
error_noexit "lfs find reports the wrong list of affected files ${#list[@]}"
- echo "Check getstripe: $GETSTRIPE -r --obd $obd"
- list=$($GETSTRIPE -r --obd $obd $dir)
- echo "$list"
+ echo "Check getstripe: $LFS getstripe -r --obd $obd"
+ list=$($LFS getstripe -r --obd $obd $dir)
+ echo "$list"
for file in all f$index; do
echo "$list" | grep $dir/$file ||
error_noexit "lfs getsripe does not report the affected $obd for $file"
}
run_test 90 "lfs find identifies the missing striped file segments"
-test_93() {
+test_93a() {
local server_version=$(lustre_version_code $SINGLEMDS)
[[ $server_version -ge $(version_code 2.6.90) ]] ||
[[ $server_version -ge $(version_code 2.5.4) &&
cancel_lru_locks osc
- $SETSTRIPE -i 0 -c 1 $DIR/$tfile ||
- error "$SETSTRIPE $DIR/$tfile failed"
+ $LFS setstripe -i 0 -c 1 $DIR/$tfile ||
+ error "$LFS setstripe $DIR/$tfile failed"
dd if=/dev/zero of=$DIR/$tfile bs=1024 count=1 ||
error "dd to $DIR/$tfile failed"
#define OBD_FAIL_TGT_REPLAY_RECONNECT 0x715
do_facet ost1 "$LCTL set_param fail_loc=0x715"
fail ost1
}
-run_test 93 "replay + reconnect"
+run_test 93a "replay + reconnect"
+
+test_93b() {
+ local server_version=$(lustre_version_code $SINGLEMDS)
+ [[ $server_version -ge $(version_code 2.7.90) ]] ||
+ { skip "Need MDS version 2.7.90+"; return; }
+
+ cancel_lru_locks mdc
+
+ createmany -o $DIR/$tfile 20 ||
+ error "createmany -o $DIR/$tfile failed"
+
+ #define OBD_FAIL_TGT_REPLAY_RECONNECT 0x715
+	# We need to emulate a state in which the MDT is waiting for other
+	# clients that have not completed recovery. The final ping is queued,
+	# but the reply is only sent on recovery completion. This is emulated
+	# by sleeping before processing the final pings.
+ do_facet mds1 "$LCTL set_param fail_val=80"
+ do_facet mds1 "$LCTL set_param fail_loc=0x715"
+ fail mds1
+}
+run_test 93b "replay + reconnect on mds"
striped_dir_check_100() {
local striped_dir=$DIR/$tdir/striped_dir
}
run_test 102d "check replay & reconstruction with multiple mod RPCs in flight"
+test_103() {
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return
+ local mds_version=$(lustre_version_code $SINGLEMDS)
+ [[ $mds_version -gt $(version_code 2.8.54) ]] ||
+ { skip "Need MDS version 2.8.54+"; return; }
+
+#define OBD_FAIL_MDS_TRACK_OVERFLOW 0x162
+ do_facet mds1 $LCTL set_param fail_loc=0x80000162
+
+ mkdir -p $DIR/$tdir
+ createmany -o $DIR/$tdir/t- 30 ||
+ error "create files on remote directory failed"
+ sync
+ rm -rf $DIR/$tdir/t-*
+ sync
+# MDS should crash with tr->otr_next_id overflow
+ fail mds1
+}
+run_test 103 "Check otr_next_id overflow"
+
+
check_striped_dir_110()
{
$CHECKSTAT -t dir $DIR/$tdir/striped_dir ||
test_110a() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_110b() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_110c() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_110d() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_110e() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_110f() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
replay_barrier mds1
replay_barrier mds2
$LFS mkdir -i1 -c$MDSCOUNT $DIR/$tdir/striped_dir
- fail mds1,mds2
+ fail mds2,mds1
check_striped_dir_110 || error "check striped_dir failed"
test_110g() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_111a() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_111b() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_111c() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_111d() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_111e() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_111f() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_111g() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112a() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112b() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112c() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112d() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112e() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112f() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112g() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112h() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112i() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112j() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112k() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112l() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112m() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_112n() {
[ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
test_115() {
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.7.56) ]] ||
+ skip "Need MDS version at least 2.7.56"
+
([ $FAILURE_MODE == "HARD" ] &&
[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
skip "MDTs needs to be on diff hosts for HARD fail mode" &&
# OBD_FAIL_SPLIT_UPDATE_REC 0x1702
do_facet mds1 "lctl set_param fail_loc=0x80001702"
- $LFS setdirstripe -c$MDSCOUNT $DIR/$tdir/striped_dir
+ $LFS setdirstripe -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir
fail mds1
$CHECKSTAT -t dir $DIR/$tdir/striped_dir ||
# OBD_FAIL_SPLIT_UPDATE_REC 0x1702
do_facet mds2 "lctl set_param fail_loc=0x80001702"
- $LFS setdirstripe -c$MDSCOUNT $DIR/$tdir/striped_dir
+ $LFS setdirstripe -i0 -c$MDSCOUNT $DIR/$tdir/striped_dir
fail mds2
$CHECKSTAT -t dir $DIR/$tdir/striped_dir ||
replay_barrier mds1
mkdir $DIR/$tdir/dir_1
for ((i = 0; i < 20; i++)); do
- $LFS setdirstripe -c2 $DIR/$tdir/stripe_dir-$i
+ $LFS setdirstripe -i0 -c2 $DIR/$tdir/stripe_dir-$i
done
stop mds1
error "create dir-$i fails"
break
}
- $LFS setdirstripe -c2 $DIR/$tdir/stripe_dir-$i || {
+ $LFS setdirstripe -i0 -c2 $DIR/$tdir/stripe_dir-$i || {
error "create stripe_dir-$i fails"
break
}
}
run_test 120 "DNE fail abort should stop both normal and DNE replay"
+test_121() {
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.90) ] &&
+ skip "Don't support it before 2.11" &&
+ return 0
+
+ local at_max_saved=$(at_max_get mds)
+
+ touch $DIR/$tfile || error "touch $DIR/$tfile failed"
+ cancel_lru_locks mdc
+
+ multiop_bg_pause $DIR/$tfile s_s || error "multiop $DIR/$tfile failed"
+ mpid=$!
+
+ lctl set_param -n ldlm.cancel_unused_locks_before_replay "0"
+
+ stop mds1
+ change_active mds1
+ wait_for_facet mds1
+
+ #define OBD_FAIL_TGT_RECOVERY_REQ_RACE 0x721
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x721 fail_val=0"
+ at_max_set 0 mds
+
+ mount_facet mds1
+ wait_clients_import_state "$clients" mds1 FULL
+ clients_up || clients_up || error "failover df: $?"
+
+ kill -USR1 $mpid
+ wait $mpid || error "multiop_bg_pause pid failed"
+
+ do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
+ lctl set_param -n ldlm.cancel_unused_locks_before_replay "1"
+ at_max_set $at_max_saved mds
+ rm -f $DIR/$tfile
+}
+run_test 121 "lock replay timed out and race"
+
+test_130a() {
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.90) ] &&
+ skip "Do not support Data-on-MDT before 2.11"
+
+ replay_barrier $SINGLEMDS
+ $LFS setstripe -E 1M -L mdt -E EOF -c 2 $DIR/$tfile
+ fail $SINGLEMDS
+
+ [ $($LFS getstripe -L $DIR/$tfile) == "mdt" ] ||
+ error "Fail to replay DoM file creation"
+}
+run_test 130a "DoM file create (setstripe) replay"
+
+test_130b() {
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.90) ] &&
+ skip "Do not support Data-on-MDT before 2.11"
+
+ mkdir $DIR/$tdir
+ $LFS setstripe -E 1M -L mdt -E EOF -c 2 $DIR/$tdir
+ replay_barrier $SINGLEMDS
+ touch $DIR/$tdir/$tfile
+ fail $SINGLEMDS
+
+ [ $($LFS getstripe -L $DIR/$tdir/$tfile) == "mdt" ] ||
+ error "Fail to replay DoM file creation"
+}
+run_test 130b "DoM file create (inherited) replay"
+
+test_131a() {
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.90) ] &&
+ skip "Do not support Data-on-MDT before 2.11"
+
+ $LFS setstripe -E 1M -L mdt -E EOF -c 2 $DIR/$tfile
+ replay_barrier $SINGLEMDS
+ echo "dom_data" | dd of=$DIR/$tfile bs=8 count=1
+ # lock is not canceled and will be replayed
+ fail $SINGLEMDS
+
+ [ $(cat $DIR/$tfile) == "dom_data" ] ||
+ error "Wrong file content after failover"
+}
+run_test 131a "DoM file write lock replay"
+
+test_131b() {
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.90) ] &&
+ skip "Do not support Data-on-MDT before 2.11"
+
+ $LFS setstripe -E 1M -L mdt -E EOF -c 2 $DIR/$tfile
+ replay_barrier $SINGLEMDS
+ echo "dom_data" | dd of=$DIR/$tfile bs=8 count=1
+ cancel_lru_locks mdc
+
+ fail $SINGLEMDS
+
+ [ $(cat $DIR/$tfile) == "dom_data" ] ||
+ error "Wrong file content after failover"
+}
+run_test 131b "DoM file write replay"
+
+test_132a() {
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.9.90) ] &&
+ skip "Do not support PFL files before 2.10"
+
+ $LFS setstripe -E 1M -c 1 -E EOF -c 2 $DIR/$tfile
+ replay_barrier $SINGLEMDS
+	# writing past the first component size causes next component instantiation
+ dd if=/dev/urandom of=$DIR/$tfile bs=1M count=1 seek=1 ||
+ error "dd to $DIR/$tfile failed"
+ lfs getstripe $DIR/$tfile
+
+ cksum=$(md5sum $DIR/$tfile | awk '{print $1}')
+ $LFS getstripe -I2 $DIR/$tfile | grep -q lmm_objects ||
+ error "Component #1 was not instantiated"
+
+ fail $SINGLEMDS
+
+ lfs getstripe $DIR/$tfile
+ $LFS getstripe -I2 $DIR/$tfile | grep -q lmm_objects ||
+ error "Component #1 instantiation was not replayed"
+ cksum2=$(md5sum $DIR/$tfile | awk '{print $1}')
+ if [ $cksum != $cksum2 ] ; then
+ error_noexit "New cksum $cksum2 does not match original $cksum"
+ fi
+}
+run_test 132a "PFL new component instantiate replay"
+
+test_133() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ ([ $FAILURE_MODE == "HARD" ] &&
+ [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+ skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+ return 0
+
+ local remote_dir=$DIR/$tdir/remote_dir
+
+ mkdir -p $DIR/$tdir || error "mkdir $DIR/$tdir failed"
+ $LFS mkdir -i 1 $remote_dir
+
+ umount $MOUNT
+ do_facet mds2 $LCTL set_param seq.srv*MDT0001.space=clear
+
+ zconf_mount $(hostname) $MOUNT
+ client_up || return 1
+
+ #define OBD_FAIL_MDS_ALL_REQUEST_NET 0x123
+ # SEQ_QUERY = 700
+ do_facet mds1 $LCTL set_param fail_val=700 fail_loc=0x80000123
+ cp /etc/hosts $remote_dir/file &
+ local pid=$!
+ sleep 1
+
+ fail_nodf mds1
+
+ wait $pid || error "cp failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 133 "check resend of ongoing requests for lwp during failover"
+
complete $SECONDS
check_and_cleanup_lustre
exit_status