#                                  1  2.5  2.5    4    4  (min)"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="17  26a  26b    50   51  57"
+# bug number for skipped test: LU-2194 LU-2547
+[ "$(facet_fstype $SINGLEMDS)" = "zfs" ] &&
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 19b 24a 24b"
+
build_test_filter
# Allow us to override the setup if we already have a mounted system by
# setting SETUP=" " and CLEANUP=" "
local BEFORE=`date +%s`
local EVICT
- mount_client $DIR2
+ mount_client $DIR2 || error "failed to mount $DIR2"
- do_facet client mcreate $DIR/$tfile || return 1
- drop_ldlm_cancel "chmod 0777 $DIR2"
+ # cancel cached locks from OST to avoid eviction from it
+ cancel_lru_locks osc
+
+ do_facet client "stat $DIR > /dev/null" ||
+ error "failed to stat $DIR: $?"
+ drop_ldlm_cancel "chmod 0777 $DIR2" ||
+ error "failed to chmod $DIR2"
umount_client $DIR2
- do_facet client "munlink $DIR/$tfile"
# let the client reconnect
- sleep 5
+ client_reconnect
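+ # grab the newest EVICTED timestamp from the client's import state history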
EVICT=$(do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state | \
awk -F"[ [,]" '/EVICTED]$/ { if (mx<$4) {mx=$4;} } END { print mx }')
- [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] || error "no eviction"
+ [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] ||
+ (do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state;
+ error "no eviction: $EVICT before:$BEFORE")
}
run_test 19a "test expired_lock_main on mds (2867)"
local BEFORE=`date +%s`
local EVICT
- mount_client $DIR2
+ mount_client $DIR2 || error "failed to mount $DIR2: $?"
- do_facet client $MULTIOP $DIR/$tfile Ow || return 1
- drop_ldlm_cancel $MULTIOP $DIR2/$tfile Ow
- umount_client $DIR2
- do_facet client munlink $DIR/$tfile
+ # cancel cached locks from MDT to avoid eviction from it
+ cancel_lru_locks mdc
+
+ do_facet client $MULTIOP $DIR/$tfile Ow ||
+ error "failed to run multiop: $?"
+ drop_ldlm_cancel $MULTIOP $DIR2/$tfile Ow ||
+ error "failed to ldlm_cancel: $?"
+
+ umount_client $DIR2 || error "failed to unmount $DIR2: $?"
+ do_facet client munlink $DIR/$tfile ||
+ error "failed to unlink $DIR/$tfile: $?"
# let the client reconnect
- sleep 5
+ client_reconnect
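+ # as in 19a, grab the newest EVICTED timestamp, here from the osc imports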
EVICT=$(do_facet client $LCTL get_param osc.$FSNAME-OST*.state | \
awk -F"[ [,]" '/EVICTED]$/ { if (mx<$4) {mx=$4;} } END { print mx }')
- [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] || error "no eviction"
+ [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] ||
+ (do_facet client $LCTL get_param osc.$FSNAME-OST*.state;
+ error "no eviction: $EVICT before:$BEFORE")
}
run_test 19b "test expired_lock_main on ost (2867)"
rc=$?
lctl set_param fail_loc=0x0
client_reconnect
- [ $rc -eq 0 ] && error_ignore 5494 "multiop didn't fail fsync: rc $rc" || true
+ [ $rc -eq 0 ] &&
+ error_ignore bz5494 "multiop didn't fail fsync: rc $rc" || true
}
run_test 24a "fsync error (should return error)"
test_24b() {
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
- dmesg -c
+ dmesg -c > /dev/null
mkdir -p $DIR/$tdir
lfs setstripe $DIR/$tdir -s 0 -i 0 -c 1
cancel_lru_locks osc
lctl set_param fail_loc=0x0
client_reconnect
[ $rc1 -eq 0 -o $rc2 -eq 0 ] &&
- error_ignore "multiop didn't fail fsync: $rc1 or close: $rc2" || true
+ error_ignore bz5494 "multiop didn't fail fsync: $rc1 or close: $rc2" ||
+ true
- dmesg | grep "dirty page discard:" || \
+ dmesg | grep "dirty page discard:" ||
error "no discarded dirty page found!"
}
run_test 24b "test dirty page discard due to client eviction"
# fail abort so client will be new again
fail_abort $SINGLEMDS
client_up || error "reconnect failed"
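+ # the MDS connects to the OSTs as a client; wait for that import as well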
+ wait_osc_import_state $SINGLEMDS ost FULL
return 0
}
run_test 29a "error adding new clients doesn't cause LBUG (bug 22273)"
rc=$?
echo writemany returned $rc
#these may fail because of eviction due to slow AST response.
- [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
+ [ $rc -eq 0 ] ||
+ error_ignore bz13652 "writemany returned rc $rc" || true
}
run_test 50 "failover MDS under load"
# and recovery was interrupted
sleep $TIMEOUT
kill -USR1 $CLIENT_PID
- wait $CLIENT_PID
+ wait $CLIENT_PID
rc=$?
echo writemany returned $rc
- [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
+ [ $rc -eq 0 ] ||
+ error_ignore bz13652 "writemany returned rc $rc" || true
}
run_test 51 "failover MDS during recovery"
mkdir -p $DIR/$tdir
+ # dd writes 128MB (bs=32M count=4), so the minimum pass speed is 2MB/s
+ local ddtimeout=64
+ # LU-2887/LU-3089 - set min pass speed to 500KBps
+ [ "$(facet_fstype ost1)" = "zfs" ] && ddtimeout=256
+
# first dd should be finished quickly
$LFS setstripe -c 1 -i 0 $DIR/$tdir/$tfile-1
- dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
+ dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
DDPID=$!
count=0
echo "step1: testing ......"
- while [ true ]; do
- if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
- count=$[count+1]
- if [ $count -gt 64 ]; then
- error "dd should be finished!"
- fi
- sleep 1
- done
+ while kill -0 $DDPID 2> /dev/null; do
+ let count++
+ if [ $count -gt $ddtimeout ]; then
+ error "dd should be finished!"
+ fi
+ sleep 1
+ done
echo "(dd_pid=$DDPID, time=$count)successful"
$LFS setstripe -c 1 -i 0 $DIR/$tdir/$tfile-2
#define OBD_FAIL_OST_DROP_REQ 0x21d
do_facet ost1 lctl set_param fail_loc=0x0000021d
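+ # OBD_FAIL_OST_DROP_REQ makes ost1 silently drop the brw requests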
# second dd will be never finished
- dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
+ dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
DDPID=$!
count=0
echo "step2: testing ......"
- while [ $count -le 64 ]; do
- dd_name="`ps x | awk '$1 == '$DDPID' { print $5 }'`"
- if [ -z $dd_name ]; then
- ls -l $DIR/$tdir
- echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)"
- error "dd shouldn't be finished!"
- fi
- count=$[count+1]
- sleep 1
- done
+ while [ $count -le $ddtimeout ]; do
+ if ! kill -0 $DDPID 2> /dev/null; then
+ ls -l $DIR/$tdir
+ error "dd shouldn't be finished! (time=$count)"
+ fi
+ let count++
+ sleep 1
+ done
echo "(dd_pid=$DDPID, time=$count)successful"
#Recover fail_loc and dd will finish soon
do_facet ost1 lctl set_param fail_loc=0
count=0
echo "step3: testing ......"
- while [ true ]; do
- if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
- count=$[count+1]
- if [ $count -gt 500 ]; then
- error "dd should be finished!"
- fi
- sleep 1
- done
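+ # keep roughly the original 500s limit: ddtimeout + 440 (longer on zfs)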
+ while kill -0 $DDPID 2> /dev/null; do
+ let count++
+ if [ $count -gt $((ddtimeout + 440)) ]; then
+ error "dd should be finished!"
+ fi
+ sleep 1
+ done
echo "(dd_pid=$DDPID, time=$count)successful"
- rm -rf $DIR/$tdir
+ rm -rf $DIR/$tdir
}
run_test 55 "ost_brw_read/write drops timed-out read/write request"
run_test 59 "Read cancel race on client eviction"
err17935 () {
- # we assume that all md changes are in the MDT0 changelog
- if [ $MDSCOUNT -gt 1 ]; then
- error_ignore 17935 $*
- else
- error $*
- fi
+ # we assume that all md changes are in the MDT0 changelog
+ if [ $MDSCOUNT -gt 1 ]; then
+ error_ignore bz17935 $*
+ else
+ error $*
+ fi
}
test_60() {
}
run_test 61 "Verify to not reuse orphan objects - bug 17025"
+# test_62 as seen in b2_1; please do not reuse test_62
+#test_62()
+#{
+# zconf_umount `hostname` $DIR
+# #define OBD_FAIL_PTLRPC_DELAY_IMP_FULL 0x516
+# lctl set_param fail_loc=0x516
+# mount_client $DIR
+#}
+#run_test 62 "Verify connection flags race - bug LU-1716"
+
check_cli_ir_state()
{
local NODE=${1:-$HOSTNAME}
stop mds1
# We need this test because mds is like a client in IR context.
- start mds1 $MDSDEV1 || error "MDS should start w/o mgs"
+ start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS ||
+ error "MDS should start w/o mgs"
# start mgs and remount mds w/ ir
- start mgs $MGSDEV
+ start mgs $(mgsdevname) $MGS_MOUNT_OPTS
clients_up
# remount client so that fsdb will be created on the MGS
return $rc
}
run_test 107 "drop reint reply, then restart MDT"
+
+test_110a () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local remote_dir=$DIR/$tdir/remote_dir
+ local MDTIDX=1
+
+ mkdir -p $DIR/$tdir
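+ # the MDT drops the mkdir request, forcing the client to resend it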
+ drop_request "$LFS mkdir -i $MDTIDX $remote_dir" ||
+ error "lfs mkdir failed"
+ local diridx=$($GETSTRIPE -M $remote_dir)
+ [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+}
+run_test 110a "create remote directory: drop client req"
+
+test_110b () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local remote_dir=$DIR/$tdir/remote_dir
+ local MDTIDX=1
+
+ mkdir -p $DIR/$tdir
+ drop_reint_reply "$LFS mkdir -i $MDTIDX $remote_dir" ||
+ error "lfs mkdir failed"
+
+ local diridx=$($GETSTRIPE -M $remote_dir)
+ [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+}
+run_test 110b "create remote directory: drop Master rep"
+
+test_110c () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local remote_dir=$DIR/$tdir/remote_dir
+ local MDTIDX=1
+
+ mkdir -p $DIR/$tdir
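+ # MDT$MDTIDX is served by the 1-based facet mds$((MDTIDX + 1)), which is
+ # the slave (remote) MDT for this mkdir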
+ drop_update_reply $((MDTIDX + 1)) "$LFS mkdir -i $MDTIDX $remote_dir" ||
+ error "lfs mkdir failed"
+
+ local diridx=$($GETSTRIPE -M $remote_dir)
+ [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+}
+run_test 110c "create remote directory: drop update rep on slave MDT"
+
+test_110d () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local remote_dir=$DIR/$tdir/remote_dir
+ local MDTIDX=1
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+ drop_request "rm -rf $remote_dir" || error "rm remote dir failed"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 110d "remove remote directory: drop client req"
+
+test_110e () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local remote_dir=$DIR/$tdir/remote_dir
+ local MDTIDX=1
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+ drop_reint_reply "rm -rf $remote_dir" || error "rm remote dir failed"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+
+ return 0
+}
+run_test 110e "remove remote directory: drop master rep"
+
+test_110f () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local remote_dir=$DIR/$tdir/remote_dir
+ local MDTIDX=1
+
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
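+ # for the unlink the slave is the parent's MDT, served by facet mds$MDTIDX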
+ drop_update_reply $MDTIDX "rm -rf $remote_dir" ||
+ error "rm remote dir failed"
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+}
+run_test 110f "remove remote directory: drop slave rep"
+
+# LU-2844 mdt prepare fail should not cause umount oops
+test_111 ()
+{
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.62) ]] ||
+ { skip "Need MDS version at least 2.3.62"; return 0; }
+
+ local mdsdev=$(mdsdevname ${SINGLEMDS//mds/})
+#define OBD_FAIL_MDS_CHANGELOG_INIT 0x151
+ do_facet $SINGLEMDS lctl set_param fail_loc=0x151
+ stop $SINGLEMDS || error "stop MDS failed"
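+ # while changelog init is set to fail, mount must fail cleanly (no oops)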
+ start $SINGLEMDS $mdsdev && error "start MDS should fail"
+ do_facet $SINGLEMDS lctl set_param fail_loc=0
+ start $SINGLEMDS $mdsdev || error "start MDS failed"
+}
+run_test 111 "mdd setup fail should not cause umount oops"
+
+# LU-793
+test_112a() {
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+ do_facet_random_file client $TMP/$tfile 100K ||
+ error_noexit "Create random file $TMP/$tfile"
+
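+ # stall the first bulk write on ost1 past the timeout so the client resends it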
+ pause_bulk "cp $TMP/$tfile $DIR/$tfile" $TIMEOUT ||
+ error_noexit "Can't pause_bulk copy"
+
+ df $DIR
+ # expect cmp to succeed, client resent bulk
+ cmp $TMP/$tfile $DIR/$tfile ||
+ error_noexit "Wrong data has been written"
+ rm $DIR/$tfile ||
+ error_noexit "Can't remove file"
+ rm $TMP/$tfile
+}
+run_test 112a "bulk resend while original request is in progress"
+
complete $SECONDS
check_and_cleanup_lustre
exit_status