# 1 2.5 2.5 4 4 (min)"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="17 26a 26b 50 51 57"
+[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+# bug number for skipped test: LU-2194 LU-2547
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 19b 24a 24b"
+
build_test_filter
# Allow us to override the setup if we already have a mounted system by
sleep $TIMEOUT
do_facet client "cmp $TMP/$tfile $DIR/$tfile" || return 2
start_read_ahead
+ rm -f $TMP/$tfile
}
run_test 16 "timeout bulk put, don't evict client (2732)"
rc=0
pgcache_empty || rc=2
$LCTL --device $osc2dev activate
- rm -f $f
+ rm -f $f $TMP/$tfile
return $rc
}
run_test 18a "manual ost invalidate clears page cache immediately"
# cache after the client reconnects?
rc=0
pgcache_empty || rc=2
- rm -f $f
+ rm -f $f $TMP/$tfile
return $rc
}
run_test 18b "eviction and reconnect clears page cache (2766)"
# cache after the client reconnects?
rc=0
pgcache_empty || rc=2
- rm -f $f
+ rm -f $f $TMP/$tfile
return $rc
}
run_test 18c "Dropped connect reply after eviction handing (14755)"
local BEFORE=`date +%s`
local EVICT
- mount_client $DIR2
+ mount_client $DIR2 || error "failed to mount $DIR2"
+
+ # cancel cached locks from OST to avoid eviction from it
+ cancel_lru_locks osc
- do_facet client "stat $DIR > /dev/null" || return 1
- drop_ldlm_cancel "chmod 0777 $DIR2"
+ do_facet client "stat $DIR > /dev/null" ||
+ error "failed to stat $DIR: $?"
+ drop_ldlm_cancel "chmod 0777 $DIR2" ||
+ error "failed to chmod $DIR2"
umount_client $DIR2
EVICT=$(do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state | \
awk -F"[ [,]" '/EVICTED]$/ { if (mx<$4) {mx=$4;} } END { print mx }')
- [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] || error "no eviction"
+ [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] ||
+ (do_facet client $LCTL get_param mdc.$FSNAME-MDT*.state;
+ error "no eviction: $EVICT before:$BEFORE")
}
run_test 19a "test expired_lock_main on mds (2867)"
local BEFORE=`date +%s`
local EVICT
- mount_client $DIR2
+ mount_client $DIR2 || error "failed to mount $DIR2: $?"
- do_facet client $MULTIOP $DIR/$tfile Ow || return 1
- drop_ldlm_cancel $MULTIOP $DIR2/$tfile Ow
- umount_client $DIR2
- do_facet client munlink $DIR/$tfile
+ # cancel cached locks from MDT to avoid eviction from it
+ cancel_lru_locks mdc
+
+ do_facet client $MULTIOP $DIR/$tfile Ow ||
+ error "failed to run multiop: $?"
+ drop_ldlm_cancel $MULTIOP $DIR2/$tfile Ow ||
+ error "failed to ldlm_cancel: $?"
+
+ umount_client $DIR2 || error "failed to unmount $DIR2: $?"
+ do_facet client munlink $DIR/$tfile ||
+ error "failed to unlink $DIR/$tfile: $?"
# let the client reconnect
client_reconnect
EVICT=$(do_facet client $LCTL get_param osc.$FSNAME-OST*.state | \
awk -F"[ [,]" '/EVICTED]$/ { if (mx<$4) {mx=$4;} } END { print mx }')
- [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] || error "no eviction"
+ [ ! -z "$EVICT" ] && [[ $EVICT -gt $BEFORE ]] ||
+ (do_facet client $LCTL get_param osc.$FSNAME-OST*.state;
+ error "no eviction: $EVICT before:$BEFORE")
}
run_test 19b "test expired_lock_main on ost (2867)"
rc=$?
lctl set_param fail_loc=0x0
client_reconnect
- [ $rc -eq 0 ] && error_ignore 5494 "multiop didn't fail fsync: rc $rc" || true
+ [ $rc -eq 0 ] &&
+ error_ignore bz5494 "multiop didn't fail fsync: rc $rc" || true
}
run_test 24a "fsync error (should return error)"
test_24b() {
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
- dmesg -c
+ dmesg -c > /dev/null
mkdir -p $DIR/$tdir
lfs setstripe $DIR/$tdir -s 0 -i 0 -c 1
cancel_lru_locks osc
lctl set_param fail_loc=0x0
client_reconnect
[ $rc1 -eq 0 -o $rc2 -eq 0 ] &&
- error_ignore 5494 "multiop didn't fail fsync: $rc1 or close: $rc2" ||
+ error_ignore bz5494 "multiop didn't fail fsync: $rc1 or close: $rc2" ||
true
- dmesg | grep "dirty page discard:" || \
+ dmesg | grep "dirty page discard:" ||
error "no discarded dirty page found!"
}
run_test 24b "test dirty page discard due to client eviction"
# fail abort so client will be new again
fail_abort $SINGLEMDS
client_up || error "reconnect failed"
- wait_osc_import_state mds ost FULL
+ wait_osc_import_state $SINGLEMDS ost FULL
return 0
}
run_test 29a "error adding new clients doesn't cause LBUG (bug 22273)"
rc=$?
echo writemany returned $rc
#these may fail because of eviction due to slow AST response.
- [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
+ [ $rc -eq 0 ] ||
+ error_ignore bz13652 "writemany returned rc $rc" || true
}
run_test 50 "failover MDS under load"
# and recovery was interrupted
sleep $TIMEOUT
kill -USR1 $CLIENT_PID
- wait $CLIENT_PID
+ wait $CLIENT_PID
rc=$?
echo writemany returned $rc
- [ $rc -eq 0 ] || error_ignore 13652 "writemany returned rc $rc" || true
+ [ $rc -eq 0 ] ||
+ error_ignore bz13652 "writemany returned rc $rc" || true
}
run_test 51 "failover MDS during recovery"
mkdir -p $DIR/$tdir
+ # Minimum pass speed is 2MBps
+ local ddtimeout=64
+ # LU-2887/LU-3089 - set min pass speed to 500KBps
+ [ "$(facet_fstype ost1)" = "zfs" ] && ddtimeout=256
+
# first dd should be finished quickly
$LFS setstripe -c 1 -i 0 $DIR/$tdir/$tfile-1
- dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
+ dd if=/dev/zero of=$DIR/$tdir/$tfile-1 bs=32M count=4 &
DDPID=$!
count=0
echo "step1: testing ......"
- while [ true ]; do
- if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
- count=$[count+1]
- if [ $count -gt 64 ]; then
- error "dd should be finished!"
- fi
- sleep 1
- done
+ while kill -0 $DDPID 2> /dev/null; do
+ let count++
+ if [ $count -gt $ddtimeout ]; then
+ error "dd should be finished!"
+ fi
+ sleep 1
+ done
echo "(dd_pid=$DDPID, time=$count)successful"
$LFS setstripe -c 1 -i 0 $DIR/$tdir/$tfile-2
#define OBD_FAIL_OST_DROP_REQ 0x21d
do_facet ost1 lctl set_param fail_loc=0x0000021d
# second dd will be never finished
- dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
+ dd if=/dev/zero of=$DIR/$tdir/$tfile-2 bs=32M count=4 &
DDPID=$!
count=0
echo "step2: testing ......"
- while [ $count -le 64 ]; do
- dd_name="`ps x | awk '$1 == '$DDPID' { print $5 }'`"
- if [ -z $dd_name ]; then
- ls -l $DIR/$tdir
- echo "debug: (dd_name=$dd_name, dd_pid=$DDPID, time=$count)"
- error "dd shouldn't be finished!"
- fi
- count=$[count+1]
- sleep 1
- done
+ while [ $count -le $ddtimeout ]; do
+ if ! kill -0 $DDPID 2> /dev/null; then
+ ls -l $DIR/$tdir
+ error "dd shouldn't be finished! (time=$count)"
+ fi
+ let count++
+ sleep 1
+ done
echo "(dd_pid=$DDPID, time=$count)successful"
#Recover fail_loc and dd will finish soon
do_facet ost1 lctl set_param fail_loc=0
count=0
echo "step3: testing ......"
- while [ true ]; do
- if [ -z `ps x | awk '$1 == '$DDPID' { print $5 }'` ]; then break; fi
- count=$[count+1]
- if [ $count -gt 500 ]; then
- error "dd should be finished!"
- fi
- sleep 1
- done
+ while kill -0 $DDPID 2> /dev/null; do
+ let count++
+ if [ $count -gt $((ddtimeout + 440)) ]; then
+ error "dd should be finished!"
+ fi
+ sleep 1
+ done
echo "(dd_pid=$DDPID, time=$count)successful"
- rm -rf $DIR/$tdir
+ rm -rf $DIR/$tdir
}
run_test 55 "ost_brw_read/write drops timed-out read/write request"
run_test 59 "Read cancel race on client eviction"
err17935 () {
-	# we assume that all md changes are in the MDT0 changelog
-	if [ $MDSCOUNT -gt 1 ]; then
-	    error_ignore 17935 $*
-	else
-	    error $*
-	fi
+	# Report a test failure, but downgrade it to an ignorable error
+	# when several MDTs are configured: we assume that all md changes
+	# are in the MDT0 changelog, which does not hold with multiple
+	# MDTs (known issue bz17935).
+	if [ $MDSCOUNT -gt 1 ]; then
+		error_ignore bz17935 $*
+	else
+		error $*
+	fi
}
test_60() {
- MDT0=$($LCTL get_param -n mdc.*.mds_server_uuid | \
- awk '{gsub(/_UUID/,""); print $1}' | head -1)
+ MDT0=$($LCTL get_param -n mdc.*.mds_server_uuid |
+ awk '{ gsub(/_UUID/,""); print $1 }' | head -n1)
NUM_FILES=15000
mkdir -p $DIR/$tdir
local target=${srv}_svc
local si=$(do_facet $srv lctl get_param -n $obdname.${!target}.instance)
- local ci=$(lctl get_param -n $cliname.${!target}-${cliname}-*.import | \
- awk '/instance/{ print $2 }' |head -1)
+ local ci=$(lctl get_param -n $cliname.${!target}-${cliname}-*.import |
+ awk '/instance/{ print $2 }' | head -n1)
return $([ $si -eq $ci ])
}
stop mds1
# We need this test because mds is like a client in IR context.
- start mds1 $MDSDEV1 || error "MDS should start w/o mgs"
+ start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS ||
+ error "MDS should start w/o mgs"
# start mgs and remount mds w/ ir
- start mgs $MGSDEV
+ start mgs $(mgsdevname) $MGS_MOUNT_OPTS
clients_up
# remount client so that fsdb will be created on the MGS
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
local remote_dir=$DIR/$tdir/remote_dir
local MDTIDX=1
+ local num
+
+ #prepare for 110 test, which need set striped dir on remote MDT.
+ for num in $(seq $MDSCOUNT); do
+ do_facet mds$num \
+ lctl set_param -n mdt.${FSNAME}*.enable_remote_dir=1 \
+ 2>/dev/null
+ done
mkdir -p $DIR/$tdir
- drop_request "$LFS mkdir -i $MDTIDX $remote_dir" ||
+ drop_request "$LFS mkdir -i $MDTIDX -c2 $remote_dir" ||
error "lfs mkdir failed"
local diridx=$($GETSTRIPE -M $remote_dir)
[ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
local MDTIDX=1
mkdir -p $DIR/$tdir
- drop_reint_reply "$LFS mkdir -i $MDTIDX $remote_dir" ||
+ drop_reint_reply "$LFS mkdir -i $MDTIDX -c2 $remote_dir" ||
error "lfs mkdir failed"
diridx=$($GETSTRIPE -M $remote_dir)
local MDTIDX=1
mkdir -p $DIR/$tdir
- drop_update_reply $((MDTIDX + 1)) "$LFS mkdir -i $MDTIDX $remote_dir" ||
+ drop_update_reply $MDTIDX "$LFS mkdir -i $MDTIDX -c2 $remote_dir" ||
error "lfs mkdir failed"
diridx=$($GETSTRIPE -M $remote_dir)
local MDTIDX=1
mkdir -p $DIR/$tdir
- $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+ $LFS mkdir -i $MDTIDX -c2 $remote_dir || error "lfs mkdir failed"
drop_request "rm -rf $remote_dir" || error "rm remote dir failed"
local MDTIDX=1
mkdir -p $DIR/$tdir
- $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+ $LFS mkdir -i $MDTIDX -c2 $remote_dir || error "lfs mkdir failed"
drop_reint_reply "rm -rf $remote_dir" || error "rm remote dir failed"
rm -rf $DIR/$tdir || error "rmdir failed"
local MDTIDX=1
mkdir -p $DIR/$tdir
- $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+ $LFS mkdir -i $MDTIDX -c2 $remote_dir || error "lfs mkdir failed"
drop_update_reply $MDTIDX "rm -rf $remote_dir" ||
error "rm remote dir failed"
}
run_test 110f "remove remote directory: drop slave rep"
+test_110g () {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+ local remote_dir=$DIR/$tdir/remote_dir
+ local MDTIDX=1
+
+ mkdir -p $remote_dir
+
+ createmany -o $remote_dir/f 100
+
+ #define OBD_FAIL_MIGRATE_NET_REP 0x1702
+ do_facet mds$MDTIDX lctl set_param fail_loc=0x1702
+ $LFS mv -M $MDTIDX $remote_dir || error "migrate failed"
+ do_facet mds$MDTIDX lctl set_param fail_loc=0x0
+
+ for file in $(find $remote_dir); do
+ mdt_index=$($LFS getstripe -M $file)
+ [ $mdt_index == $MDTIDX ] ||
+ error "$file is not on MDT${MDTIDX}"
+ done
+
+ rm -rf $DIR/$tdir || error "rmdir failed"
+}
+run_test 110g "drop reply during migration"
+
+# LU-2844 mdt prepare fail should not cause umount oops
+test_111 ()
+{
+	[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.62) ]] ||
+		{ skip "Need MDS version at least 2.3.62"; return 0; }
+
+	local mdsdev=$(mdsdevname ${SINGLEMDS//mds/})
+#define OBD_FAIL_MDS_CHANGELOG_INIT 0x151
+	# force changelog init to fail during MDT prepare; the failed
+	# start must error out cleanly instead of oopsing on umount
+	do_facet $SINGLEMDS lctl set_param fail_loc=0x151
+	stop $SINGLEMDS || error "stop MDS failed"
+	start $SINGLEMDS $mdsdev && error "start MDS should fail"
+	do_facet $SINGLEMDS lctl set_param fail_loc=0
+	# with the fail_loc cleared a normal restart must succeed
+	start $SINGLEMDS $mdsdev || error "start MDS failed"
+}
+run_test 111 "mdd setup fail should not cause umount oops"
+
+# LU-793
+# A bulk write stalled past the timeout must be resent by the client;
+# verify the data finally written to the file is intact.
+test_112a() {
+	remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+	do_facet_random_file client $TMP/$tfile 100K ||
+		error_noexit "Create random file $TMP/$tfile"
+
+	# stall the bulk transfer for $TIMEOUT to force a resend
+	pause_bulk "cp $TMP/$tfile $DIR/$tfile" $TIMEOUT ||
+		error_noexit "Can't pause_bulk copy"
+
+	df $DIR
+	# expect cmp to succeed, client resent bulk
+	cmp $TMP/$tfile $DIR/$tfile ||
+		error_noexit "Wrong data has been written"
+	rm $DIR/$tfile ||
+		error_noexit "Can't remove file"
+	rm $TMP/$tfile
+}
+run_test 112a "bulk resend while orignal request is in progress"
+
+# Race a flock completion (CP) callback against client eviction.
+# parameters: fail_loc CMD RC
+#   $1 - scenario selecting the fail_loc to arm: "CLEANUP", "REPLY",
+#        "DEADLOCK CLEANUP" or "DEADLOCK REPLY"
+#   $2 - flock operation to race: set, get or unlock
+#   $3 - exit status of flocks_test that counts as a pass
+test_120_reply() {
+	local PID
+	local PID2
+	local rc=5
+	local fail
+
+	#define OBD_FAIL_LDLM_CP_CB_WAIT2 0x320
+	#define OBD_FAIL_LDLM_CP_CB_WAIT3 0x321
+	#define OBD_FAIL_LDLM_CP_CB_WAIT4 0x322
+	#define OBD_FAIL_LDLM_CP_CB_WAIT5 0x323
+
+	echo
+	echo -n "** FLOCK REPLY vs. EVICTION race, lock $2"
+	[ "$1" = "CLEANUP" ] &&
+		fail=0x80000320 && echo ", $1 cp first"
+	[ "$1" = "REPLY" ] &&
+		fail=0x80000321 && echo ", $1 cp first"
+	[ "$1" = "DEADLOCK CLEANUP" ] &&
+		fail=0x80000322 && echo " DEADLOCK, CLEANUP cp first"
+	[ "$1" = "DEADLOCK REPLY" ] &&
+		fail=0x80000323 && echo " DEADLOCK, REPLY cp first"
+
+	if [ x"$2" = x"get" ]; then
+		#for TEST lock, take a conflict in advance
+		# sleep longer than evictor to not confuse fail_loc: 2+2+4
+		echo "** Taking conflict **"
+		flocks_test 5 set read sleep 10 $DIR/$tfile &
+		PID2=$!
+
+		sleep 2
+	fi
+
+	$LCTL set_param fail_loc=$fail
+
+	flocks_test 5 $2 write $DIR/$tfile &
+	PID=$!
+
+	sleep 2
+	echo "** Evicting and re-connecting client **"
+	mds_evict_client
+
+	client_reconnect
+
+	# the conflicting holder (get case) must be reaped first
+	if [ x"$2" = x"get" ]; then
+		wait $PID2
+	fi
+
+	wait $PID
+	rc=$?
+
+	# check if the return value is allowed
+	[ $rc -eq $3 ] && rc=0
+
+	$LCTL set_param fail_loc=0
+	return $rc
+}
+
+# a lock is taken, unlock vs. cleanup_resource() race for destroying
+# the ORIGINAL lock.
+#   $1 - fail_loc to arm so the unlock sleeps in the completion callback
+test_120_destroy()
+{
+	local PID
+
+	flocks_test 5 set write sleep 4 $DIR/$tfile &
+	PID=$!
+	sleep 2
+
+	# let unlock to sleep in CP CB
+	$LCTL set_param fail_loc=$1
+	sleep 4
+
+	# let cleanup to sleep in CP CB
+	mds_evict_client
+
+	client_reconnect
+
+	wait $PID
+	# NOTE(review): rc is not declared local here, unlike in
+	# test_120_reply -- it leaks into the caller's scope
+	rc=$?
+
+	$LCTL set_param fail_loc=0
+	return $rc
+}
+
+test_120() {
+	flock_is_enabled || { skip "mount w/o flock enabled" && return; }
+	touch $DIR/$tfile
+
+	# the accepted flocks_test status is 5 for the plain races and 35
+	# for the DEADLOCK REPLY case (presumably EIO and EDEADLK -- TODO
+	# confirm against flocks_test)
+	test_120_reply "CLEANUP" set 5 || error "SET race failed"
+	test_120_reply "CLEANUP" get 5 || error "GET race failed"
+	test_120_reply "CLEANUP" unlock 5 || error "UNLOCK race failed"
+
+	test_120_reply "REPLY" set 5 || error "SET race failed"
+	test_120_reply "REPLY" get 5 || error "GET race failed"
+	test_120_reply "REPLY" unlock 5 || error "UNLOCK race failed"
+
+	# DEADLOCK tests
+	test_120_reply "DEADLOCK CLEANUP" set 5 || error "DEADLOCK race failed"
+	test_120_reply "DEADLOCK REPLY" set 35 || error "DEADLOCK race failed"
+
+	test_120_destroy 0x320 || error "unlock-cleanup race failed"
+}
+run_test 120 "flock race: completion vs. evict"
+
complete $SECONDS
check_and_cleanup_lustre
exit_status