ALWAYS_EXCEPT="$SANITY_EXCEPT "
# bug number for skipped test: LU-9693 LU-6493 LU-9693
ALWAYS_EXCEPT+=" 42a 42b 42c "
-# bug number: LU-8411 LU-9054 LU-13314
-ALWAYS_EXCEPT+=" 407 312 56ob"
+# bug number: LU-8411 LU-9054
+ALWAYS_EXCEPT+=" 407 312"
if $SHARED_KEY; then
# bug number: LU-9795 LU-9795 LU-9795 LU-9795
ALWAYS_EXCEPT+=" 45 317"
fi
-# skip nfs tests on kernels >= 4.14.0 until they are fixed
-if [ $LINUX_VERSION_CODE -ge $(version_code 4.14.0) ]; then
+# skip nfs tests on kernels >= 4.12.0 until they are fixed
+if [ $LINUX_VERSION_CODE -ge $(version_code 4.12.0) ]; then
# bug number: LU-12661
ALWAYS_EXCEPT+=" 817"
fi
skip "Does not support layout lock."
}
+check_swap_layout_no_dom()
+{
+ local FOLDER=$1
+ local SUPP=$(lfs getstripe $FOLDER | grep "pattern: mdt" | wc -l)
+ [ $SUPP -eq 0 ] || skip "layout swap does not support DOM files so far"
+}
+
check_and_setup_lustre
DIR=${DIR:-$MOUNT}
assert_DIR
done
local failed=0
- for i in {1..50}; do
+ for i in {1..250}; do
for fname in $(mktemp -u $DIR/$tdir/.$tfile.XXXXXX) \
$(mktemp $DIR/$tdir/$tfile.XXXXXXXX); do
touch $fname || error "touch $fname failed"
done
done
echo "$failed MDT index mismatches"
- (( failed < 4 )) || error "MDT index mismatch $failed times"
+ (( failed < 20 )) || error "MDT index mismatch $failed times"
}
run_test 33h "temp file is located on the same MDT as target"
$DIR/$tdir/$tfile && error "execute $DIR/$tdir/$tfile succeeded" || true
kill -USR1 $pid
+ # Wait for multiop to exit
+ wait $pid
}
run_test 43A "execution of file opened for write should return -ETXTBSY"
}
run_test 48e "Access to recreated parent subdir (should return errors)"
+test_48f() {
+ [[ $MDS1_VERSION -ge $(version_code 2.13.55) ]] ||
+ skip "need MDS >= 2.13.55"
+ [[ $MDSCOUNT -ge 2 ]] || skip "needs >= 2 MDTs"
+ [[ "$(facet_host mds1)" != "$(facet_host mds2)" ]] ||
+ skip "needs different host for mdt1 mdt2"
+ [[ $(facet_fstype mds1) == ldiskfs ]] || skip "ldiskfs only"
+
+ $LFS mkdir -i0 $DIR/$tdir
+ $LFS mkdir -i 1 $DIR/$tdir/sub1 $DIR/$tdir/sub2 $DIR/$tdir/sub3
+
+ for d in sub1 sub2 sub3; do
+ #define OBD_FAIL_OSD_REF_DEL 0x19c
+ do_facet mds1 $LCTL set_param fail_loc=0x8000019c
+ rm -rf $DIR/$tdir/$d && error "rm $d should fail"
+ done
+
+ rm -d --interactive=never $DIR/$tdir || error "rm $tdir fail"
+}
+run_test 48f "non-zero nlink dir unlink won't LBUG()"
+
test_49() { # LU-1030
[ $PARALLEL == "yes" ] && skip "skip parallel run"
remote_ost_nodsh && skip "remote OST with nodsh"
cmd="$LFS find $dir -ctime +1s -type f"
nums=$($cmd | wc -l)
(( $nums == $count * 2 + 1)) ||
- error "'$cmd' wrong: found $nums, expected $((expected*2+1))"
+ error "'$cmd' wrong: found $nums, expected $((count * 2 + 1))"
}
run_test 56ob "check lfs find -atime -mtime -ctime with units"
do_facet mds$index $LCTL set_param fail_loc=0x8000019a \
> /dev/null
- usleep 100
+ sleep 0.01
done
kill -9 $pid
}
run_test 74c "ldlm_lock_create error path, (shouldn't LBUG)"
-num_inodes() {
- awk '/lustre_inode_cache/ {print $2; exit}' /proc/slabinfo
+slab_lic=/sys/kernel/slab/lustre_inode_cache
+num_objects() {
+ [ -f $slab_lic/shrink ] && echo 1 > $slab_lic/shrink
+ [ -f $slab_lic/objects ] && awk '{ print $1 }' $slab_lic/objects ||
+ awk '/lustre_inode_cache/ { print $2; exit }' /proc/slabinfo
}
-test_76() { # Now for bug 20433, added originally in bug 1443
+test_76() { # Now for b=20433, added originally in b=1443
[ $PARALLEL == "yes" ] && skip "skip parallel run"
cancel_lru_locks osc
+ # there may be some slab objects cached per core
local cpus=$(getconf _NPROCESSORS_ONLN 2>/dev/null)
- local before=$(num_inodes)
+ local before=$(num_objects)
local count=$((512 * cpus))
- [ "$SLOW" = "no" ] && count=$((64 * cpus))
+ [ "$SLOW" = "no" ] && count=$((128 * cpus))
+ local margin=$((count / 10))
+ if [[ -f $slab_lic/aliases ]]; then
+ local aliases=$(cat $slab_lic/aliases)
+ (( aliases > 0 )) && margin=$((margin * aliases))
+ fi
- echo "before inodes: $before"
+ echo "before slab objects: $before"
for i in $(seq $count); do
touch $DIR/$tfile
rm -f $DIR/$tfile
done
cancel_lru_locks osc
- local after=$(num_inodes)
- echo "after inodes: $after"
- while (( after > before + 8 * ${cpus:-1} )); do
+ local after=$(num_objects)
+ echo "created: $count, after slab objects: $after"
+ # shared slab counts are not very accurate, allow significant margin
+ # the main goal is that the cache growth is not permanently > $count
+ while (( after > before + margin )); do
sleep 1
- after=$(num_inodes)
+ after=$(num_objects)
wait=$((wait + 1))
- (( wait % 5 == 0 )) && echo "wait $wait seconds inodes: $after"
- if (( wait > 30 )); then
- error "inode slab grew from $before to $after"
+ (( wait % 5 == 0 )) && echo "wait $wait seconds objects: $after"
+ if (( wait > 60 )); then
+ error "inode slab grew from $before+$margin to $after"
fi
done
}
}
run_test 81b "OST should return -ENOSPC when retry still fails ======="
-test_82() { # LU-1031
- dd if=/dev/zero of=$DIR/$tfile bs=1M count=10
- local gid1=14091995
- local gid2=16022000
-
- multiop_bg_pause $DIR/$tfile OG${gid1}_g${gid1}c || return 1
- local MULTIPID1=$!
- multiop_bg_pause $DIR/$tfile O_G${gid2}r10g${gid2}c || return 2
- local MULTIPID2=$!
- kill -USR1 $MULTIPID2
- sleep 2
- if [[ `ps h -o comm -p $MULTIPID2` == "" ]]; then
- error "First grouplock does not block second one"
- else
- echo "Second grouplock blocks first one"
- fi
- kill -USR1 $MULTIPID1
- wait $MULTIPID1
- wait $MULTIPID2
-}
-run_test 82 "Basic grouplock test"
-
test_99() {
[ -z "$(which cvs 2>/dev/null)" ] && skip_env "could not find cvs"
sed -n '/pages per rpc/,/^$/p' |
awk '/'$pages':/ { reads += $2; writes += $6 }; \
END { print reads,writes }'))
- [ ${rpcs[0]} -ne $count ] && error "${rpcs[0]} != $count read RPCs" &&
- return 5
- [ ${rpcs[1]} -ne $count ] && error "${rpcs[1]} != $count write RPCs" &&
- return 6
-
- return 0
+ # allow one extra full-sized read RPC for async readahead
+ [[ ${rpcs[0]} == $count || ${rpcs[0]} == $((count + 1)) ]] ||
+ { error "${rpcs[0]} != $count read RPCs"; return 5; }
+ [[ ${rpcs[1]} == $count ]] ||
+ { error "${rpcs[1]} != $count write RPCs"; return 6; }
}
test_101g() {
skip "Limit is too small $LIMIT"
fi
- # Make LVF so higher that sleeping for $SLEEP is enough to _start_
- # killing locks. Some time was spent for creating locks. This means
- # that up to the moment of sleep finish we must have killed some of
- # them (10-100 locks). This depends on how fast ther were created.
- # Many of them were touched in almost the same moment and thus will
- # be killed in groups.
- local LVF=$(($MAX_HRS * 60 * 60 / $SLEEP * $LIMIT / $LRU_SIZE))
-
- # Use $LRU_SIZE_B here to take into account real number of locks
- # created in the case of CMD, LRU_SIZE_B != $NR in most of cases
- local LRU_SIZE_B=$LRU_SIZE
- log "LVF=$LVF"
+ # Make LVF so higher that sleeping for $SLEEP is enough to _start_
+ # killing locks. Some time was spent for creating locks. This means
+ # that up to the moment of sleep finish we must have killed some of
+ # them (10-100 locks). This depends on how fast ther were created.
+ # Many of them were touched in almost the same moment and thus will
+ # be killed in groups.
+ local LVF=$(($MAX_HRS * 60 * 60 / $SLEEP * $LIMIT / $LRU_SIZE * 100))
+
+ # Use $LRU_SIZE_B here to take into account real number of locks
+ # created in the case of CMD, LRU_SIZE_B != $NR in most of cases
+ local LRU_SIZE_B=$LRU_SIZE
+ log "LVF=$LVF"
local OLD_LVF=$($LCTL get_param -n $NSDIR.pool.lock_volume_factor)
log "OLD_LVF=$OLD_LVF"
$LCTL set_param -n $NSDIR.pool.lock_volume_factor $LVF
[ -z "$fid" ] && error "path2fid unable to get $tf FID"
# check that we get the same pathname back
- local found=$($LFS fid2path $MOUNT "$fid")
- [ -z "$found" ] && error "fid2path unable to get '$fid' path"
- [ "$found" == "$tf" ] ||
- error "fid2path($fid=path2fid($tf)) = $found != $tf"
+ local rootpath
+ local found
+ for rootpath in "$MOUNT" "$MOUNT///" "$MOUNT/$tfile"; do
+ echo "$rootpath $fid"
+ found=$($LFS fid2path $rootpath "$fid")
+ [ -z "$found" ] && error "fid2path unable to get '$fid' path"
+ [ "$found" == "$tf" ] || error "fid2path $found != $tf"
+ done
+
+ # check wrong root path format
+ rootpath=$MOUNT"_wrong"
+ found=$($LFS fid2path $rootpath "$fid")
+ [ -z "$found" ] || error "should fail ($rootpath != $MOUNT)"
}
run_test 154A "lfs path2fid and fid2path basic checks"
}
run_test 160k "Verify that changelog records are not lost"
+# Verifies that a file passed as a parameter has recently had an operation
+# performed on it that has generated an MTIME changelog which contains the
+# correct parent FID. As files might reside on a different MDT from the
+# parent directory in DNE configurations, the FIDs are translated to paths
+# before being compared, which should be identical
+compare_mtime_changelog() {
+ local file="${1}"
+ local mdtidx
+ local mtime
+ local cl_fid
+ local pdir
+ local dir
+
+ mdtidx=$($LFS getstripe --mdt-index $file)
+ mdtidx=$(printf "%04x" $mdtidx)
+
+ # Obtain the parent FID from the MTIME changelog
+ mtime=$($LFS changelog $FSNAME-MDT$mdtidx | tail -n 1 | grep MTIME)
+ [ -z "$mtime" ] && error "MTIME changelog not recorded"
+
+ cl_fid=$(sed -e 's/.* p=//' -e 's/ .*//' <<<$mtime)
+ [ -z "$cl_fid" ] && error "parent FID not present"
+
+ # Verify that the path for the parent FID is the same as the path for
+ # the test directory
+ pdir=$($LFS fid2path $MOUNT "$cl_fid")
+
+ dir=$(dirname $1)
+
+ [[ "${pdir%/}" == "$dir" ]] ||
+ error "MTIME changelog parent FID is wrong, expected $dir, got $pdir"
+}
+
+test_160l() {
+ [ $PARALLEL == "yes" ] && skip "skip parallel run"
+
+ remote_mds_nodsh && skip "remote MDS with nodsh"
+ [[ $MDS1_VERSION -ge $(version_code 2.13.55) ]] ||
+ skip "Need MDS version at least 2.13.55"
+
+ local cl_user
+
+ changelog_register || error "changelog_register failed"
+ cl_user="${CL_USERS[$SINGLEMDS]%% *}"
+
+ changelog_users $SINGLEMDS | grep -q $cl_user ||
+ error "User '$cl_user' not found in changelog_users"
+
+ # Clear some types so that MTIME changelogs are generated
+ changelog_chmask "-CREAT"
+ changelog_chmask "-CLOSE"
+
+ test_mkdir $DIR/$tdir || error "failed to mkdir $DIR/$tdir"
+
+ # Test CL_MTIME during setattr
+ touch $DIR/$tdir/$tfile
+ compare_mtime_changelog $DIR/$tdir/$tfile
+
+ # Test CL_MTIME during close
+ dd if=/dev/urandom of=$DIR/$tdir/${tfile}_2 bs=1M count=64 ||
+ error "cannot create file $DIR/$tdir/${tfile}_2"
+ compare_mtime_changelog $DIR/$tdir/${tfile}_2
+}
+run_test 160l "Verify that MTIME changelog records contain the parent FID"
+
test_161a() {
[ $PARALLEL == "yes" ] && skip "skip parallel run"
test_180a() {
[ $PARALLEL == "yes" ] && skip "skip parallel run"
- if ! module_loaded obdecho; then
+ if ! [ -d /sys/fs/lustre/echo_client ] &&
+ ! module_loaded obdecho; then
load_module obdecho/obdecho &&
stack_trap "rmmod obdecho" EXIT ||
error "unable to load obdecho on client"
local cmpn_arg=$(cmp -n 2>&1 | grep "invalid option")
[ -n "$cmpn_arg" ] && skip_env "cmp does not support -n"
check_swap_layouts_support
+ check_swap_layout_no_dom $DIR
local dir0=$DIR/$tdir/$testnum
mkdir -p $dir0 || error "creating dir $dir0"
test_184d() {
check_swap_layouts_support
+ check_swap_layout_no_dom $DIR
[ -z "$(which getfattr 2>/dev/null)" ] &&
skip_env "no getfattr command"
[[ $MDS1_VERSION -ge $(version_code 2.6.94) ]] ||
skip "Need MDS version at least 2.6.94"
check_swap_layouts_support
+ check_swap_layout_no_dom $DIR
[ -z "$(which getfattr 2>/dev/null)" ] &&
skip_env "no getfattr command"
}
run_test 205b "Verify job stats jobid and output format"
+# LU-13733
+test_205c() {
+ $LCTL set_param llite.*.stats=0
+ dd if=/dev/zero of=$DIR/$tfile.1 bs=4k count=1
+ $LCTL get_param llite.*.stats
+ $LCTL get_param llite.*.stats | grep \
+ "write_bytes *1 samples \[bytes\] 4096 4096 4096 16777216" ||
+ error "wrong client stats format found"
+}
+run_test 205c "Verify client stats format"
+
# LU-1480, LU-1773 and LU-1657
test_206() {
mkdir -p $DIR/$tdir
sync; sleep 5; sync;
echo 3 > /proc/sys/vm/drop_caches
+ [ -f /sys/kernel/slab/ptlrpc_cache/shrink ] &&
+ echo 1 > /sys/kernel/slab/ptlrpc_cache/shrink
req_before=$(awk '/ptlrpc_cache / { print $2 }' /proc/slabinfo)
# open/close 500 times
done
echo 3 > /proc/sys/vm/drop_caches
+ [ -f /sys/kernel/slab/ptlrpc_cache/shrink ] &&
+ echo 1 > /sys/kernel/slab/ptlrpc_cache/shrink
req_after=$(awk '/ptlrpc_cache / { print $2 }' /proc/slabinfo)
echo "before: $req_before, after: $req_after"
}
run_test 226b "call path2fid and fid2path on files of all type under remote dir"
+test_226c () {
+ [ $MDSCOUNT -lt 2 ] && skip_env "needs >= 2 MDTs"
+ [[ $MDS1_VERSION -ge $(version_code 2.13.55) ]] ||
+ skip "Need MDS version at least 2.13.55"
+
+ local submnt=/mnt/submnt
+ local srcfile=/etc/passwd
+ local dstfile=$submnt/passwd
+ local path
+ local fid
+
+ rm -rf $DIR/$tdir
+ rm -rf $submnt
+ $LFS setdirstripe -c -1 -i 1 $DIR/$tdir ||
+ error "create remote directory failed"
+ mkdir -p $submnt || error "create $submnt failed"
+ $MOUNT_CMD $MGSNID:/$FSNAME/$tdir $submnt ||
+ error "mount $submnt failed"
+ stack_trap "umount $submnt" EXIT
+
+ cp $srcfile $dstfile
+ fid=$($LFS path2fid $dstfile)
+ path=$($LFS fid2path $submnt "$fid")
+ [ "$path" = "$dstfile" ] ||
+ error "fid2path $submnt $fid failed ($path != $dstfile)"
+}
+run_test 226c "call path2fid and fid2path under remote dir with subdir mount"
+
# LU-1299 Executing or running ldd on a truncated executable does not
# cause an out-of-memory condition.
test_227() {
skip "Need MDS version at least 2.13.52"
local mdts=$(comma_list $(mdts_nodes))
+ local timeout=100
local restripe_status
local delta
local i
local j
+ [[ $(facet_fstype mds1) == zfs ]] && timeout=300
+
# in case "crush" hash type is not set
do_nodes $mdts "$LCTL set_param lod.*.mdt_hash=crush"
$LFS setdirstripe -c $i $DIR/$tdir ||
error "split -c $i $tdir failed"
wait_update $HOSTNAME \
- "$LFS getdirstripe -H $DIR/$tdir" "crush" 100 ||
+ "$LFS getdirstripe -H $DIR/$tdir" "crush" $timeout ||
error "dir split not finished"
delta=$(do_nodes $mdts "lctl get_param -n mdt.*MDT*.md_stats" |
awk '/migrate/ {sum += $2} END { print sum }')
skip "Need MDS version at least 2.13.52"
local mdts=$(comma_list $(mdts_nodes))
+ local timeout=100
local restripe_status
local delta
local i
local j
+ [[ $(facet_fstype mds1) == zfs ]] && timeout=300
+
do_nodes $mdts "$LCTL set_param lod.*.mdt_hash=crush"
restripe_status=$(do_facet mds1 $LCTL get_param -n \
error "split -c $i $tdir failed"
[ $i -eq 1 ] && mdt_hash="none"
wait_update $HOSTNAME \
- "$LFS getdirstripe -H $DIR/$tdir" $mdt_hash 100 ||
+ "$LFS getdirstripe -H $DIR/$tdir" $mdt_hash $timeout ||
error "dir merge not finished"
delta=$(do_nodes $mdts "lctl get_param -n mdt.*MDT*.md_stats" |
awk '/migrate/ {sum += $2} END { print sum }')
}
run_test 230q "dir auto split"
+test_230r() {
+ [[ $PARALLEL != "yes" ]] || skip "skip parallel run"
+ [[ $MDSCOUNT -ge 2 ]] || skip_env "needs >= 2 MDTs"
+ [[ $MDS1_VERSION -ge $(version_code 2.13.54) ]] ||
+ skip "Need MDS version at least 2.13.54"
+
+ # maximum amount of local locks:
+ # parent striped dir - 2 locks
+ # new stripe in parent to migrate to - 1 lock
+ # source and target - 2 locks
+ # Total 5 locks for regular file
+ mkdir -p $DIR/$tdir
+ $LFS mkdir -i1 -c2 $DIR/$tdir/dir1
+ touch $DIR/$tdir/dir1/eee
+
+ # create 4 hardlink for 4 more locks
+ # Total: 9 locks > RS_MAX_LOCKS (8)
+ $LFS mkdir -i1 -c1 $DIR/$tdir/dir2
+ $LFS mkdir -i1 -c1 $DIR/$tdir/dir3
+ $LFS mkdir -i1 -c1 $DIR/$tdir/dir4
+ $LFS mkdir -i1 -c1 $DIR/$tdir/dir5
+ ln $DIR/$tdir/dir1/eee $DIR/$tdir/dir2/eee
+ ln $DIR/$tdir/dir1/eee $DIR/$tdir/dir3/eee
+ ln $DIR/$tdir/dir1/eee $DIR/$tdir/dir4/eee
+ ln $DIR/$tdir/dir1/eee $DIR/$tdir/dir5/eee
+
+ cancel_lru_locks mdc
+
+ $LFS migrate -m1 -c1 $DIR/$tdir/dir1 ||
+ error "migrate dir fails"
+
+ rm -rf $DIR/$tdir || error "rm dir failed after migration"
+}
+run_test 230r "migrate with too many local locks"
+
test_231a()
{
# For simplicity this test assumes that max_pages_per_rpc
$LFS fid2path $submount $fid && error "fid2path should fail"
cleanup_247 $submount
}
-run_test 247c "running fid2path outside root"
+run_test 247c "running fid2path outside subdirectory root"
test_247d() {
lctl get_param -n mdc.$FSNAME-MDT0000*.import | grep -q subtree ||
FILESET="$FILESET/$tdir" mount_client $submount ||
error "mount $submount failed"
trap "cleanup_247 $submount" EXIT
- local fid=$($LFS path2fid $submount/dir1)
- $LFS fid2path $submount $fid || error "fid2path should succeed"
+
+ local td=$submount/dir1
+ local fid=$($LFS path2fid $td)
+ [ -z "$fid" ] && error "path2fid unable to get $td FID"
+
+ # check that we get the same pathname back
+ local rootpath
+ local found
+ for rootpath in "$submount" "$submount///" "$submount/dir1"; do
+ echo "$rootpath $fid"
+ found=$($LFS fid2path $rootpath "$fid")
+ [ -n "found" ] || error "fid2path should succeed"
+ [ "$found" == "$td" ] || error "fid2path $found != $td"
+ done
+ # check wrong root path format
+ rootpath=$submount"_wrong"
+ found=$($LFS fid2path $rootpath "$fid")
+ [ -z "$found" ] || error "fid2path should fail ($rootpath != $submount)"
+
cleanup_247 $submount
}
-run_test 247d "running fid2path inside root"
+run_test 247d "running fid2path inside subdirectory root"
# LU-8037
test_247e() {
--filename=$DIR/$tfile
[ $? -eq 0 ] || error "fio mixed read write error"
+ echo "AIO with large block size ${size}M"
+ fio --name=rand-rw --rw=randrw --bs=${size}M --direct=1 \
+ --numjobs=1 --fallocate=none --ioengine=libaio \
+ --iodepth=16 --allow_file_create=0 --size=${size}M \
+ --filename=$DIR/$tfile
+ [ $? -eq 0 ] || error "fio large block size failed"
+
rm -rf $DIR/$tfile
$LCTL set_param debug="$saved_debug"
}
run_test 398c "run fio to test AIO"
+test_398d() { # LU-13846
+ test -f aiocp || skip_env "no aiocp installed"
+ local aio_file=$DIR/aio_file
+
+ $LFS setstripe -c -1 -S 1M $DIR/$tfile $aio_file
+
+ dd if=/dev/urandom of=$DIR/$tfile bs=1M count=64
+ aiocp -a $PAGE_SIZE -b 64M -s 64M -f O_DIRECT $DIR/$tfile $aio_file
+
+ diff $DIR/$tfile $aio_file || "file diff after aiocp"
+
+ # make sure we don't crash and fail properly
+ aiocp -a 512 -b 64M -s 64M -f O_DIRECT $DIR/$tfile $aio_file &&
+ error "aio not aligned with PAGE SIZE should fail"
+
+ rm -rf $DIR/$tfile $aio_file
+}
+run_test 398d "run aiocp to verify block size > stripe size"
+
test_fake_rw() {
local read_write=$1
if [ "$read_write" = "write" ]; then
skip "Layout swap lock is not supported"
check_swap_layouts_support
+ check_swap_layout_no_dom $DIR
test_mkdir $DIR/$tdir
swap_lock_test -d $DIR/$tdir ||
{
[[ $CLIENT_VERSION -lt $(version_code 2.9.59) ]] &&
skip "Need client version at least 2.9.59"
+ [ -f $LUSTRE/tests/kernel/kinode.ko ] ||
+ skip "Need MODULES build"
# Create a file, and stat it from the kernel
local testfile=$DIR/$tfile
}
run_test 424 "simulate ENOMEM in ptl_send_rpc bulk reply ME attach"
+test_425() {
+ test_mkdir -c -1 $DIR/$tdir
+ $LFS setstripe -c -1 $DIR/$tdir
+
+ lru_resize_disable "" 100
+ stack_trap "lru_resize_enable" EXIT
+
+ sleep 5
+
+ for i in $(seq $((MDSCOUNT * 125))); do
+ local t=$DIR/$tdir/$tfile_$i
+
+ dd if=/dev/zero of=$t bs=4K count=1 > /dev/null 2>&1 ||
+ error_noexit "Create file $t"
+ done
+ stack_trap "rm -rf $DIR/$tdir" EXIT
+
+ for oscparam in $($LCTL list_param ldlm.namespaces.*osc-[-0-9a-f]*); do
+ local lru_size=$($LCTL get_param -n $oscparam.lru_size)
+ local lock_count=$($LCTL get_param -n $oscparam.lock_count)
+
+ [ $lock_count -le $lru_size ] ||
+ error "osc lock count $lock_count > lru size $lru_size"
+ done
+
+ for mdcparam in $($LCTL list_param ldlm.namespaces.*mdc-*); do
+ local lru_size=$($LCTL get_param -n $mdcparam.lru_size)
+ local lock_count=$($LCTL get_param -n $mdcparam.lock_count)
+
+ [ $lock_count -le $lru_size ] ||
+ error "mdc lock count $lock_count > lru size $lru_size"
+ done
+}
+run_test 425 "lock count should not exceed lru size"
+
prep_801() {
[[ $MDS1_VERSION -lt $(version_code 2.9.55) ]] ||
[[ $OST1_VERSION -lt $(version_code 2.9.55) ]] &&