ALWAYS_EXCEPT+=" 45 317"
fi
-# skip nfs tests on kernels >= 4.14.0 until they are fixed
-if [ $LINUX_VERSION_CODE -ge $(version_code 4.14.0) ]; then
+# skip nfs tests on kernels >= 4.12.0 until they are fixed
+if [ $LINUX_VERSION_CODE -ge $(version_code 4.12.0) ]; then
# bug number: LU-12661
ALWAYS_EXCEPT+=" 817"
fi
}
run_test 48e "Access to recreated parent subdir (should return errors)"
+# Verify that unlinking a directory left with a bogus non-zero nlink
+# (ref-del failures injected on MDS1) returns an error rather than
+# LBUG()ing the MDS, and that the parent can still be force-removed.
+test_48f() {
+	[[ $MDS1_VERSION -ge $(version_code 2.13.55) ]] ||
+		skip "need MDS >= 2.13.55"
+	[[ $MDSCOUNT -ge 2 ]] || skip "needs >= 2 MDTs"
+	# presumably the cross-MDT unlink path only triggers when the two
+	# MDTs run on different hosts — as required below
+	[[ "$(facet_host mds1)" != "$(facet_host mds2)" ]] ||
+		skip "needs different host for mdt1 mdt2"
+	[[ $(facet_fstype mds1) == ldiskfs ]] || skip "ldiskfs only"
+
+	# parent on MDT0, subdirs on MDT1, so each unlink is a cross-MDT op
+	$LFS mkdir -i0 $DIR/$tdir
+	$LFS mkdir -i 1 $DIR/$tdir/sub1 $DIR/$tdir/sub2 $DIR/$tdir/sub3
+
+	# make each subdir removal fail after partially updating nlink
+	for d in sub1 sub2 sub3; do
+		#define OBD_FAIL_OSD_REF_DEL	0x19c
+		do_facet mds1 $LCTL set_param fail_loc=0x8000019c
+		rm -rf $DIR/$tdir/$d && error "rm $d should fail"
+	done
+
+	# parent now carries stale link counts; removing it must succeed
+	# without crashing the server
+	rm -d --interactive=never $DIR/$tdir || error "rm $tdir fail"
+}
+run_test 48f "non-zero nlink dir unlink won't LBUG()"
+
test_49() { # LU-1030
[ $PARALLEL == "yes" ] && skip "skip parallel run"
remote_ost_nodsh && skip "remote OST with nodsh"
do_facet mds$index $LCTL set_param fail_loc=0x8000019a \
> /dev/null
- usleep 100
+ sleep 0.01
done
kill -9 $pid
}
run_test 74c "ldlm_lock_create error path, (shouldn't LBUG)"
-num_inodes() {
-	[ -f /sys/kernel/slab/lustre_inode_cache/shrink ] &&
-		echo 1 > /sys/kernel/slab/lustre_inode_cache/shrink
-	awk '/lustre_inode_cache/ {print $2; exit}' /proc/slabinfo
+# sysfs directory for the lustre_inode_cache slab (present with SLUB)
+slab_lic=/sys/kernel/slab/lustre_inode_cache
+# Print the current object count of the lustre_inode_cache slab.
+# Ask the kernel to shrink the cache first so the count is up to date,
+# then prefer the per-slab sysfs "objects" file, falling back to
+# parsing /proc/slabinfo (field 2 is the active-object count).
+num_objects() {
+	[ -f $slab_lic/shrink ] && echo 1 > $slab_lic/shrink
+	[ -f $slab_lic/objects ] && awk '{ print $1 }' $slab_lic/objects ||
+		awk '/lustre_inode_cache/ { print $2; exit }' /proc/slabinfo
}
-test_76() { # Now for bug 20433, added originally in bug 1443
+# Check that repeatedly creating and removing a file does not leave the
+# lustre inode slab cache permanently larger than before the run.
+test_76() { # Now for b=20433, added originally in b=1443
	[ $PARALLEL == "yes" ] && skip "skip parallel run"
	cancel_lru_locks osc
+	# there may be some slab objects cached per core
	local cpus=$(getconf _NPROCESSORS_ONLN 2>/dev/null)
-	local before=$(num_inodes)
+	local before=$(num_objects)
	local count=$((512 * cpus))
-	[ "$SLOW" = "no" ] && count=$((64 * cpus))
+	[ "$SLOW" = "no" ] && count=$((128 * cpus))
+	# tolerate up to 10% of the created objects remaining cached
+	local margin=$((count / 10))
+	# NOTE(review): "aliases" presumably counts merged slab caches
+	# whose objects share this count, hence the wider margin — confirm
+	if [[ -f $slab_lic/aliases ]]; then
+		local aliases=$(cat $slab_lic/aliases)
+		(( aliases > 0 )) && margin=$((margin * aliases))
+	fi
-	echo "before inodes: $before"
+	echo "before slab objects: $before"
	for i in $(seq $count); do
		touch $DIR/$tfile
		rm -f $DIR/$tfile
	done
	cancel_lru_locks osc
-	local after=$(num_inodes)
-	echo "after inodes: $after"
-	while (( after > before + 8 * ${cpus:-1} )); do
+	local after=$(num_objects)
+	echo "created: $count, after slab objects: $after"
+	# shared slab counts are not very accurate, allow significant margin
+	# the main goal is that the cache growth is not permanently > $count
+	while (( after > before + margin )); do
		sleep 1
-		after=$(num_inodes)
+		after=$(num_objects)
		wait=$((wait + 1))
-		(( wait % 5 == 0 )) && echo "wait $wait seconds inodes: $after"
-		if (( wait > 30 )); then
-			error "inode slab grew from $before to $after"
+		(( wait % 5 == 0 )) && echo "wait $wait seconds objects: $after"
+		# give kernel reclaim up to 60s to drop below the margin
+		if (( wait > 60 )); then
+			error "inode slab grew from $before+$margin to $after"
		fi
	done
}
test_180a() {
[ $PARALLEL == "yes" ] && skip "skip parallel run"
- if ! module_loaded obdecho; then
+ if ! [ -d /sys/fs/lustre/echo_client ] &&
+ ! module_loaded obdecho; then
load_module obdecho/obdecho &&
stack_trap "rmmod obdecho" EXIT ||
error "unable to load obdecho on client"
}
run_test 230q "dir auto split"
+# Regression test: a directory migration that must hold more local
+# locks than fit in one reply (9 > RS_MAX_LOCKS == 8) should still
+# succeed rather than fail or crash.
+test_230r() {
+	[[ $PARALLEL != "yes" ]] || skip "skip parallel run"
+	[[ $MDSCOUNT -ge 2 ]] || skip_env "needs >= 2 MDTs"
+	[[ $MDS1_VERSION -ge $(version_code 2.13.54) ]] ||
+		skip "Need MDS version at least 2.13.54"
+
+	# maximum amount of local locks:
+	# parent striped dir - 2 locks
+	# new stripe in parent to migrate to - 1 lock
+	# source and target - 2 locks
+	# Total 5 locks for regular file
+	mkdir -p $DIR/$tdir
+	$LFS mkdir -i1 -c2 $DIR/$tdir/dir1
+	touch $DIR/$tdir/dir1/eee
+
+	# create 4 hardlink for 4 more locks
+	# Total: 9 locks > RS_MAX_LOCKS (8)
+	$LFS mkdir -i1 -c1 $DIR/$tdir/dir2
+	$LFS mkdir -i1 -c1 $DIR/$tdir/dir3
+	$LFS mkdir -i1 -c1 $DIR/$tdir/dir4
+	$LFS mkdir -i1 -c1 $DIR/$tdir/dir5
+	ln $DIR/$tdir/dir1/eee $DIR/$tdir/dir2/eee
+	ln $DIR/$tdir/dir1/eee $DIR/$tdir/dir3/eee
+	ln $DIR/$tdir/dir1/eee $DIR/$tdir/dir4/eee
+	ln $DIR/$tdir/dir1/eee $DIR/$tdir/dir5/eee
+
+	# drop cached MDC locks — presumably so the migrate has to take
+	# every lock itself in a single operation; confirm intent
+	cancel_lru_locks mdc
+
+	$LFS migrate -m1 -c1 $DIR/$tdir/dir1 ||
+		error "migrate dir fails"
+
+	# the migrated tree must still be fully removable
+	rm -rf $DIR/$tdir || error "rm dir failed after migration"
+}
+run_test 230r "migrate with too many local locks"
+
test_231a()
{
# For simplicity this test assumes that max_pages_per_rpc
aiocp -a $PAGE_SIZE -b 64M -s 64M -f O_DIRECT $DIR/$tfile $aio_file
diff $DIR/$tfile $aio_file || "file diff after aiocp"
+
+ # make sure we don't crash and fail properly
+ aiocp -a 512 -b 64M -s 64M -f O_DIRECT $DIR/$tfile $aio_file &&
+ error "aio not aligned with PAGE SIZE should fail"
+
rm -rf $DIR/$tfile $aio_file
}
run_test 398d "run aiocp to verify block size > stripe size"