# Load the suite configuration (overridable via $CONFIG) and start
# logging before any test runs.
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging

# Quote the substitution: facet_fstype may print nothing on a broken
# setup, and an unquoted empty word would make the test a syntax error.
if [ "$(facet_fstype $SINGLEMDS)" = "zfs" ]; then
	# bug number for skipped test: LU-2840 LU-2189 LU-2776
	ALWAYS_EXCEPT="$ALWAYS_EXCEPT 21 36 51a"
	# LU-2829 / LU-2887 - make allowances for ZFS slowness
	TEST33_NFILES=${TEST33_NFILES:-1000}
fi

[ "$SLOW" = "no" ] && EXCEPT_SLOW="33a"

run_test 21 " Try to remove mountpoint on another dir ===="
test_23() { # Bug 5972
- local at_diff=$(do_facet $SINGLEMDS $LCTL get_param -n mdd.*.atime_diff)
+ local at_diff=$(do_facet $SINGLEMDS \
+ $LCTL get_param -n mdd.*MDT0000*.atime_diff | head -1)
echo "atime should be updated while another read" > $DIR1/$tfile
# clear the lock(mode: LCK_PW) gotten from creating operation
run_test 51a "layout lock: refresh layout should work"
test_51b() {
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.59) ]] ||
+ { skip "Need MDS version at least 2.3.59"; return 0; }
+
local tmpfile=`mktemp`
# create an empty file
run_test 70b "remove files after calling rm_entry"
test_71() {
+ local server_version=$(lustre_version_code $SINGLEMDS)
+
+ [[ $server_version -lt $(version_code 2.1.6) ]] &&
+ skip "Need MDS version at least 2.1.6" && return
+
+ # Patch not applied to 2.2 and 2.3 branches
+ [[ $server_version -ge $(version_code 2.2.0) ]] &&
+ [[ $server_version -lt $(version_code 2.4.0) ]] &&
+ skip "Need MDS version at least 2.4.0" && return
+
checkfiemap --test ||
{ skip "checkfiemap not runnable: $?" && return; }
# write data this way: hole - data - hole - data
dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=1 count=1
[ "$(facet_fstype ost$(($($GETSTRIPE -i $DIR1/$tfile) + 1)))" = \
- "zfs" ] &&
+ "zfs" ] &&
skip "ORI-366/LU-1941: FIEMAP unimplemented on ZFS" && return 0
dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=3 count=1
GET_STAT="lctl get_param -n ldlm.services.ldlm_cbd.stats"
}
run_test 71 "correct file map just after write operation is finished"
# Verify that the client-side xattr cache stays coherent across two
# mounts ($DIR1/$DIR2): user xattrs set on one mount must be readable
# on the other, and trusted.link must be refreshed after a hard link.
test_72() {
	local p="$TMP/sanityN-$TESTNAME.parameters"
	local tlink1
	local tlink2

	# Save the current setting so it can be restored at the end;
	# skip entirely on clients without xattr cache support.
	save_lustre_params client "llite.*.xattr_cache" > $p
	lctl set_param llite.*.xattr_cache 1 ||
		{ skip "xattr cache is not supported"; return 0; }

	# Cross-mount visibility, in both directions.
	touch $DIR1/$tfile
	setfattr -n user.attr1 -v value1 $DIR1/$tfile ||
		error "setfattr1 failed"
	getfattr -n user.attr1 $DIR2/$tfile | grep value1 ||
		error "getfattr1 failed"
	setfattr -n user.attr1 -v value2 $DIR2/$tfile ||
		error "setfattr2 failed"
	getfattr -n user.attr1 $DIR1/$tfile | grep value2 ||
		error "getfattr2 failed"

	# check that trusted.link is consistent
	tlink1=$(getfattr -n trusted.link $DIR1/$tfile | md5sum)
	ln $DIR2/$tfile $DIR2/$tfile-2 || error "failed to link"
	tlink2=$(getfattr -n trusted.link $DIR1/$tfile | md5sum)
	echo "$tlink1 $tlink2"
	[ "$tlink1" = "$tlink2" ] && error "trusted.link should have changed!"

	rm -f $DIR2/$tfile

	restore_lustre_params < $p
	rm -f $p
}
run_test 72 "getxattr/setxattr cache should be consistent between nodes"
+
# Verify that a getxattr on one client does not cancel the xattr lock
# cached by the other: after priming both clients, further getfattr
# calls must be served from the llite getxattr cache (hit counters).
test_73() {
	local p="$TMP/sanityN-$TESTNAME.parameters"

	save_lustre_params client "llite.*.xattr_cache" > $p
	lctl set_param llite.*.xattr_cache 1 ||
		{ skip "xattr cache is not supported"; return 0; }

	# Prime the cache on both mounts.
	touch $DIR1/$tfile
	setfattr -n user.attr1 -v value1 $DIR1/$tfile ||
		error "setfattr1 failed"
	getfattr -n user.attr1 $DIR2/$tfile || error "getfattr1 failed"
	getfattr -n user.attr1 $DIR1/$tfile || error "getfattr2 failed"
	clear_llite_stats
	# PR lock should be cached by now on both clients
	getfattr -n user.attr1 $DIR1/$tfile || error "getfattr3 failed"
	# 2 hits for getfattr(0)+getfattr(size)
	[ $(calc_llite_stats getxattr_hits) -eq 2 ] ||
		error "not cached in $DIR1"
	getfattr -n user.attr1 $DIR2/$tfile || error "getfattr4 failed"
	# 4 hits for more getfattr(0)+getfattr(size)
	[ $(calc_llite_stats getxattr_hits) -eq 4 ] ||
		error "not cached in $DIR2"
	rm -f $DIR2/$tfile

	restore_lustre_params < $p
	rm -f $p
}
run_test 73 "getxattr should not cause xattr lock cancellation"
+
# Regression test for a flock deadlock between two files opened through
# different mounts (exercised via the flocks_test helper, mode 4).
test_74() {
	# NOTE(review): 2.4.93 is presumably when the server-side flock
	# fix landed — confirm against the corresponding LU ticket.
	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.93) ] &&
		skip "Need MDS version at least 2.4.93" && return

	dd if=/dev/zero of=$DIR1/$tfile-1 bs=1K count=1
	dd if=/dev/zero of=$DIR1/$tfile-2 bs=1K count=1
	flocks_test 4 $DIR1/$tfile-1 $DIR2/$tfile-2
}
run_test 74 "flock deadlock: different mounts =============="
+
log "cleanup: ======================================================"
# Unmount the second client mount point if it is still mounted.
# grep -q with a quoted pattern replaces the redundant
# [ "$(mount | grep ...)" ] test and is safe for paths with spaces.
mount | grep -q "$MOUNT2" && umount $MOUNT2