. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging
-[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
# bug number for skipped test: LU-2840 LU-2189 LU-2776
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 21 36 51a"
+# LU-2829 / LU-2887 - make allowances for ZFS slowness
+ TEST33_NFILES=${TEST33_NFILES:-1000}
+fi
[ "$SLOW" = "no" ] && EXCEPT_SLOW="33a"
run_test 21 " Try to remove mountpoint on another dir ===="
test_23() { # Bug 5972
- echo "others should see updated atime while another read" > $DIR1/f23
-
+ local at_diff=$(do_facet $SINGLEMDS \
+ $LCTL get_param -n mdd.*MDT0000*.atime_diff | head -1)
+ # atime_diff: server-side window (seconds); the MDS only updates atime
+ # when the cached value is older than this
+ echo "atime should be updated while another read" > $DIR1/$tfile
+
# clear the lock(mode: LCK_PW) gotten from creating operation
cancel_lru_locks osc
-
- time1=`date +%s`
- #MAX_ATIME_DIFF 60, we update atime only if older than 60 seconds
- sleep 61
-
- multiop_bg_pause $DIR1/f23 or20_c || return 1
+ time1=$(date +%s)
+ echo "now is $time1"
+ # wait just past atime_diff so the following read is guaranteed to
+ # trigger an atime update instead of being suppressed by the window
+ sleep $((at_diff + 1))
+
+ echo "starting reads"
+ multiop_bg_pause $DIR1/$tfile or20_c || return 1
# with SOM and opencache enabled, we need to close a file and cancel
# open lock to get atime propogated to MDS
- kill -USR1 $!
+ kill -USR1 $! || return 2
cancel_lru_locks mdc
- time2=`stat -c "%X" $DIR2/f23`
+ # NOTE(review): stat is done via $DIR rather than $DIR2 — confirm the
+ # cross-mount atime propagation this test describes is still exercised
+ time2=$(stat -c "%X" $DIR/$tfile)
+ echo "new atime is $time2"
- if (( $time2 <= $time1 )); then
- error "atime doesn't update among nodes"
- fi
-
- rm -f $DIR1/f23 || error "rm -f $DIR1/f23 failed"
+ [ $time2 -gt $time1 ] || error "atime was not updated"
+ rm -f $DIR1/$tfile || error "rm -f $DIR1/$tfile failed"
true
}
run_test 23 " others should see updated atime while another read===="
lfs df -i $DIR2 || error "lfs df -i $DIR2 failed"
lfs df $DIR1/$tfile || error "lfs df $DIR1/$tfile failed"
lfs df -ih $DIR2/$tfile || error "lfs df -ih $DIR2/$tfile failed"
-
+
OSC=`lctl dl | awk '/-osc-|OSC.*MNT/ {print $4}' | head -n 1`
# OSC=`lctl dl | awk '/-osc-/ {print $4}' | head -n 1`
lctl --device %$OSC deactivate
}
test_32a() { # bug 11270
- local p="$TMP/sanityN-$TESTNAME.parameters"
- save_lustre_params $HOSTNAME osc.*.lockless_truncate > $p
+ local p="$TMP/$TESTSUITE-$TESTNAME.parameters"
+ save_lustre_params client "osc.*.lockless_truncate" > $p
cancel_lru_locks osc
enable_lockless_truncate 1
rm -f $DIR1/$tfile
test_32b() { # bug 11270
remote_ost_nodsh && skip "remote OST with nodsh" && return
- local node
- local p="$TMP/sanityN-$TESTNAME.parameters"
- save_lustre_params $HOSTNAME "osc.*.contention_seconds" > $p
- for node in $(osts_nodes); do
- save_lustre_params $node "ldlm.namespaces.filter-*.max_nolock_bytes" >> $p
- save_lustre_params $node "ldlm.namespaces.filter-*.contended_locks" >> $p
- save_lustre_params $node "ldlm.namespaces.filter-*.contention_seconds" >> $p
- done
- clear_osc_stats
+ local node
+ local facets=$(get_facets OST)
+ local p="$TMP/$TESTSUITE-$TESTNAME.parameters"
+
+ save_lustre_params client "osc.*.contention_seconds" > $p
+ save_lustre_params $facets \
+ "ldlm.namespaces.filter-*.max_nolock_bytes" >> $p
+ save_lustre_params $facets \
+ "ldlm.namespaces.filter-*.contended_locks" >> $p
+ save_lustre_params $facets \
+ "ldlm.namespaces.filter-*.contention_seconds" >> $p
+ clear_osc_stats
+
# agressive lockless i/o settings
for node in $(osts_nodes); do
do_node $node 'lctl set_param -n ldlm.namespaces.filter-*.max_nolock_bytes 2000000; lctl set_param -n ldlm.namespaces.filter-*.contended_locks 0; lctl set_param -n ldlm.namespaces.filter-*.contention_seconds 60'
local param_file=$TMP/$tfile-params
local fstype=$(facet_fstype $SINGLEMDS)
- save_lustre_params $(comma_list $(mdts_nodes)) "mdt.*.commit_on_sharing" > $param_file
+ save_lustre_params $(get_facets MDS) \
+ "mdt.*.commit_on_sharing" > $param_file
local COS
local jbdold="N/A"
local nfiles=${TEST33_NFILES:-10000}
local param_file=$TMP/$tfile-params
- save_lustre_params $(comma_list $(mdts_nodes)) \
- "mdt.*.commit_on_sharing" > $param_file
+ save_lustre_params $(get_facets MDS) \
+ "mdt.*.commit_on_sharing" > $param_file
+
local COS
local jbdold
local jbdnew
run_test 51a "layout lock: refresh layout should work"
test_51b() {
+ [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.59) ]] ||
+ { skip "Need MDS version at least 2.3.59"; return 0; }
+
local tmpfile=`mktemp`
# create an empty file
}
run_test 70b "remove files after calling rm_entry"
+test_71() {
+ local server_version=$(lustre_version_code $SINGLEMDS)
+
+ [[ $server_version -lt $(version_code 2.1.6) ]] &&
+ skip "Need MDS version at least 2.1.6" && return
+
+ # Patch not applied to 2.2 and 2.3 branches
+ [[ $server_version -ge $(version_code 2.2.0) ]] &&
+ [[ $server_version -lt $(version_code 2.4.0) ]] &&
+ skip "Need MDS version at least 2.4.0" && return
+
+ checkfiemap --test ||
+ { skip "checkfiemap not runnable: $?" && return; }
+ # write data this way: hole - data - hole - data
+ dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=1 count=1
+ [ "$(facet_fstype ost$(($($GETSTRIPE -i $DIR1/$tfile) + 1)))" = \
+ "zfs" ] &&
+ skip "ORI-366/LU-1941: FIEMAP unimplemented on ZFS" && return 0
+ dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=3 count=1
+ # count ldlm blocking callbacks before/after each FIEMAP to detect
+ # unwanted lock cancellations
+ GET_STAT="lctl get_param -n ldlm.services.ldlm_cbd.stats"
+ stat $DIR2/$tfile
+ local can1=$($GET_STAT | awk '/ldlm_bl_callback/ {print $2}')
+ echo $can1
+ checkfiemap $DIR2/$tfile 81920 ||
+ error "data is not flushed from client"
+ local can2=$($GET_STAT | awk '/ldlm_bl_callback/ {print $2}')
+ echo $can2
+
+ # common case of "create file, copy file" on a single node
+ # should not flush data from ost
+ dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=1 count=1
+ dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=3 count=1
+ stat $DIR1/$tfile
+ local can3=$($GET_STAT | awk '/ldlm_bl_callback/ {print $2}')
+ echo $can3
+ checkfiemap $DIR1/$tfile 81920 ||
+ error "checkfiemap $DIR1/$tfile failed"
+ local can4=$($GET_STAT | awk '/ldlm_bl_callback/ {print $2}')
+ echo $can4
+ [ $can3 -eq $can4 ] || error $((can4 - can3)) "cancel RPC occurred."
+}
+run_test 71 "correct file map just after write operation is finished"
+
+test_72() {
+ # use $TESTSUITE (consistent with the other call-sites this patch
+ # converts) instead of hardcoding "sanityN"
+ local p="$TMP/$TESTSUITE-$TESTNAME.parameters"
+ local tlink1
+ local tlink2
+ save_lustre_params client "llite.*.xattr_cache" > $p
+ lctl set_param llite.*.xattr_cache 1 ||
+ { rm -f $p; skip "xattr cache is not supported"; return 0; }
+
+ # a setxattr on one mount must be visible through the other mount,
+ # in both directions, while the xattr cache is enabled
+ touch $DIR1/$tfile
+ setfattr -n user.attr1 -v value1 $DIR1/$tfile ||
+ error "setfattr1 failed"
+ getfattr -n user.attr1 $DIR2/$tfile | grep value1 ||
+ error "getfattr1 failed"
+ setfattr -n user.attr1 -v value2 $DIR2/$tfile ||
+ error "setfattr2 failed"
+ getfattr -n user.attr1 $DIR1/$tfile | grep value2 ||
+ error "getfattr2 failed"
+
+ # check that trusted.link is consistent
+ tlink1=$(getfattr -n trusted.link $DIR1/$tfile | md5sum)
+ ln $DIR2/$tfile $DIR2/$tfile-2 || error "failed to link"
+ tlink2=$(getfattr -n trusted.link $DIR1/$tfile | md5sum)
+ echo "$tlink1 $tlink2"
+ [ "$tlink1" = "$tlink2" ] && error "trusted.link should have changed!"
+
+ # remove the hard link as well as the original file
+ rm -f $DIR2/$tfile $DIR2/$tfile-2
+
+ restore_lustre_params < $p
+ rm -f $p
+}
+run_test 72 "getxattr/setxattr cache should be consistent between nodes"
+
+test_73() {
+ # use $TESTSUITE (consistent with the other call-sites this patch
+ # converts) instead of hardcoding "sanityN"
+ local p="$TMP/$TESTSUITE-$TESTNAME.parameters"
+ save_lustre_params client "llite.*.xattr_cache" > $p
+ lctl set_param llite.*.xattr_cache 1 ||
+ { rm -f $p; skip "xattr cache is not supported"; return 0; }
+
+ touch $DIR1/$tfile
+ setfattr -n user.attr1 -v value1 $DIR1/$tfile ||
+ error "setfattr1 failed"
+ getfattr -n user.attr1 $DIR2/$tfile || error "getfattr1 failed"
+ getfattr -n user.attr1 $DIR1/$tfile || error "getfattr2 failed"
+ clear_llite_stats
+ # PR lock should be cached by now on both clients
+ getfattr -n user.attr1 $DIR1/$tfile || error "getfattr3 failed"
+ # 2 hits for getfattr(0)+getfattr(size)
+ [ $(calc_llite_stats getxattr_hits) -eq 2 ] || error "not cached in $DIR1"
+ getfattr -n user.attr1 $DIR2/$tfile || error "getfattr4 failed"
+ # 4 hits for more getfattr(0)+getfattr(size)
+ [ $(calc_llite_stats getxattr_hits) -eq 4 ] || error "not cached in $DIR2"
+ rm -f $DIR2/$tfile
+
+ restore_lustre_params < $p
+ rm -f $p
+}
+run_test 73 "getxattr should not cause xattr lock cancellation"
+
+test_74() {
+ [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.93) ] &&
+ skip "Need MDS version at least 2.4.93" && return
+
+ # report setup and flock failures explicitly instead of relying on
+ # the bare exit status of the last command
+ dd if=/dev/zero of=$DIR1/$tfile-1 bs=1K count=1 ||
+ error "dd $tfile-1 failed"
+ dd if=/dev/zero of=$DIR1/$tfile-2 bs=1K count=1 ||
+ error "dd $tfile-2 failed"
+ flocks_test 4 $DIR1/$tfile-1 $DIR2/$tfile-2 ||
+ error "flocks_test 4 failed"
+}
+run_test 74 "flock deadlock: different mounts =============="
+
log "cleanup: ======================================================"
[ "$(mount | grep $MOUNT2)" ] && umount $MOUNT2