+test_70a() {
+	local tgt_dir=$tdir/test_dir
+
+	mkdir -p $DIR1/$tdir
+	# With multiple MDTs, place the directory remotely on MDT1;
+	# otherwise fall back to a plain local directory.
+	if [ $MDSCOUNT -ge 2 ]; then
+		local MDTIDX=1
+		$LFS mkdir -i $MDTIDX $DIR1/$tgt_dir ||
+			error "Create remote directory failed"
+	else
+		mkdir -p $DIR1/$tgt_dir
+	fi
+	# Keep the directory busy as CWD on the second mount while it is
+	# removed through the first mount; the unlink must still succeed.
+	cd $DIR2/$tgt_dir || error "cd directory failed"
+	rm -rf $DIR1/$tgt_dir || error "unlink directory failed"
+
+	cd $DIR2/$tdir || error "exit directory"
+}
+run_test 70a "cd directory && rm directory"
+
+test_70b() { # LU-2781
+ local i
+ mkdir -p $DIR1/$tdir
+
+ touch $DIR1/$tdir/file
+ for ((i = 0; i < 32; i++)); do
+ $LFS rm_entry $DIR1/$tdir/non_existent_dir &>/dev/null
+ done
+ rm $DIR1/$tdir/file || error "cannot remove file after rm_entry"
+
+ touch $DIR1/$tdir/file
+ $LFS mkdir -i0 $DIR1/$tdir/test_dir
+ $LFS rm_entry $DIR1/$tdir/test_dir &>/dev/null
+ rm -rf $DIR1/$tdir/test_dir ||
+ error "cannot remove directory after rm_entry"
+ rm $DIR1/$tdir/file || error "cannot remove file after rm_entry"
+}
+run_test 70b "remove files after calling rm_entry"
+
+test_71() {
+	local server_version=$(lustre_version_code $SINGLEMDS)
+
+	[[ $server_version -lt $(version_code 2.1.6) ]] &&
+		skip "Need MDS version at least 2.1.6" && return
+
+	# Patch not applied to 2.2 and 2.3 branches
+	[[ $server_version -ge $(version_code 2.2.0) ]] &&
+	[[ $server_version -lt $(version_code 2.4.0) ]] &&
+		skip "Need MDS version at least 2.4.0" && return
+
+	checkfiemap --test ||
+		{ skip "checkfiemap not runnable: $?" && return; }
+	# write data this way: hole - data - hole - data
+	dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=1 count=1
+	[ "$(facet_fstype ost$(($($GETSTRIPE -i $DIR1/$tfile) + 1)))" = \
+		"zfs" ] &&
+		skip "ORI-366/LU-1941: FIEMAP unimplemented on ZFS" && return 0
+	dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=3 count=1
+	# ldlm_bl_callback count tells us how many blocking callbacks
+	# (lock cancels / data flushes) the client was sent.
+	GET_STAT="lctl get_param -n ldlm.services.ldlm_cbd.stats"
+	stat $DIR2/$tfile
+	local can1=$($GET_STAT | awk '/ldlm_bl_callback/ {print $2}')
+	echo $can1
+	# FIEMAP from the second mount must see the data written on the
+	# first mount, i.e. the writer was forced to flush.
+	checkfiemap $DIR2/$tfile 81920 ||
+		error "data is not flushed from client"
+	local can2=$($GET_STAT | awk '/ldlm_bl_callback/ {print $2}')
+	echo $can2
+
+	# common case of "create file, copy file" on a single node
+	# should not flush data from ost
+	dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=1 count=1
+	dd if=/dev/urandom of=$DIR1/$tfile bs=40K seek=3 count=1
+	stat $DIR1/$tfile
+	local can3=$($GET_STAT | awk '/ldlm_bl_callback/ {print $2}')
+	echo $can3
+	checkfiemap $DIR1/$tfile 81920 ||
+		error 4
+	local can4=$($GET_STAT | awk '/ldlm_bl_callback/ {print $2}')
+	# Fix: print the counter just sampled ($can4); the old code
+	# re-echoed $can2 from the first pass.
+	echo $can4
+	# Fix: on failure report the delta of the counters actually being
+	# compared (can4 - can3), not the first pass's can2 - can1.
+	[ $can3 -eq $can4 ] || error $((can4 - can3)) "cancel RPC occurred."
+}
+run_test 71 "correct file map just after write operation is finished"
+
+test_72() {
+ local p="$TMP/sanityN-$TESTNAME.parameters"
+ local tlink1
+ local tlink2
+ save_lustre_params client "llite.*.xattr_cache" > $p
+ lctl set_param llite.*.xattr_cache 1 ||
+ { skip "xattr cache is not supported"; return 0; }
+
+ touch $DIR1/$tfile
+ setfattr -n user.attr1 -v value1 $DIR1/$tfile ||
+ error "setfattr1 failed"
+ getfattr -n user.attr1 $DIR2/$tfile | grep value1 ||
+ error "getfattr1 failed"
+ setfattr -n user.attr1 -v value2 $DIR2/$tfile ||
+ error "setfattr2 failed"
+ getfattr -n user.attr1 $DIR1/$tfile | grep value2 ||
+ error "getfattr2 failed"
+
+ # check that trusted.link is consistent
+ tlink1=$(getfattr -n trusted.link $DIR1/$tfile | md5sum)
+ ln $DIR2/$tfile $DIR2/$tfile-2 || error "failed to link"
+ tlink2=$(getfattr -n trusted.link $DIR1/$tfile | md5sum)
+ echo "$tlink1 $tlink2"
+ [ "$tlink1" = "$tlink2" ] && error "trusted.link should have changed!"
+
+ rm -f $DIR2/$tfile
+
+ restore_lustre_params < $p
+ rm -f $p
+}
+run_test 72 "getxattr/setxattr cache should be consistent between nodes"
+
+test_73() {
+	local param_file="$TMP/sanityN-$TESTNAME.parameters"
+	local hits
+	# Preserve the xattr_cache tunable and enable the cache (skip if
+	# this client build does not support it).
+	save_lustre_params client "llite.*.xattr_cache" > $param_file
+	lctl set_param llite.*.xattr_cache 1 ||
+		{ skip "xattr cache is not supported"; return 0; }
+
+	touch $DIR1/$tfile
+	setfattr -n user.attr1 -v value1 $DIR1/$tfile ||
+		error "setfattr1 failed"
+	# Prime the xattr cache on both mounts.
+	getfattr -n user.attr1 $DIR2/$tfile || error "getfattr1 failed"
+	getfattr -n user.attr1 $DIR1/$tfile || error "getfattr2 failed"
+	clear_llite_stats
+	# PR lock should be cached by now on both clients
+	getfattr -n user.attr1 $DIR1/$tfile || error "getfattr3 failed"
+	# 2 hits for getfattr(0)+getfattr(size)
+	hits=$(calc_llite_stats getxattr_hits)
+	[ $hits -eq 2 ] || error "not cached in $DIR1"
+	getfattr -n user.attr1 $DIR2/$tfile || error "getfattr4 failed"
+	# 4 hits for more getfattr(0)+getfattr(size)
+	hits=$(calc_llite_stats getxattr_hits)
+	[ $hits -eq 4 ] || error "not cached in $DIR2"
+	rm -f $DIR2/$tfile
+
+	restore_lustre_params < $param_file
+	rm -f $param_file
+}
+run_test 73 "getxattr should not cause xattr lock cancellation"
+
+test_74() {
+	[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.93) ] &&
+		skip "Need MDS version at least 2.4.93" && return
+
+	dd if=/dev/zero of=$DIR1/$tfile-1 bs=1K count=1
+	dd if=/dev/zero of=$DIR1/$tfile-2 bs=1K count=1
+	# Fix: report a failure explicitly like every other step in these
+	# tests, instead of relying on the helper's status being the
+	# function's implicit return value.
+	flocks_test 4 $DIR1/$tfile-1 $DIR2/$tfile-2 ||
+		error "flocks_test 4 failed"
+}
+run_test 74 "flock deadlock: different mounts =============="
+