if [[ $(uname -m) = aarch64 ]]; then
# bug number: LU-11596
ALWAYS_EXCEPT+=" $GRANT_CHECK_LIST"
- # bug number: LU-11671 LU-11667 LU-11729 LU-4398
- ALWAYS_EXCEPT+=" 45 317 810 817"
+ # bug number: LU-11671 LU-11667 LU-4398
+ ALWAYS_EXCEPT+=" 45 317 817"
fi
# 5 12 (min)"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="27m 64b 68 71 115 300o"
if [ "$mds1_FSTYPE" = "zfs" ]; then
- # bug number for skipped test: LU-1957
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 180"
+ # bug number for skipped test:
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT "
# 13 (min)"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="$EXCEPT_SLOW 51b"
fi
}
run_test 27L "lfs pool_list gives correct pool name"
+test_27M() {
+	# Verify that files opened with O_APPEND are striped according to
+	# the MDS-side mdd.*.append_stripe_count / mdd.*.append_pool
+	# settings rather than the directory or filesystem default layout.
+	[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.12.57) ]] &&
+		skip "Need MDS version >= than 2.12.57"
+	remote_mds_nodsh && skip "remote MDS with nodsh"
+	[[ $OSTCOUNT -lt 2 ]] && skip_env "need > 1 OST"
+
+	test_mkdir $DIR/$tdir
+
+	# Set default striping on directory
+	$LFS setstripe -C 4 $DIR/$tdir
+
+	# A normal (non-append) create follows the directory default of 4
+	echo 1 > $DIR/$tdir/${tfile}.1
+	local count=$($LFS getstripe -c $DIR/$tdir/${tfile}.1)
+	local setcount=4
+	[ $count -eq $setcount ] ||
+		error "(1) stripe count $count, should be $setcount"
+
+	# Capture existing append_stripe_count setting for restore
+	local orig_count=$(do_facet mds1 $LCTL get_param -n mdd.$FSNAME-MDT0000.append_stripe_count)
+	local mdts=$(comma_list $(mdts_nodes))
+	stack_trap "do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=$orig_count" EXIT
+
+	# An O_APPEND create uses the current append_stripe_count value
+	local appendcount=$orig_count
+	echo 1 >> $DIR/$tdir/${tfile}.2_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.2_append)
+	[ $count -eq $appendcount ] ||
+		error "(2)stripe count $count, should be $appendcount for append"
+
+	# Disable O_APPEND striping, verify it works
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=0
+
+	# Should now get the default striping, which is 4
+	setcount=4
+	echo 1 >> $DIR/$tdir/${tfile}.3_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.3_append)
+	[ $count -eq $setcount ] ||
+		error "(3) stripe count $count, should be $setcount"
+
+	# Try changing the stripe count for append files
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=2
+
+	# Append striping is now 2 (directory default is still 4)
+	appendcount=2
+	echo 1 >> $DIR/$tdir/${tfile}.4_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.4_append)
+	[ $count -eq $appendcount ] ||
+		error "(4) stripe count $count, should be $appendcount for append"
+
+	# Test append stripe count of -1 (stripe over all available OSTs)
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=-1
+	appendcount=$OSTCOUNT
+	echo 1 >> $DIR/$tdir/${tfile}.5
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.5)
+	[ $count -eq $appendcount ] ||
+		error "(5) stripe count $count, should be $appendcount for append"
+
+	# Set append striping back to default of 1
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=1
+
+	# Try a new default striping, PFL + DOM
+	$LFS setstripe -L mdt -E 1M -E -1 -c 2 $DIR/$tdir
+
+	# Create normal DOM file, DOM returns stripe count == 0
+	setcount=0
+	touch $DIR/$tdir/${tfile}.6
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.6)
+	[ $count -eq $setcount ] ||
+		error "(6) stripe count $count, should be $setcount"
+
+	# Show that an append create ignores the PFL+DOM directory default
+	# and uses the plain append_stripe_count layout (count 1)
+	appendcount=1
+	echo 1 >> $DIR/$tdir/${tfile}.7_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.7_append)
+	[ $count -eq $appendcount ] ||
+		error "(7) stripe count $count, should be $appendcount for append"
+
+	# Clean up DOM layout
+	$LFS setstripe -d $DIR/$tdir
+
+	# Now test that append striping works when layout is from root
+	$LFS setstripe -c 2 $MOUNT
+	# Make a special directory for this
+	mkdir $DIR/${tdir}/${tdir}.2
+	stack_trap "$LFS setstripe -d $MOUNT" EXIT
+
+	# Verify for normal file
+	setcount=2
+	echo 1 > $DIR/${tdir}/${tdir}.2/${tfile}.8
+	count=$($LFS getstripe -c $DIR/$tdir/${tdir}.2/${tfile}.8)
+	[ $count -eq $setcount ] ||
+		error "(8) stripe count $count, should be $setcount"
+
+	# An append create under the root layout still gets count 1
+	appendcount=1
+	echo 1 >> $DIR/${tdir}/${tdir}.2/${tfile}.9_append
+	count=$($LFS getstripe -c $DIR/${tdir}/${tdir}.2/${tfile}.9_append)
+	[ $count -eq $appendcount ] ||
+		error "(9) stripe count $count, should be $appendcount for append"
+
+	# Now test O_APPEND striping with pools
+	do_nodes $mdts $LCTL set_param mdd.*.append_pool="$TESTNAME"
+	stack_trap "do_nodes $mdts $LCTL set_param mdd.*.append_pool='none'" EXIT
+
+	# Create the pool
+	pool_add $TESTNAME || error "pool creation failed"
+	pool_add_targets $TESTNAME 0 1 || error "Pool add targets failed"
+
+	echo 1 >> $DIR/$tdir/${tfile}.10_append
+
+	pool=$($LFS getstripe -p $DIR/$tdir/${tfile}.10_append)
+	[ "$pool" = "$TESTNAME" ] || error "(10) incorrect pool: $pool"
+
+	# Check that count is still correct
+	appendcount=1
+	echo 1 >> $DIR/$tdir/${tfile}.11_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.11_append)
+	[ $count -eq $appendcount ] ||
+		error "(11) stripe count $count, should be $appendcount for append"
+
+	# Disable O_APPEND stripe count, verify pool works separately
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=0
+
+	echo 1 >> $DIR/$tdir/${tfile}.12_append
+
+	pool=$($LFS getstripe -p $DIR/$tdir/${tfile}.12_append)
+	[ "$pool" = "$TESTNAME" ] || error "(12) incorrect pool: $pool"
+
+	# Remove pool setting, verify it's not applied
+	do_nodes $mdts $LCTL set_param mdd.*.append_pool='none'
+
+	echo 1 >> $DIR/$tdir/${tfile}.13_append
+
+	pool=$($LFS getstripe -p $DIR/$tdir/${tfile}.13_append)
+	[ "$pool" = "" ] || error "(13) pool found: $pool"
+}
+run_test 27M "test O_APPEND striping"
+
# createtest also checks that device nodes are created and
# then visible correctly (#2091)
test_28() { # bug 2091
local pass=true
#get fid and record list
- fid_list=($(awk '/9_sub.*record/ { print $NF }' /$TMP/$tfile |
+ fid_list=($(awk '/9_sub.*record/ { print $NF }' $TMP/$tfile |
tail -n 4))
- rec_list=($(awk '/9_sub.*record/ { print $((NF-3)) }' /$TMP/$tfile |
+ rec_list=($(awk '/9_sub.*record/ { print $((NF-3)) }' $TMP/$tfile |
tail -n 4))
#remount mgs as ldiskfs or zfs type
stop mgs || error "stop mgs failed"
test_60g() {
local pid
+ local i
test_mkdir -c $MDSCOUNT $DIR/$tdir
- $LFS setdirstripe -D -i -1 -c $MDSCOUNT $DIR/$tdir
(
local index=0
while true; do
+ $LFS setdirstripe -i $(($index % $MDSCOUNT)) \
+ -c $MDSCOUNT $DIR/$tdir/subdir$index \
+ 2>/dev/null
mkdir $DIR/$tdir/subdir$index 2>/dev/null
rmdir $DIR/$tdir/subdir$index 2>/dev/null
index=$((index + 1))
pid=$!
- for i in $(seq 100); do
+ for i in {0..100}; do
# define OBD_FAIL_OSD_TXN_START 0x19a
- do_facet mds1 lctl set_param fail_loc=0x8000019a
+ local index=$((i % MDSCOUNT + 1))
+
+ do_facet mds$index $LCTL set_param fail_loc=0x8000019a \
+ > /dev/null
usleep 100
done
kill -9 $pid
+ for i in $(seq $MDSCOUNT); do
+ do_facet mds$i $LCTL set_param fail_loc=0 > /dev/null
+ done
+
mkdir $DIR/$tdir/new || error "mkdir failed"
rmdir $DIR/$tdir/new || error "rmdir failed"
+
+ do_facet mds1 $LCTL lfsck_start -M $(facet_svc mds1) -A -C \
+ -t namespace
+ for i in $(seq $MDSCOUNT); do
+ wait_update_facet mds$i "$LCTL get_param -n \
+ mdd.$(facet_svc mds$i).lfsck_namespace |
+ awk '/^status/ { print \\\$2 }'" "completed"
+ done
+
+ ls -R $DIR/$tdir || error "ls failed"
+ rm -rf $DIR/$tdir || error "rmdir failed"
}
run_test 60g "transaction abort won't cause MDT hung"
set_checksum_type()
{
	lctl set_param -n osc.*osc-[^mM]*.checksum_type $1
-	log "set checksum type to $1"
-	return 0
+	rc=$?
+	log "set checksum type to $1, rc = $rc"
+	return $rc
+}
+
+get_osc_checksum_type()
+{
+	# Print the currently selected checksum type (the bracketed entry)
+	# of the OSC import for the given OST.
+	# argument 1: OST name, e.g. OST0000
+	ost=$1
+	# pipefail: without it $? below reflects sed (which always
+	# succeeds), silently masking an lctl get_param failure
+	checksum_type=$(set -o pipefail;
+		lctl get_param -n osc.*${ost}-osc-[^mM]*.checksum_type |
+		sed 's/.*\[\(.*\)\].*/\1/g')
+	rc=$?
+	[ $rc -ne 0 ] && error "failed to get checksum type of $ost, rc = $rc, output = $checksum_type"
+	echo $checksum_type
}
+
F77_TMP=$TMP/f77-temp
F77SZ=8
setup_f77() {
}
run_test 77k "enable/disable checksum correctly"
+test_77l() {
+	# Verify the client's preferred checksum type is remembered across
+	# an idle disconnect / reconnect cycle of the OSC import.
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+	$GSS && skip_env "could not run with gss"
+
+	set_checksums 1
+	stack_trap "set_checksums $ORIG_CSUM" EXIT
+	stack_trap "set_checksum_type $ORIG_CSUM_TYPE" EXIT
+
+	# setting an unknown checksum type must fail
+	set_checksum_type invalid && error "unexpected success of invalid checksum type"
+
+	$LFS setstripe -c 1 -i 0 $DIR/$tfile
+	for algo in $CKSUM_TYPES; do
+		set_checksum_type $algo || error "fail to set checksum type $algo"
+		osc_algo=$(get_osc_checksum_type OST0000)
+		[ "$osc_algo" != "$algo" ] && error "checksum type is $osc_algo after setting it to $algo"
+
+		# no locks, no reqs to let the connection idle
+		cancel_lru_locks osc
+		lru_resize_disable osc
+		wait_osc_import_state client ost1 IDLE
+
+		# ensure ost1 is connected
+		stat $DIR/$tfile >/dev/null || error "can't stat"
+		wait_osc_import_state client ost1 FULL
+
+		osc_algo=$(get_osc_checksum_type OST0000)
+		[ "$osc_algo" != "$algo" ] && error "checksum type changed from $algo to $osc_algo after reconnection"
+	done
+	return 0
+}
+run_test 77l "preferred checksum type is remembered after reconnected"
+
[ "$ORIG_CSUM" ] && set_checksums $ORIG_CSUM || true
rm -f $F77_TMP
unset F77_TMP
cancel_lru_locks osc
$LCTL set_param osc.*.rpc_stats 0
$READS -f $DIR/$tfile -s$FILE_LENGTH -b$rsize -n$nreads -t 180
+ $LCTL get_param osc.*.rpc_stats
for osc_rpc_stats in $($LCTL get_param -N osc.*.rpc_stats); do
local stats=$($LCTL get_param -n $osc_rpc_stats)
local lines=$(echo "$stats" | awk 'END {print NR;}')
local size
if [ $lines -le 20 ]; then
+ echo "continue debug"
continue
fi
for size in 1 2 4 8; do
}
run_test 160i "changelog user register/unregister race"
+test_160j() {
+	# Verify the changelog device remains usable (readable/clearable)
+	# after the client mount it was opened through is unmounted;
+	# $MOUNT2 keeps the filesystem mounted meanwhile.
+	remote_mds_nodsh && skip "remote MDS with nodsh"
+	[[ $MDS1_VERSION -lt $(version_code 2.12.56) ]] &&
+		skip "Need MDS version at least 2.12.56"
+
+	mount_client $MOUNT2 || error "mount_client on $MOUNT2 failed"
+
+	changelog_register || error "first changelog_register failed"
+
+	# generate some changelog
+	test_mkdir -c $MDSCOUNT $DIR/$tdir || error "mkdir $tdir failed"
+	createmany -m $DIR/$tdir/${tfile}bis $((MDSCOUNT * 2)) ||
+		error "create $DIR/$tdir/${tfile}bis failed"
+
+	# open the changelog device
+	exec 3>/dev/changelog-$FSNAME-MDT0000
+	exec 4</dev/changelog-$FSNAME-MDT0000
+
+	# umount the first lustre mount
+	umount $MOUNT
+
+	# read changelog
+	cat <&4 >/dev/null || error "read changelog failed"
+
+	# clear changelog
+	local cl_user="${CL_USERS[$SINGLEMDS]%% *}"
+	changelog_users $SINGLEMDS | grep -q $cl_user ||
+		error "User $cl_user not found in changelog_users"
+
+	# pass the user as a printf argument, never as part of the format
+	printf 'clear:%s:0' "$cl_user" >&3
+
+	# close
+	exec 3>&-
+	exec 4<&-
+
+	# cleanup
+	changelog_deregister || error "changelog_deregister failed"
+
+	umount $MOUNT2
+	mount_client $MOUNT || error "mount_client on $MOUNT failed"
+}
+run_test 160j "client can be umounted while its changelog is being used"
+
test_161a() {
[ $PARALLEL == "yes" ] && skip "skip parallel run"
}
run_test 243 "various group lock tests"
-test_244()
+test_244a()
{
test_mkdir $DIR/$tdir
dd if=/dev/zero of=$DIR/$tdir/$tfile bs=1M count=35
error "sendfile+grouplock failed"
rm -rf $DIR/$tdir
}
-run_test 244 "sendfile with group lock tests"
+run_test 244a "sendfile with group lock tests"
+
+test_244b()
+{
+	# Multi-threaded group-lock stress: $threads concurrent multiops,
+	# ten writers sharing each file, all under group lock id 1234.
+	[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+
+	local threads=50
+	local size=$((1024*1024))
+
+	test_mkdir $DIR/$tdir
+	for i in $(seq 1 $threads); do
+		local file=$DIR/$tdir/file_$((i / 10))
+		# NOTE(review): "$size_" expands the (unset) variable
+		# "size_", not ${size} followed by "_" -- so $size is never
+		# used in the multiop opcode string. Confirm the intended
+		# write sizes; ${size} was probably meant.
+		$MULTIOP $file OG1234w$size_$((i % 3))w$size_$((i % 4))g1234c &
+		local pids[$i]=$!
+	done
+	# barrier: reap every writer before finishing the test
+	for i in $(seq 1 $threads); do
+		wait ${pids[$i]}
+	done
+}
+run_test 244b "multi-threaded write with group lock"
test_245() {
local flagname="multi_mod_rpcs"
test_255c() {
[ $OST1_VERSION -lt $(version_code 2.10.50) ] &&
- skip "lustre < 2.10.53 does not support lockahead"
+ skip "lustre < 2.10.50 does not support lockahead"
local count
local new_count
local mdtidx=$($LFS getstripe --mdt-index $DIR/$tdir)
cancel_lru_locks mdc
- dd if=/dev/urandom of=$tmp bs=200000 count=1
- dd if=$tmp of=$dom bs=200000 count=1
+ dd if=/dev/urandom of=$tmp bs=265000 count=1
+ dd if=$tmp of=$dom bs=265000 count=1
cancel_lru_locks mdc
cat /etc/hosts >> $tmp
lctl set_param -n mdc.*.stats=clear
local ra=$(get_mdc_stats $mdtidx req_active)
local rw=$(get_mdc_stats $mdtidx req_waittime)
+ [ -z $num ] && num=0
[ $num -eq 1 ] || error "expect 1 READ RPC, $num occured"
[ $ra == $rw ] || error "$((ra - rw)) resend occured"
echo "... DONE"
$LFS migrate -c2 $dom ||
error "failed to migrate to the new composite layout"
- [ $($LFS getstripe -L $dom) == 'mdt' ] &&
+ [ $($LFS getstripe -L $dom) != 'mdt' ] ||
error "MDT stripe was not removed"
cancel_lru_locks mdc
local new_md5=$(md5sum $dom)
- [ "$old_md5" != "$new_md5" ] &&
+ [ "$old_md5" == "$new_md5" ] ||
error "$old_md5 != $new_md5"
# Skip free space checks with ZFS
cancel_lru_locks mdc
local new_md5=$(md5sum $dom)
- [ "$old_md5" != "$new_md5" ] &&
+ [ "$old_md5" == "$new_md5" ] ||
error "$old_md5 != $new_md5"
# Skip free space checks with ZFS
}
run_test 272c "DoM migration: DOM file to the OST-striped file (composite)"
+test_272d() {
+	# DoM mirroring: extend a DoM file with an OST-striped mirror,
+	# resync, split off the DoM mirror (id 1, with -d to delete it),
+	# then verify the data is intact and the MDT space was freed.
+	[ $MDS1_VERSION -lt $(version_code 2.12.55) ] &&
+		skip "Need MDS version at least 2.12.55"
+
+	local dom=$DIR/$tdir/$tfile
+	mkdir -p $DIR/$tdir
+	$LFS setstripe -E 1M -L mdt -E -1 -c1 $dom
+
+	local mdtidx=$($LFS getstripe -m $dom)
+	local mdtname=MDT$(printf %04x $mdtidx)
+	local facet=mds$((mdtidx + 1))
+
+	dd if=/dev/urandom of=$dom bs=2M count=1 oflag=direct ||
+		error "failed to write data into $dom"
+	local old_md5=$(md5sum $dom)
+	cancel_lru_locks mdc
+	# record MDT free space before the mirror ops for comparison below
+	local mdtfree1=$(do_facet $facet \
+		lctl get_param -n osd*.*$mdtname.kbytesfree)
+
+	$LFS mirror extend -N -E 2M -c1 -E -1 -c2 $dom ||
+		error "failed mirroring to the new composite layout"
+	$LFS mirror resync $dom ||
+		error "failed mirror resync"
+	$LFS mirror split --mirror-id 1 -d $dom ||
+		error "failed mirror split"
+
+	[ $($LFS getstripe -L $dom) != 'mdt' ] ||
+		error "MDT stripe was not removed"
+
+	cancel_lru_locks mdc
+	local new_md5=$(md5sum $dom)
+	[ "$old_md5" == "$new_md5" ] ||
+		error "$old_md5 != $new_md5"
+
+	# Skip free space checks with ZFS
+	if [ "$(facet_fstype $facet)" != "zfs" ]; then
+		local mdtfree2=$(do_facet $facet \
+			lctl get_param -n osd*.*$mdtname.kbytesfree)
+		[ $mdtfree2 -gt $mdtfree1 ] ||
+			error "MDS space is not freed after DOM mirror deletion"
+	fi
+	return 0
+}
+run_test 272d "DoM mirroring: OST-striped mirror to DOM file"
+
+test_272e() {
+	# DoM mirroring: extend an OST-striped file with a DoM mirror,
+	# resync, split off mirror id 1, then verify data integrity.
+	[ $MDS1_VERSION -lt $(version_code 2.12.55) ] &&
+		skip "Need MDS version at least 2.12.55"
+
+	local dom=$DIR/$tdir/$tfile
+	mkdir -p $DIR/$tdir
+	$LFS setstripe -c 2 $dom
+
+	dd if=/dev/urandom of=$dom bs=512K count=1 oflag=direct ||
+		error "failed to write data into $dom"
+	local old_md5=$(md5sum $dom)
+	cancel_lru_locks mdc
+
+	$LFS mirror extend -N -E 1M -L mdt -E eof -c2 $dom ||
+		error "failed mirroring to the DOM layout"
+	$LFS mirror resync $dom ||
+		error "failed mirror resync"
+	$LFS mirror split --mirror-id 1 -d $dom ||
+		error "failed mirror split"
+
+	# NOTE(review): this expects the remaining layout not to be DoM;
+	# confirm that mirror id 1 is the DoM mirror after the extend above
+	[ $($LFS getstripe -L $dom) != 'mdt' ] ||
+		error "MDT stripe was not removed"
+
+	cancel_lru_locks mdc
+	local new_md5=$(md5sum $dom)
+	[ "$old_md5" == "$new_md5" ] ||
+		error "$old_md5 != $new_md5"
+
+	return 0
+}
+run_test 272e "DoM mirroring: DOM mirror to the OST-striped file"
+
+test_272f() {
+	# DoM migration: migrate an OST-striped file into a DoM (PFL)
+	# layout and verify the file contents are unchanged.
+	[ $MDS1_VERSION -lt $(version_code 2.12.55) ] &&
+		skip "Need MDS version at least 2.12.55"
+
+	local dom=$DIR/$tdir/$tfile
+	mkdir -p $DIR/$tdir
+	$LFS setstripe -c 2 $dom
+
+	dd if=/dev/urandom of=$dom bs=512K count=1 oflag=direct ||
+		error "failed to write data into $dom"
+	local old_md5=$(md5sum $dom)
+	cancel_lru_locks mdc
+
+	$LFS migrate -E 1M -L mdt -E eof -c2 -v $dom ||
+		error "failed migrating to the DOM file"
+
+	cancel_lru_locks mdc
+	local new_md5=$(md5sum $dom)
+	# "== || error" form keeps the comparison from leaving a non-zero
+	# status behind (consistent with tests 272c/272d/272e)
+	[ "$old_md5" == "$new_md5" ] ||
+		error "$old_md5 != $new_md5"
+
+	return 0
+}
+run_test 272f "DoM migration: OST-striped file to DOM file"
+
test_273a() {
[ $MDS1_VERSION -lt $(version_code 2.11.50) ] &&
skip "Need MDS version at least 2.11.50"
}
run_test 277 "Direct IO shall drop page cache"
+test_278() {
+	# Race stopping mds2 (held up by the fail_loc) against a
+	# stop/start of mds1; both MDTs must live on the same host.
+	[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+	[[ "$(facet_host mds1)" != "$(facet_host mds2)" ]] &&
+		skip "needs the same host for mdt1 mdt2" && return
+
+	local pid2
+
+#define OBD_FAIL_OBD_STOP_MDS_RACE 0x60b
+	# fail_loc = OBD_FAIL_ONCE | OBD_FAIL_OBD_STOP_MDS_RACE; must match
+	# the define above (was 0x8000060c -- verify against obd_support.h)
+	do_facet mds2 $LCTL set_param fail_loc=0x8000060b
+	stop mds2 &
+	pid2=$!
+
+	stop mds1
+
+	echo "Starting MDTs"
+	start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
+	wait $pid2
+#For the error assertion will happen. lu_env_get_key(..., &mdt_thread_key)
+#will return NULL
+	do_facet mds2 $LCTL set_param fail_loc=0
+
+	start mds2 $(mdsdevname 2) $MDS_MOUNT_OPTS
+	wait_recovery_complete mds2
+}
+run_test 278 "Race starting MDS between MDTs stop/start"
+
cleanup_test_300() {
trap 0
umask $SAVE_UMASK
}
test_802a() {
-
+ [[ $mds1_FSTYPE = zfs ]] || skip "ZFS specific test"
[[ $(lustre_version_code mds1) -lt $(version_code 2.9.55) ]] ||
[[ $OST1_VERSION -lt $(version_code 2.9.55) ]] &&
skip "Need server version at least 2.9.55"
run_test 809 "Verify no SOM xattr store for DoM-only files"
test_810() {
-	local ORIG
-	local CSUM
-
-	# t10 seem to dislike partial pages
-	lctl set_param osc.*.checksum_type=adler
-	lctl set_param fail_loc=0x411
-	dd if=/dev/urandom of=$DIR/$tfile bs=10240 count=2
-	ORIG=$(md5sum $DIR/$tfile)
-	lctl set_param ldlm.namespaces.*osc*.lru_size=clear
-	CSUM=$(md5sum $DIR/$tfile)
-	set_checksum_type adler
-	if [ "$ORIG" != "$CSUM" ]; then
-		error "$ORIG != $CSUM"
-	fi
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+	$GSS && skip_env "could not run with gss"
+
+	set_checksums 1
+	stack_trap "set_checksums $ORIG_CSUM" EXIT
+	stack_trap "set_checksum_type $ORIG_CSUM_TYPE" EXIT
+
+	local csum
+	local before
+	local after
+	# For every checksum type, write with grants disabled and verify
+	# the data read back after lock cancel matches what was written.
+	for csum in $CKSUM_TYPES; do
+		#define OBD_FAIL_OSC_NO_GRANT 0x411
+		$LCTL set_param osc.*.checksum_type=$csum fail_loc=0x411
+		# each entry is a "blocksize seek" pair for dd
+		for i in "10240 0" "10000 0" "4000 1" "500 1"; do
+			# split the "bs seek" pair into $1 and $2
+			eval set -- $i
+			dd if=/dev/urandom of=$DIR/$tfile bs=$1 count=2 seek=$2
+			before=$(md5sum $DIR/$tfile)
+			$LCTL set_param ldlm.namespaces.*osc*.lru_size=clear
+			after=$(md5sum $DIR/$tfile)
+			[ "$before" == "$after" ] ||
+				error "$csum: $before != $after bs=$1 seek=$2"
+		done
+	done
}
run_test 810 "partial page writes on ZFS (LU-11663)"
}
run_test 817 "nfsd won't cache write lock for exec file"
+test_818() {
+	# Restart the MDS with OBD_FAIL_OSP_CANT_PROCESS_LLOG set and make
+	# sure startup and subsequent unlink survive the failed llog.
+	mkdir $DIR/$tdir
+	# NOTE(review): both setstripe calls target $DIR/$tfile, not a file
+	# under $DIR/$tdir, so the rm -rf below never removes it, and the
+	# second setstripe on an existing file is expected to fail --
+	# confirm this is intentional.
+	$LFS setstripe -c1 -i0 $DIR/$tfile
+	$LFS setstripe -c1 -i1 $DIR/$tfile
+	stop $SINGLEMDS
+	#define OBD_FAIL_OSP_CANT_PROCESS_LLOG 0x2105
+	do_facet $SINGLEMDS lctl set_param fail_loc=0x80002105
+	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
+		error "start $SINGLEMDS failed"
+	rm -rf $DIR/$tdir
+}
+run_test 818 "unlink with failed llog"
+
#
# tests that do cleanup/setup should be run at the end
#