if [[ $(uname -m) = aarch64 ]]; then
# bug number: LU-11596
ALWAYS_EXCEPT+=" $GRANT_CHECK_LIST"
- # bug number: LU-11671 LU-11667 LU-11729 LU-4398
- ALWAYS_EXCEPT+=" 45 317 810 817"
+ # bug number: LU-11671 LU-11667 LU-4398
+ ALWAYS_EXCEPT+=" 45 317 817"
fi
# 5 12 (min)"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="27m 64b 68 71 115 300o"
if [ "$mds1_FSTYPE" = "zfs" ]; then
- # bug number for skipped test: LU-1957
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT 180"
+ # bug number for skipped test:
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT "
# 13 (min)"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="$EXCEPT_SLOW 51b"
fi
}
run_test 27L "lfs pool_list gives correct pool name"
+# Verify that the mdd.*.append_stripe_count and mdd.*.append_pool parameters
+# control the striping applied to files written with O_APPEND, independently
+# of the directory/filesystem default layout.
+test_27M() {
+	[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.12.57) ]] &&
+		skip "Need MDS version at least 2.12.57"
+	remote_mds_nodsh && skip "remote MDS with nodsh"
+	[[ $OSTCOUNT -lt 2 ]] && skip_env "need > 1 OST"
+
+	test_mkdir $DIR/$tdir
+
+	# Set default striping on directory
+	$LFS setstripe -C 4 $DIR/$tdir
+
+	echo 1 > $DIR/$tdir/${tfile}.1
+	local count=$($LFS getstripe -c $DIR/$tdir/${tfile}.1)
+	local setcount=4
+	[ $count -eq $setcount ] ||
+		error "(1) stripe count $count, should be $setcount"
+
+	# Capture existing append_stripe_count setting for restore
+	local orig_count=$(do_facet mds1 $LCTL get_param -n mdd.$FSNAME-MDT0000.append_stripe_count)
+	local mdts=$(comma_list $(mdts_nodes))
+	stack_trap "do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=$orig_count" EXIT
+
+	local appendcount=$orig_count
+	echo 1 >> $DIR/$tdir/${tfile}.2_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.2_append)
+	[ $count -eq $appendcount ] ||
+		error "(2)stripe count $count, should be $appendcount for append"
+
+	# Disable O_APPEND striping, verify it works
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=0
+
+	# Should now get the default striping, which is 4
+	setcount=4
+	echo 1 >> $DIR/$tdir/${tfile}.3_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.3_append)
+	[ $count -eq $setcount ] ||
+		error "(3) stripe count $count, should be $setcount"
+
+	# Try changing the stripe count for append files
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=2
+
+	# Append striping is now 2 (directory default is still 4)
+	appendcount=2
+	echo 1 >> $DIR/$tdir/${tfile}.4_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.4_append)
+	[ $count -eq $appendcount ] ||
+		error "(4) stripe count $count, should be $appendcount for append"
+
+	# Test append stripe count of -1
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=-1
+	appendcount=$OSTCOUNT
+	echo 1 >> $DIR/$tdir/${tfile}.5
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.5)
+	[ $count -eq $appendcount ] ||
+		error "(5) stripe count $count, should be $appendcount for append"
+
+	# Set append striping back to default of 1
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=1
+
+	# Try a new default striping, PFL + DOM
+	$LFS setstripe -L mdt -E 1M -E -1 -c 2 $DIR/$tdir
+
+	# Create normal DOM file, DOM returns stripe count == 0
+	setcount=0
+	touch $DIR/$tdir/${tfile}.6
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.6)
+	[ $count -eq $setcount ] ||
+		error "(6) stripe count $count, should be $setcount"
+
+	# Append files should still get append_stripe_count (1), overriding
+	# the PFL+DOM directory default
+	appendcount=1
+	echo 1 >> $DIR/$tdir/${tfile}.7_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.7_append)
+	[ $count -eq $appendcount ] ||
+		error "(7) stripe count $count, should be $appendcount for append"
+
+	# Clean up DOM layout
+	$LFS setstripe -d $DIR/$tdir
+
+	# Now test that append striping works when layout is from root
+	$LFS setstripe -c 2 $MOUNT
+	# Make a special directory for this
+	mkdir $DIR/${tdir}/${tdir}.2
+	stack_trap "$LFS setstripe -d $MOUNT" EXIT
+
+	# Verify for normal file
+	setcount=2
+	echo 1 > $DIR/${tdir}/${tdir}.2/${tfile}.8
+	count=$($LFS getstripe -c $DIR/$tdir/${tdir}.2/${tfile}.8)
+	[ $count -eq $setcount ] ||
+		error "(8) stripe count $count, should be $setcount"
+
+	appendcount=1
+	echo 1 >> $DIR/${tdir}/${tdir}.2/${tfile}.9_append
+	count=$($LFS getstripe -c $DIR/${tdir}/${tdir}.2/${tfile}.9_append)
+	[ $count -eq $appendcount ] ||
+		error "(9) stripe count $count, should be $appendcount for append"
+
+	# Now test O_APPEND striping with pools
+	do_nodes $mdts $LCTL set_param mdd.*.append_pool="$TESTNAME"
+	stack_trap "do_nodes $mdts $LCTL set_param mdd.*.append_pool='none'" EXIT
+
+	# Create the pool
+	pool_add $TESTNAME || error "pool creation failed"
+	pool_add_targets $TESTNAME 0 1 || error "Pool add targets failed"
+
+	echo 1 >> $DIR/$tdir/${tfile}.10_append
+
+	pool=$($LFS getstripe -p $DIR/$tdir/${tfile}.10_append)
+	[ "$pool" = "$TESTNAME" ] || error "(10) incorrect pool: $pool"
+
+	# Check that count is still correct
+	appendcount=1
+	echo 1 >> $DIR/$tdir/${tfile}.11_append
+	count=$($LFS getstripe -c $DIR/$tdir/${tfile}.11_append)
+	[ $count -eq $appendcount ] ||
+		error "(11) stripe count $count, should be $appendcount for append"
+
+	# Disable O_APPEND stripe count, verify pool works separately
+	do_nodes $mdts $LCTL set_param mdd.*.append_stripe_count=0
+
+	echo 1 >> $DIR/$tdir/${tfile}.12_append
+
+	pool=$($LFS getstripe -p $DIR/$tdir/${tfile}.12_append)
+	[ "$pool" = "$TESTNAME" ] || error "(12) incorrect pool: $pool"
+
+	# Remove pool setting, verify it's not applied
+	do_nodes $mdts $LCTL set_param mdd.*.append_pool='none'
+
+	echo 1 >> $DIR/$tdir/${tfile}.13_append
+
+	pool=$($LFS getstripe -p $DIR/$tdir/${tfile}.13_append)
+	[ "$pool" = "" ] || error "(13) pool found: $pool"
+}
+run_test 27M "test O_APPEND striping"
+
# createtest also checks that device nodes are created and
# then visible correctly (#2091)
test_28() { # bug 2091
local pass=true
#get fid and record list
- fid_list=($(awk '/9_sub.*record/ { print $NF }' /$TMP/$tfile |
+ fid_list=($(awk '/9_sub.*record/ { print $NF }' $TMP/$tfile |
tail -n 4))
- rec_list=($(awk '/9_sub.*record/ { print $((NF-3)) }' /$TMP/$tfile |
+ rec_list=($(awk '/9_sub.*record/ { print $((NF-3)) }' $TMP/$tfile |
tail -n 4))
#remount mgs as ldiskfs or zfs type
stop mgs || error "stop mgs failed"
wait_update $HOSTNAME "$get_checksum" $i
#remount
echo "remount client, checksum should be $i"
- remount_client $MOUNT || "failed to remount client"
+ remount_client $MOUNT || error "failed to remount client"
checksum=$(eval $get_checksum)
[ $checksum -eq $i ] || error "checksum($checksum) != $i"
done
for opt in "checksum" "nochecksum"; do
#remount with mount option
echo "remount client with option $opt, checksum should be $i"
- umount_client $MOUNT || "failed to umount client"
+ umount_client $MOUNT || error "failed to umount client"
mount_client $MOUNT "$MOUNT_OPTS,$opt" ||
- "failed to mount client with option '$opt'"
+ error "failed to mount client with option '$opt'"
checksum=$(eval $get_checksum)
[ $checksum -eq $i ] || error "checksum($checksum) != $i"
i=$((i - 1))
done
- remount_client $MOUNT || "failed to remount client"
+ remount_client $MOUNT || error "failed to remount client"
}
run_test 77k "enable/disable checksum correctly"
}
run_test 160i "changelog user register/unregister race"
+# Verify a client can be unmounted while another process holds its changelog
+# device open for reading/clearing.
+test_160j() {
+	remote_mds_nodsh && skip "remote MDS with nodsh"
+	[[ $MDS1_VERSION -lt $(version_code 2.12.56) ]] &&
+		skip "Need MDS version at least 2.12.56"
+
+	mount_client $MOUNT2 || error "mount_client on $MOUNT2 failed"
+
+	changelog_register || error "first changelog_register failed"
+
+	# generate some changelog
+	test_mkdir -c $MDSCOUNT $DIR/$tdir || error "mkdir $tdir failed"
+	createmany -m $DIR/$tdir/${tfile}bis $((MDSCOUNT * 2)) ||
+		error "create $DIR/$tdir/${tfile}bis failed"
+
+	# open the changelog device (fd 3 to write commands, fd 4 to read)
+	exec 3>/dev/changelog-$FSNAME-MDT0000
+	exec 4</dev/changelog-$FSNAME-MDT0000
+
+	# umount the first lustre mount
+	umount $MOUNT
+
+	# read changelog
+	cat <&4 >/dev/null || error "read changelog failed"
+
+	# clear changelog
+	local cl_user="${CL_USERS[$SINGLEMDS]%% *}"
+	changelog_users $SINGLEMDS | grep -q $cl_user ||
+		error "User $cl_user not found in changelog_users"
+
+	# keep the user id out of the printf format string
+	printf 'clear:%s:0' "$cl_user" >&3
+
+	# close
+	exec 3>&-
+	exec 4<&-
+
+	# cleanup
+	changelog_deregister || error "changelog_deregister failed"
+
+	umount $MOUNT2
+	mount_client $MOUNT || error "mount_client on $MOUNT failed"
+}
+run_test 160j "client can be umounted while its changelog is being used"
+
test_161a() {
[ $PARALLEL == "yes" ] && skip "skip parallel run"
}
run_test 243 "various group lock tests"
-test_244()
+test_244a()
{
test_mkdir $DIR/$tdir
dd if=/dev/zero of=$DIR/$tdir/$tfile bs=1M count=35
error "sendfile+grouplock failed"
rm -rf $DIR/$tdir
}
-run_test 244 "sendfile with group lock tests"
+run_test 244a "sendfile with group lock tests"
+
+# Spawn many concurrent multiop writers, ~10 per file, each taking group
+# lock 1234 around its writes, then wait for all of them.
+test_244b()
+{
+	[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
+
+	local threads=50
+	local size=$((1024*1024))
+
+	test_mkdir $DIR/$tdir
+	for i in $(seq 1 $threads); do
+		local file=$DIR/$tdir/file_$((i / 10))
+		# NOTE(review): "$size_" tokenizes as the (unset) variable
+		# "size_" — underscore is a name character — so it expands
+		# empty and the multiop write sizes become the small values
+		# $((i % 3)) / $((i % 4)) rather than $size bytes. Confirm
+		# whether "${size}" was intended before changing: the multiop
+		# opcode grammar gives "_" its own meaning.
+		$MULTIOP $file OG1234w$size_$((i % 3))w$size_$((i % 4))g1234c &
+		local pids[$i]=$!
+	done
+	for i in $(seq 1 $threads); do
+		wait ${pids[$i]}
+	done
+}
+run_test 244b "multi-threaded write with group lock"
test_245() {
local flagname="multi_mod_rpcs"
test_255c() {
[ $OST1_VERSION -lt $(version_code 2.10.50) ] &&
- skip "lustre < 2.10.53 does not support lockahead"
+ skip "lustre < 2.10.50 does not support lockahead"
local count
local new_count
local mdtidx=$($LFS getstripe --mdt-index $DIR/$tdir)
cancel_lru_locks mdc
- dd if=/dev/urandom of=$tmp bs=200000 count=1
- dd if=$tmp of=$dom bs=200000 count=1
+ dd if=/dev/urandom of=$tmp bs=265000 count=1
+ dd if=$tmp of=$dom bs=265000 count=1
cancel_lru_locks mdc
cat /etc/hosts >> $tmp
lctl set_param -n mdc.*.stats=clear
local ra=$(get_mdc_stats $mdtidx req_active)
local rw=$(get_mdc_stats $mdtidx req_waittime)
+ [ -z $num ] && num=0
[ $num -eq 1 ] || error "expect 1 READ RPC, $num occured"
[ $ra == $rw ] || error "$((ra - rw)) resend occured"
echo "... DONE"
$LFS migrate -c2 $dom ||
error "failed to migrate to the new composite layout"
- [ $($LFS getstripe -L $dom) == 'mdt' ] &&
+ [ $($LFS getstripe -L $dom) != 'mdt' ] ||
error "MDT stripe was not removed"
cancel_lru_locks mdc
local new_md5=$(md5sum $dom)
- [ "$old_md5" != "$new_md5" ] &&
+ [ "$old_md5" == "$new_md5" ] ||
error "$old_md5 != $new_md5"
# Skip free space checks with ZFS
cancel_lru_locks mdc
local new_md5=$(md5sum $dom)
- [ "$old_md5" != "$new_md5" ] &&
+ [ "$old_md5" == "$new_md5" ] ||
error "$old_md5 != $new_md5"
# Skip free space checks with ZFS
}
run_test 272c "DoM migration: DOM file to the OST-striped file (composite)"
+# DoM mirroring: start with a DoM file, extend it with an OST-striped
+# mirror, resync, then split off mirror-id 1; verify the DoM component is
+# gone, the checksum is unchanged, and (non-ZFS) MDS space was freed.
+test_272d() {
+	[ $MDS1_VERSION -lt $(version_code 2.12.55) ] &&
+		skip "Need MDS version at least 2.12.55"
+
+	local dom=$DIR/$tdir/$tfile
+	mkdir -p $DIR/$tdir
+	$LFS setstripe -E 1M -L mdt -E -1 -c1 $dom
+
+	local mdtidx=$($LFS getstripe -m $dom)
+	local mdtname=MDT$(printf %04x $mdtidx)
+	local facet=mds$((mdtidx + 1))
+
+	dd if=/dev/urandom of=$dom bs=2M count=1 oflag=direct ||
+		error "failed to write data into $dom"
+	local old_md5=$(md5sum $dom)
+	cancel_lru_locks mdc
+	# snapshot MDT free space before the mirror is removed
+	local mdtfree1=$(do_facet $facet \
+		lctl get_param -n osd*.*$mdtname.kbytesfree)
+
+	$LFS mirror extend -N -E 2M -c1 -E -1 -c2 $dom ||
+		error "failed mirroring to the new composite layout"
+	$LFS mirror resync $dom ||
+		error "failed mirror resync"
+	$LFS mirror split --mirror-id 1 -d $dom ||
+		error "failed mirror split"
+
+	[ $($LFS getstripe -L $dom) != 'mdt' ] ||
+		error "MDT stripe was not removed"
+
+	cancel_lru_locks mdc
+	local new_md5=$(md5sum $dom)
+	[ "$old_md5" == "$new_md5" ] ||
+		error "$old_md5 != $new_md5"
+
+	# Skip free space checks with ZFS
+	if [ "$(facet_fstype $facet)" != "zfs" ]; then
+		local mdtfree2=$(do_facet $facet \
+			lctl get_param -n osd*.*$mdtname.kbytesfree)
+		[ $mdtfree2 -gt $mdtfree1 ] ||
+			error "MDS space is not freed after DOM mirror deletion"
+	fi
+	return 0
+}
+run_test 272d "DoM mirroring: OST-striped mirror to DOM file"
+
+# DoM mirroring: start with an OST-striped file, extend it with a DoM
+# mirror, resync, then split off mirror-id 1; verify the checksum and that
+# the resulting layout is not DoM.
+# NOTE(review): the post-split check requires getstripe -L != 'mdt', i.e.
+# the DoM component must be the one removed — confirm mirror-id 1 is the
+# mirror carrying the DoM component here.
+test_272e() {
+	[ $MDS1_VERSION -lt $(version_code 2.12.55) ] &&
+		skip "Need MDS version at least 2.12.55"
+
+	local dom=$DIR/$tdir/$tfile
+	mkdir -p $DIR/$tdir
+	$LFS setstripe -c 2 $dom
+
+	dd if=/dev/urandom of=$dom bs=512K count=1 oflag=direct ||
+		error "failed to write data into $dom"
+	local old_md5=$(md5sum $dom)
+	cancel_lru_locks mdc
+
+	$LFS mirror extend -N -E 1M -L mdt -E eof -c2 $dom ||
+		error "failed mirroring to the DOM layout"
+	$LFS mirror resync $dom ||
+		error "failed mirror resync"
+	$LFS mirror split --mirror-id 1 -d $dom ||
+		error "failed mirror split"
+
+	[ $($LFS getstripe -L $dom) != 'mdt' ] ||
+		error "MDT stripe was not removed"
+
+	cancel_lru_locks mdc
+	local new_md5=$(md5sum $dom)
+	[ "$old_md5" == "$new_md5" ] ||
+		error "$old_md5 != $new_md5"
+
+	return 0
+}
+run_test 272e "DoM mirroring: DOM mirror to the OST-striped file"
+
+# DoM migration: migrate an OST-striped file into a PFL layout with a 1M
+# DoM component and verify the data checksum is preserved.
+test_272f() {
+	[ $MDS1_VERSION -lt $(version_code 2.12.55) ] &&
+		skip "Need MDS version at least 2.12.55"
+
+	local dom=$DIR/$tdir/$tfile
+	mkdir -p $DIR/$tdir
+	$LFS setstripe -c 2 $dom
+
+	dd if=/dev/urandom of=$dom bs=512K count=1 oflag=direct ||
+		error "failed to write data into $dom"
+	local old_md5=$(md5sum $dom)
+	cancel_lru_locks mdc
+
+	$LFS migrate -E 1M -L mdt -E eof -c2 -v $dom ||
+		error "failed migrating to the DOM file"
+
+	cancel_lru_locks mdc
+	local new_md5=$(md5sum $dom)
+	# use the "== ||" form like 272d/272e so a matching checksum cannot
+	# leave a stray non-zero status from the test itself
+	[ "$old_md5" == "$new_md5" ] ||
+		error "$old_md5 != $new_md5"
+
+	return 0
+}
+run_test 272f "DoM migration: OST-striped file to DOM file"
+
test_273a() {
[ $MDS1_VERSION -lt $(version_code 2.11.50) ] &&
skip "Need MDS version at least 2.11.50"
do_facet mds2 $LCTL set_param fail_loc=0
start mds2 $(mdsdevname 2) $MDS_MOUNT_OPTS
+ wait_recovery_complete mds2
}
run_test 278 "Race starting MDS between MDTs stop/start"
cnt=$(ls -1 $DIR/$tdir | wc -l)
[ $cnt == 1 ] || error "unexpected #files after (5): $cnt"
- umount_client $MOUNT || "failed to umount client"
+ umount_client $MOUNT || error "failed to umount client"
mount_client $MOUNT "$MOUNT_OPTS,user_fid2path" ||
- "failed to mount client'"
+ error "failed to mount client'"
$RUNAS $LFS rmfid $DIR $FID || error "rmfid failed"
# rmfid should succeed
FID=$(lfs path2fid $DIR/$tdir/f)
$RUNAS $LFS rmfid $DIR $FID && error "rmfid didn't fail"
- umount_client $MOUNT || "failed to umount client"
+ umount_client $MOUNT || error "failed to umount client"
mount_client $MOUNT "$MOUNT_OPTS" ||
- "failed to mount client'"
+ error "failed to mount client'"
}
run_test 421f "rmfid checks permissions"
}
test_802a() {
-
+ [[ $mds1_FSTYPE = zfs ]] || skip "ZFS specific test"
[[ $(lustre_version_code mds1) -lt $(version_code 2.9.55) ]] ||
[[ $OST1_VERSION -lt $(version_code 2.9.55) ]] &&
skip "Need server version at least 2.9.55"
run_test 809 "Verify no SOM xattr store for DoM-only files"
test_810() {
-	local ORIG
-	local CSUM
-
-	# t10 seem to dislike partial pages
-	lctl set_param osc.*.checksum_type=adler
-	lctl set_param fail_loc=0x411
-	dd if=/dev/urandom of=$DIR/$tfile bs=10240 count=2
-	ORIG=$(md5sum $DIR/$tfile)
-	lctl set_param ldlm.namespaces.*osc*.lru_size=clear
-	CSUM=$(md5sum $DIR/$tfile)
-	set_checksum_type adler
-	if [ "$ORIG" != "$CSUM" ]; then
-		error "$ORIG != $CSUM"
-	fi
+	[ $PARALLEL == "yes" ] && skip "skip parallel run"
+	$GSS && skip_env "could not run with gss"
+
+	set_checksums 1
+	stack_trap "set_checksums $ORIG_CSUM" EXIT
+	stack_trap "set_checksum_type $ORIG_CSUM_TYPE" EXIT
+
+	local csum
+	local before
+	local after
+	# Repeat for every supported checksum type, over block size/seek
+	# combinations that produce partial pages; md5 before and after a
+	# lock cancel must match.
+	for csum in $CKSUM_TYPES; do
+		#define OBD_FAIL_OSC_NO_GRANT 0x411
+		$LCTL set_param osc.*.checksum_type=$csum fail_loc=0x411
+		for i in "10240 0" "10000 0" "4000 1" "500 1"; do
+			# split "bs seek" pair into $1 and $2
+			eval set -- $i
+			dd if=/dev/urandom of=$DIR/$tfile bs=$1 count=2 seek=$2
+			before=$(md5sum $DIR/$tfile)
+			$LCTL set_param ldlm.namespaces.*osc*.lru_size=clear
+			after=$(md5sum $DIR/$tfile)
+			[ "$before" == "$after" ] ||
+				error "$csum: $before != $after bs=$1 seek=$2"
+		done
+	done
}
run_test 810 "partial page writes on ZFS (LU-11663)"
}
run_test 817 "nfsd won't cache write lock for exec file"
+# Restart the MDS with llog processing failing (one-shot fail_loc) and make
+# sure an unlink afterwards does not break.
+test_818() {
+	mkdir $DIR/$tdir
+	# NOTE(review): both setstripe calls target $DIR/$tfile, not a file
+	# under $tdir, and the second call on an already-created file is
+	# expected to fail — confirm whether $DIR/$tdir/$tfile was intended.
+	$LFS setstripe -c1 -i0 $DIR/$tfile
+	$LFS setstripe -c1 -i1 $DIR/$tfile
+	stop $SINGLEMDS
+	#define OBD_FAIL_OSP_CANT_PROCESS_LLOG 0x2105
+	do_facet $SINGLEMDS lctl set_param fail_loc=0x80002105
+	start $SINGLEMDS $(mdsdevname ${SINGLEMDS//mds/}) $MDS_MOUNT_OPTS ||
+		error "start $SINGLEMDS failed"
+	rm -rf $DIR/$tdir
+}
+run_test 818 "unlink with failed llog"
+
#
# tests that do cleanup/setup should be run at the end
#