init_logging
DIRECTIO=${DIRECTIO:-$LUSTRE/tests/directio}
-[ $MDSCOUNT -gt 1 ] && skip "CMD case" && exit 0
-
require_dsh_mds || exit 0
require_dsh_ost || exit 0
exec $LUSTRE/tests/sanity-quota-old.sh
fi
-# if e2fsprogs support quota feature?
-if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] && \
- ! $DEBUGFS -c -R supported_features | grep -q 'quota'; then
+# Does e2fsprogs support quota feature?
+if [ $(facet_fstype $SINGLEMDS) == ldiskfs ] &&
+ do_facet $SINGLEMDS "! $DEBUGFS -c -R supported_features |
+ grep -q 'quota'"; then
skip "e2fsprogs doesn't support quota" && exit 0
fi
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="9 18 21"
+# bug number for skipped test: LU-4515
+ALWAYS_EXCEPT="$ALWAYS_EXCEPT 34"
+
+if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
+# bug number for skipped test: LU-2872 LU-2836 LU-2836 LU-2059
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT 1 3 6 7d"
+# bug number: LU-2887
+ ZFS_SLOW="12a"
+fi
+
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="$ZFS_SLOW 9 18 21"
QUOTALOG=${TESTSUITELOG:-$TMP/$(basename $0 .sh).log}
build_test_filter
+# lustre_fail: set lctl fail_val and fail_loc on a group of server nodes.
+#   $1 - node selector: mds|mdt, ost, or mds_ost|mdt_ost (both sets)
+#   $2 - fail_loc to set
+#   $3 - fail_val to set (defaults to 0)
lustre_fail() {
-	local fail_node=$1
+	local fail_node=$1
	local fail_loc=$2
	local fail_val=${3:-0}
+	local NODES=
-	if [ $fail_node == "mds" ] || [ $fail_node == "mds_ost" ]; then
-		do_facet $SINGLEMDS "lctl set_param fail_val=$fail_val"
-		do_facet $SINGLEMDS "lctl set_param fail_loc=$fail_loc"
-	fi
+	# map the selector onto a comma-separated node list; note this now
+	# covers all MDTs, not just $SINGLEMDS as the old code did
+	case $fail_node in
+	mds_ost|mdt_ost) NODES="$(comma_list $(mdts_nodes) $(osts_nodes))";;
+	mds|mdt) NODES="$(comma_list $(mdts_nodes))";;
+	ost) NODES="$(comma_list $(osts_nodes))";;
+	esac
-	if [ $fail_node == "ost" ] || [ $fail_node == "mds_ost" ]; then
-		for num in `seq $OSTCOUNT`; do
-			do_facet ost$num "lctl set_param fail_val=$fail_val"
-			do_facet ost$num "lctl set_param fail_loc=$fail_loc"
-		done
-	fi
+	# set both parameters in a single do_nodes call per node set
+	do_nodes $NODES "lctl set_param fail_val=$fail_val fail_loc=$fail_loc"
}
RUNAS="runas -u $TSTID -g $TSTID"
# usage: set_mdt_qtype ug|u|g|none
set_mdt_qtype() {
	local qtype=$1
-	local varsvc=${SINGLEMDS}_svc
+	local varsvc
+	local mdts=$(get_facets MDS)
	local cmd
	do_facet mgs $LCTL conf_param $FSNAME.quota.mdt=$qtype
-	if $(facet_up $SINGLEMDS); then
+	# we have to make sure each MDT received config changes
+	for mdt in ${mdts//,/ }; do
+		varsvc=${mdt}_svc
		cmd="$LCTL get_param -n "
-		cmd=${cmd}osd-$(facet_fstype $SINGLEMDS).${!varsvc}
+		cmd=${cmd}osd-$(facet_fstype $mdt).${!varsvc}
		cmd=${cmd}.quota_slave.enabled
-		wait_update_facet $SINGLEMDS "$cmd" "$qtype" || return 1
-	fi
+		# poll osd-*.quota_slave.enabled until it reports $qtype;
+		# skip MDTs that are down (nothing to verify there)
+		if $(facet_up $mdt); then
+			wait_update_facet $mdt "$cmd" "$qtype" || return 1
+		fi
+	done
	return 0
}
# set ost quota type
-# usage: set_ost_quota_type ug|u|g|none
+# usage: set_ost_qtype ug|u|g|none
set_ost_qtype() {
local qtype=$1
local varsvc
+# wait_reintegration: wait until quota reintegration completes on all
+# targets of the given type.
+#   $1 - target type: "mdt" or "ost"
+#   $2 - quota type line to match in quota_slave.info ("user" or "group")
+#   $3 - optional timeout (secs) forwarded to wait_update_facet
wait_reintegration() {
	local ntype=$1
	local qtype=$2
+	local max=$3
	local result="glb[1],slv[1],reint[0]"
	local varsvc
	local cmd
+	local tgts
	if [ $ntype == "mdt" ]; then
-		varsvc=${SINGLEMDS}_svc
+		tgts=$(get_facets MDS)
+	else
+		tgts=$(get_facets OST)
+	fi
+
+	# both MDT and OST targets are now handled by one loop instead of
+	# the former duplicated MDS/OST branches
+	for tgt in ${tgts//,/ }; do
+		varsvc=${tgt}_svc
		cmd="$LCTL get_param -n "
-		cmd=${cmd}osd-$(facet_fstype $SINGLEMDS).${!varsvc}
+		cmd=${cmd}osd-$(facet_fstype $tgt).${!varsvc}
		cmd=${cmd}.quota_slave.info
-		if $(facet_up $SINGLEMDS); then
-			wait_update_facet $SINGLEMDS "$cmd |
-				grep "$qtype" | awk '{ print \\\$3 }'" "$result" ||
-				return 1
-		fi
-	else
-		local osts=$(get_facets OST)
-		for ost in ${osts//,/ }; do
-			varsvc=${ost}_svc
-			cmd="$LCTL get_param -n "
-			cmd=${cmd}osd-$(facet_fstype $ost).${!varsvc}
-			cmd=${cmd}.quota_slave.info
-
-			if $(facet_up $ost); then
-				wait_update_facet $ost "$cmd |
+		if $(facet_up $tgt); then
+			wait_update_facet $tgt "$cmd |
				grep "$qtype" | awk '{ print \\\$3 }'" \
-				"$result" || return 1
-			fi
-		done
-	fi
+			"$result" $max || return 1
+		fi
+	done
	return 0
}
+# wait_mdt_reint: wait for MDT quota reintegration of the given type(s).
+#   $1 - quota type: u|g|ug
+#   $2 - optional timeout in seconds (default 90)
wait_mdt_reint() {
	local qtype=$1
+	local max=${2:-90}
	if [ $qtype == "u" ] || [ $qtype == "ug" ]; then
-		wait_reintegration "mdt" "user" || return 1
+		wait_reintegration "mdt" "user" $max || return 1
	fi
	if [ $qtype == "g" ] || [ $qtype == "ug" ]; then
-		wait_reintegration "mdt" "group" || return 1
+		wait_reintegration "mdt" "group" $max || return 1
	fi
	return 0
}
+# wait_ost_reint: wait for OST quota reintegration of the given type(s).
+#   $1 - quota type: u|g|ug
+#   $2 - optional timeout in seconds (default 90)
wait_ost_reint() {
	local qtype=$1
+	local max=${2:-90}
	if [ $qtype == "u" ] || [ $qtype == "ug" ]; then
-		wait_reintegration "ost" "user" || return 1
+		wait_reintegration "ost" "user" $max || return 1
	fi
	if [ $qtype == "g" ] || [ $qtype == "ug" ]; then
-		wait_reintegration "ost" "group" || return 1
+		wait_reintegration "ost" "group" $max || return 1
	fi
	return 0
}
wait_delete_completed
echo "Creating test directory"
mkdir -p $DIR/$tdir
- chmod 077 $DIR/$tdir
+ chmod 0777 $DIR/$tdir
# always clear fail_loc in case of fail_loc isn't cleared
# properly when previous test failed
lustre_fail mds_ost 0
}
quota_init
+resetquota -u $TSTUSR
+resetquota -g $TSTUSR
+resetquota -u $TSTUSR2
+resetquota -g $TSTUSR2
+
test_quota_performance() {
local TESTFILE="$DIR/$tdir/$tfile-0"
local size=$1 # in MB
delta=$((etime - stime))
if [ $delta -gt 0 ]; then
rate=$((size * 1024 / delta))
- [ $rate -gt 1024 ] ||
- error "SLOW IO for $TSTUSR (user): $rate KB/sec"
+ if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
+ # LU-2872 - see LU-2887 for fix
+ [ $rate -gt 64 ] ||
+ error "SLOW IO for $TSTUSR (user): $rate KB/sec"
+ else
+ [ $rate -gt 1024 ] ||
+ error "SLOW IO for $TSTUSR (user): $rate KB/sec"
+ fi
fi
rm -f $TESTFILE
}
local MB=100 # 100M
[ "$SLOW" = "no" ] && MB=10
- local free_space=$(lfs df | grep "filesystem summary" | \
- awk '{print $5}')
+ local free_space=$(lfs_df | grep "summary" | awk '{print $4}')
[ $free_space -le $((MB * 1024)) ] &&
skip "not enough space ${free_space} KB, " \
"required $((MB * 1024)) KB" && return
[ "$SLOW" = "no" ] && LIMIT=1024 # 1k inodes
- local FREE_INODES=$(lfs df -i | grep "filesystem summary" | \
- awk '{print $5}')
+ local FREE_INODES=$(mdt_free_inodes 0)
+ echo "$FREE_INODES free inodes on master MDT"
[ $FREE_INODES -lt $LIMIT ] &&
skip "not enough free inodes $FREE_INODES required $LIMIT" &&
return
# block soft limit
test_3() {
local LIMIT=1 # 1MB
- local GRACE=10 # 10s
+ local GRACE=20 # 20s
local TESTFILE=$DIR/$tdir/$tfile-0
set_ost_qtype "ug" || error "enable ost quota failed"
test_6() {
local LIMIT=3 # 3M
+ # Clear dmesg so watchdog is not triggered by previous
+ # test output
+ do_facet ost1 dmesg -c > /dev/null
+
setup_quota_test
trap cleanup_quota_test EXIT
# no watchdog is triggered
do_facet ost1 dmesg > $TMP/lustre-log-${TESTNAME}.log
- watchdog=$(awk '/sanity-quota test 6/ {start = 1;}
- /Service thread pid/ && /was inactive/ {
- if (start) {
- print;
- }
- }' $TMP/lustre-log-${TESTNAME}.log)
+ watchdog=$(awk '/Service thread pid/ && /was inactive/ \
+ { print; }' $TMP/lustre-log-${TESTNAME}.log)
[ -z "$watchdog" ] || error "$watchdog"
rm -f $TMP/lustre-log-${TESTNAME}.log
- # write should continue & succeed
+ # write should continue then fail with EDQUOT
local count=0
+ local c_size
while [ true ]; do
if ! ps -p ${DDPID} > /dev/null 2>&1; then break; fi
- if [ $count -ge 120 ]; then
+ if [ $count -ge 240 ]; then
quota_error u $TSTUSR "dd not finished in $count secs"
fi
count=$((count + 1))
- [ $((count % 10)) -eq 0 ] && echo "Waiting $count secs"
+ if [ $((count % 30)) -eq 0 ]; then
+ c_size=$(stat -c %s $TESTFILE)
+ echo "Waiting $count secs. $c_size"
+ $SHOW_QUOTA_USER
+ fi
sleep 1
done
# define OBD_FAIL_QUOTA_DELAY_REINT 0xa03
lustre_fail ost 0xa03
- # enable ost quota to trigger reintegration
+ # enable ost quota
set_ost_qtype "ug" || error "enable ost quota failed"
+ # trigger reintegration
+ local procf="osd-$(facet_fstype ost1).$FSNAME-OST*."
+ procf=${procf}quota_slave.force_reint
+ do_facet ost1 $LCTL set_param $procf=1 ||
+ error "force reintegration failed"
echo "Stop mds..."
stop mds1
start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS
quota_init
- wait_ost_reint "ug" || error "reintegration failed"
+ # wait longer than usual to make sure the reintegration
+ # is triggered by quota wb thread.
+ wait_ost_reint "ug" 200 || error "reintegration failed"
# hardlimit should have been fetched by slave during global
# reintegration, write will exceed quota
trap cleanup_quota_test EXIT
set_ost_qtype "none" || error "disable ost quota failed"
- # LU-2284. Enable trace for debug log.
- do_nodes $(comma_list $(nodes_list)) "lctl set_param debug=+trace"
$LFS setquota -u $TSTUSR -B ${limit}M $DIR ||
error "set quota for $TSTUSR failed"
$LFS setquota -u $TSTUSR2 -B ${limit}M $DIR ||
error "set quota for $TSTUSR2 failed"
- do_nodes $(comma_list $(nodes_list)) "lctl set_param debug=-trace"
#define OBD_FAIL_OBD_IDX_READ_BREAK 0x608
lustre_fail mds 0x608 0
}
run_test 7d "Quota reintegration (Transfer index in multiple bulks)"
+# quota reintegration (inode limits)
+test_7e() {
+	[ "$MDSCOUNT" -lt "2" ] && skip "Required more MDTs" && return
+
+	local ilimit=$((1024 * 2)) # 2k inodes
+	local TESTFILE=$DIR/${tdir}-1/$tfile
+
+	setup_quota_test
+	trap cleanup_quota_test EXIT
+
+	# make sure the system is clean
+	local USED=$(getquota -u $TSTUSR global curinodes)
+	[ $USED -ne 0 ] && error "Used inode($USED) for user $TSTUSR isn't 0."
+
+	# make sure no granted quota on mdt1
+	set_mdt_qtype "ug" || error "enable mdt quota failed"
+	resetquota -u $TSTUSR
+	set_mdt_qtype "none" || error "disable mdt quota failed"
+
+	local MDTUUID=$(mdtuuid_from_index $((MDSCOUNT - 1)))
+	USED=$(getquota -u $TSTUSR $MDTUUID ihardlimit)
+	[ $USED -ne 0 ] && error "limit($USED) on $MDTUUID for user" \
+		"$TSTUSR isn't 0."
+
+	echo "Stop mds${MDSCOUNT}..."
+	stop mds${MDSCOUNT}
+
+	echo "Enable quota & set quota limit for $TSTUSR"
+	set_mdt_qtype "ug" || error "enable mdt quota failed"
+	$LFS setquota -u $TSTUSR -b 0 -B 0 -i 0 -I $ilimit $DIR ||
+		error "set quota failed"
+
+	echo "Start mds${MDSCOUNT}..."
+	start mds${MDSCOUNT} $(mdsdevname $MDSCOUNT) $MDS_MOUNT_OPTS
+	quota_init
+
+	wait_mdt_reint "ug" || error "reintegration failed"
+
+	echo "create remote dir"
+	$LFS mkdir -i $((MDSCOUNT - 1)) $DIR/${tdir}-1 ||
+		error "create remote dir failed"
+	chmod 0777 $DIR/${tdir}-1
+
+	# hardlimit should have been fetched by slave during global
+	# reintegration, create will exceed quota
+	$RUNAS createmany -m $TESTFILE $((ilimit + 1)) &&
+		quota_error u $TSTUSR "create succeeded, expect EDQUOT"
+
+	$RUNAS unlinkmany $TESTFILE $ilimit || error "unlink files failed"
+	wait_delete_completed
+	sync_all_data || true
+
+	echo "Stop mds${MDSCOUNT}..."
+	stop mds${MDSCOUNT}
+
+	$LFS setquota -u $TSTUSR -b 0 -B 0 -i 0 -I 0 $DIR ||
+		error "clear quota failed"
+
+	echo "Start mds${MDSCOUNT}..."
+	start mds${MDSCOUNT} $(mdsdevname $MDSCOUNT) $MDS_MOUNT_OPTS
+	quota_init
+
+	wait_mdt_reint "ug" || error "reintegration failed"
+
+	# hardlimit should be cleared on slave during reintegration
+	$RUNAS createmany -m $TESTFILE $((ilimit + 1)) ||
+		quota_error u $TSTUSR "create failed, expect success"
+
+	$RUNAS unlinkmany $TESTFILE $((ilimit + 1)) || error "unlink failed"
+	rmdir $DIR/${tdir}-1 || error "unlink remote dir failed"
+
+	cleanup_quota_test
+	resetquota -u $TSTUSR
+}
+run_test 7e "Quota reintegration (inode limits)"
+
# run dbench with quota enabled
test_8() {
local BLK_LIMIT="100g" #100G
# run for fixing bug10707, it needs a big room. test for 64bit
test_9() {
- local filesize=$((1024 * 1024 * 1024 * 9 / 2)) # 4.5G
+ local filesize=$((1024 * 9 / 2)) # 4.5G
check_whether_skip && return 0
}
run_test 11 "Chown/chgrp ignores quota"
-test_12() {
+test_12a() {
[ "$OSTCOUNT" -lt "2" ] && skip "skipping rebalancing test" && return
local blimit=22 # 22M
cleanup_quota_test
resetquota -u $TSTUSR
}
-run_test 12 "Block quota rebalancing"
+run_test 12a "Block quota rebalancing"
+
+test_12b() {
+	[ "$MDSCOUNT" -lt "2" ] && skip "skipping rebalancing test" && return
+
+	local ilimit=$((1024 * 2)) # 2k inodes
+	local TESTFILE0=$DIR/$tdir/$tfile
+	local TESTFILE1=$DIR/${tdir}-1/$tfile
+
+	setup_quota_test
+	trap cleanup_quota_test EXIT
+
+	$LFS mkdir -i 1 $DIR/${tdir}-1 || error "create remote dir failed"
+	chmod 0777 $DIR/${tdir}-1
+
+	set_mdt_qtype "u" || error "enable mdt quota failed"
+	quota_show_check f u $TSTUSR
+
+	$LFS setquota -u $TSTUSR -b 0 -B 0 -i 0 -I $ilimit $DIR ||
+		error "set quota failed"
+
+	echo "Create $ilimit files on mdt0..."
+	$RUNAS createmany -m $TESTFILE0 $ilimit ||
+		quota_error u $TSTUSR "create failed, but expect success"
+
+	echo "Create files on mdt1..."
+	$RUNAS createmany -m $TESTFILE1 1 &&
+		quota_error a $TSTUSR "create succeeded, expect EDQUOT"
+
+	echo "Free space from mdt0..."
+	$RUNAS unlinkmany $TESTFILE0 $ilimit || error "unlink mdt0 files failed"
+	wait_delete_completed
+	sync_all_data || true
+
+	echo "Create files on mdt1 after space freed from mdt0..."
+	$RUNAS createmany -m $TESTFILE1 $((ilimit / 2)) ||
+		quota_error a $TSTUSR "rebalancing failed"
+
+	$RUNAS unlinkmany $TESTFILE1 $((ilimit / 2)) ||
+		error "unlink mdt1 files failed"
+	rmdir $DIR/${tdir}-1 || error "unlink remote dir failed"
+
+	cleanup_quota_test
+	resetquota -u $TSTUSR
+}
+run_test 12b "Inode quota rebalancing"
test_13(){
local TESTFILE=$DIR/$tdir/$tfile
- # the name of osp on ost1 name is MDT0000-osp-OST0000
- local procf="ldlm.namespaces.*MDT0000-osp-OST0000.lru_size"
+ # the name of lwp on ost1 name is MDT0000-lwp-OST0000
+ local procf="ldlm.namespaces.*MDT0000-lwp-OST0000.lru_size"
setup_quota_test
trap cleanup_quota_test EXIT
local testfile_size=$(stat -c %s $TESTFILE)
if [ $testfile_size -ne $((BLK_SZ * 1024 * 100)) ] ; then
- quota_error u $TSTUSR "expect $((BLK_SZ * 1024 * 100)),"
+ quota_error u $TSTUSR "expect $((BLK_SZ * 1024 * 100))," \
"got ${testfile_size}. Verifying file failed!"
fi
cleanup_quota_test
# test when mds does failover, the ost still could work well
# this test shouldn't trigger watchdog b=14840
test_18() {
+ # Clear dmesg so watchdog is not triggered by previous
+ # test output
+ do_facet ost1 dmesg -c > /dev/null
+
test_18_sub normal
test_18_sub directio
# check if watchdog is triggered
do_facet ost1 dmesg > $TMP/lustre-log-${TESTNAME}.log
- local watchdog=$(awk '/sanity-quota test 18/ {start = 1;}
- /Service thread pid/ && /was inactive/ {
- if (start) {
- print;
- }
- }' $TMP/lustre-log-${TESTNAME}.log)
+ local watchdog=$(awk '/Service thread pid/ && /was inactive/ \
+ { print; }' $TMP/lustre-log-${TESTNAME}.log)
[ -z "$watchdog" ] || error "$watchdog"
rm -f $TMP/lustre-log-${TESTNAME}.log
}
}
run_test 27b "lfs quota/setquota should handle user/group ID (b20200)"
+test_27c() {
+	local limit
+
+	$LFS setquota -u $TSTID -b 30M -B 3T $DIR ||
+		error "lfs setquota failed"
+
+	# quote $limit: an empty awk result must fail the comparison cleanly
+	# instead of dying with "[: unary operator expected"
+	limit=$($LFS quota -u $TSTID -v -h $DIR | grep $DIR | awk '{print $3}')
+	[ "$limit" != "30M" ] && error "softlimit $limit isn't human-readable"
+	limit=$($LFS quota -u $TSTID -v -h $DIR | grep $DIR | awk '{print $4}')
+	[ "$limit" != "3T" ] && error "hardlimit $limit isn't human-readable"
+
+	$LFS setquota -u $TSTID -b 1500M -B 18500G $DIR ||
+		error "lfs setquota for $TSTID failed"
+
+	limit=$($LFS quota -u $TSTID -v -h $DIR | grep $DIR | awk '{print $3}')
+	[ "$limit" != "1.465G" ] && error "wrong softlimit $limit"
+	limit=$($LFS quota -u $TSTID -v -h $DIR | grep $DIR | awk '{print $4}')
+	[ "$limit" != "18.07T" ] && error "wrong hardlimit $limit"
+
+	$LFS quota -u $TSTID -v -h $DIR | grep -q "Total allocated" ||
+		error "total allocated inode/block limit not printed"
+
+	resetquota -u $TSTUSR
+}
+run_test 27c "lfs quota should support human-readable output"
+
test_30() {
local output
local LIMIT=4 # 4MB
# over-quota flag has not yet settled since we do not trigger async
# events based on grace time period expiration
$SHOW_QUOTA_USER
- $RUNAS $DD of=$TESTFILE conv=notrunc oflag=append count=1 || true
+ $RUNAS $DD of=$TESTFILE conv=notrunc oflag=append count=4 || true
cancel_lru_locks osc
# now over-quota flag should be settled and further writes should fail
$SHOW_QUOTA_USER
- $RUNAS $DD of=$TESTFILE conv=notrunc oflag=append count=1 &&
+ $RUNAS $DD of=$TESTFILE conv=notrunc oflag=append count=4 &&
error "grace times were reset"
# cleanup
cleanup_quota_test
do_node $mdt0_node mkdir $mntpt/OBJECTS
do_node $mdt0_node cp $LUSTRE/tests/admin_quotafile_v2.usr $mntpt/OBJECTS
do_node $mdt0_node cp $LUSTRE/tests/admin_quotafile_v2.grp $mntpt/OBJECTS
- do_node $mdt0_node umount -f $mntpt
+ do_node $mdt0_node umount -d -f $mntpt
echo "Setup all..."
setupall