From a2e4fab07bc0412d593eebf3d8ce9d48687f6d56 Mon Sep 17 00:00:00 2001
From: Hongchao Zhang
Date: Thu, 19 Oct 2023 14:33:47 +0800
Subject: [PATCH] LU-16097 quota: release preacquired quota when over limits

The quota pre-acquired on each MDT or OST should be released when the
quota ID as a whole is over its limits, for instance, after the
administrator has decreased the quota limits for that ID.

Lustre-change: https://review.whamcloud.com/48576
Lustre-commit: 57ac32a22372065b789ca491a568f075e755d339

Test-Parameters: testlist=sanity-quota
Test-Parameters: testlist=sanity-quota

Signed-off-by: Hongchao Zhang
Change-Id: I6263b835d4ae6a3fd03f9a2bc4f463949cbc74d4
Reviewed-by: Alexander Boyko
Reviewed-by: Sergey Cheremencev
Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/53070
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Andreas Dilger
---
 lustre/include/uapi/linux/lustre/lustre_user.h |  1 +
 lustre/quota/lquota_internal.h                 |  3 +-
 lustre/quota/qmt_lock.c                        |  5 ++
 lustre/quota/qsd_entry.c                       |  5 +-
 lustre/quota/qsd_handler.c                     | 18 ++++-
 lustre/quota/qsd_lock.c                        |  3 +
 lustre/quota/qsd_writeback.c                   |  5 ++
 lustre/tests/sanity-quota.sh                   | 92 +++++++++++++++++++++-----
 8 files changed, 110 insertions(+), 22 deletions(-)

diff --git a/lustre/include/uapi/linux/lustre/lustre_user.h b/lustre/include/uapi/linux/lustre/lustre_user.h
index 3506508..103fcbe 100644
--- a/lustre/include/uapi/linux/lustre/lustre_user.h
+++ b/lustre/include/uapi/linux/lustre/lustre_user.h
@@ -1389,6 +1389,7 @@ static inline __u64 lustre_stoqb(__kernel_size_t space)
 #define LQUOTA_FLAG_DEFAULT	0x0001
 #define LQUOTA_FLAG_DELETED	0x0002
 #define LQUOTA_FLAG_RESET	0x0004
+#define LQUOTA_FLAG_REVOKE	0x0008
 
 #define LUSTRE_Q_CMD_IS_POOL(cmd)		\
 	(cmd == LUSTRE_Q_GETQUOTAPOOL ||	\
diff --git a/lustre/quota/lquota_internal.h b/lustre/quota/lquota_internal.h
index e7d1496..e1718cf 100644
--- a/lustre/quota/lquota_internal.h
+++ b/lustre/quota/lquota_internal.h
@@ -188,7 +188,8 @@ struct lquota_entry {
 			lqe_is_default:1, /* the default quota is used */
 			lqe_is_global:1,  /* lqe belongs to global pool "0x0" */
 			lqe_is_deleted:1, /* lqe will be deleted soon */
-			lqe_is_reset:1;   /* lqe has been reset */
+			lqe_is_reset:1,   /* lqe has been reset */
+			lqe_revoke:1;     /* all extra grant will be revoked */
 
 	/* the lock to protect lqe_glbl_data */
 	struct mutex	lqe_glbl_data_lock;
diff --git a/lustre/quota/qmt_lock.c b/lustre/quota/qmt_lock.c
index a85cc99..51bd429 100644
--- a/lustre/quota/qmt_lock.c
+++ b/lustre/quota/qmt_lock.c
@@ -829,6 +829,11 @@ void qmt_glb_lock_notify(const struct lu_env *env, struct lquota_entry *lqe,
 		qti->qti_gl_desc.lquota_desc.gl_softlimit = lqe->lqe_softlimit;
 		qti->qti_gl_desc.lquota_desc.gl_time = LQUOTA_GRACE_FLAG(0,
 							LQUOTA_FLAG_RESET);
+	} else if (lqe->lqe_granted > lqe->lqe_hardlimit) {
+		qti->qti_gl_desc.lquota_desc.gl_hardlimit = lqe->lqe_hardlimit;
+		qti->qti_gl_desc.lquota_desc.gl_softlimit = lqe->lqe_softlimit;
+		qti->qti_gl_desc.lquota_desc.gl_time = LQUOTA_GRACE_FLAG(0,
+							LQUOTA_FLAG_REVOKE);
 	} else {
 		qti->qti_gl_desc.lquota_desc.gl_hardlimit = lqe->lqe_hardlimit;
 		qti->qti_gl_desc.lquota_desc.gl_softlimit = lqe->lqe_softlimit;
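
Note on the qmt_lock.c hunk above: when a limit change leaves lqe_granted
above the new lqe_hardlimit, qmt_glb_lock_notify() now advertises
LQUOTA_FLAG_REVOKE to the slaves through the glimpse descriptor. The flag
travels in the upper bits of the 64-bit gl_time field, beside the grace
time. The following standalone C sketch illustrates that packing; the
48-bit split and the helper names are assumptions for illustration, not
the authoritative lustre_user.h macros:

/* Illustrative sketch: how a flag such as LQUOTA_FLAG_REVOKE can ride in
 * the upper bits of a 64-bit time field next to the grace time. */
#include <stdint.h>
#include <stdio.h>

#define GRACE_BITS	48			/* assumed split */
#define GRACE_MASK	((1ULL << GRACE_BITS) - 1)
#define FLAG_REVOKE	0x0008			/* mirrors LQUOTA_FLAG_REVOKE */

static uint64_t pack_grace_flag(uint64_t grace, uint64_t flags)
{
	return (flags << GRACE_BITS) | (grace & GRACE_MASK);
}

int main(void)
{
	uint64_t gl_time = pack_grace_flag(0, FLAG_REVOKE);

	/* a slave recovers the two halves like this */
	printf("revoke=%d grace=%llu\n",
	       (int)!!((gl_time >> GRACE_BITS) & FLAG_REVOKE),
	       (unsigned long long)(gl_time & GRACE_MASK));
	return 0;
}

Reusing the existing time field avoids changing the on-wire format of the
glimpse descriptor.
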
diff --git a/lustre/quota/qsd_entry.c b/lustre/quota/qsd_entry.c
index 69d1b40..4a09adc 100644
--- a/lustre/quota/qsd_entry.c
+++ b/lustre/quota/qsd_entry.c
@@ -177,14 +177,15 @@ static void qsd_lqe_debug(struct lquota_entry *lqe, void *arg,
 	struct qsd_qtype_info *qqi = (struct qsd_qtype_info *)arg;
 
 	libcfs_debug_msg(msgdata,
-			 "%pV qsd:%s qtype:%s id:%llu enforced:%d granted: %llu pending:%llu waiting:%llu req:%d usage: %llu qunit:%llu qtune:%llu edquot:%d default:%s\n",
+			 "%pV qsd:%s qtype:%s id:%llu enforced:%d granted: %llu pending:%llu waiting:%llu req:%d usage: %llu qunit:%llu qtune:%llu edquot:%d default:%s revoke:%d\n",
 			 vaf, qqi->qqi_qsd->qsd_svname, qtype_name(qqi->qqi_qtype),
 			 lqe->lqe_id.qid_uid, lqe->lqe_enforced, lqe->lqe_granted,
 			 lqe->lqe_pending_write, lqe->lqe_waiting_write,
 			 lqe->lqe_pending_req, lqe->lqe_usage, lqe->lqe_qunit, lqe->lqe_qtune,
-			 lqe->lqe_edquot, lqe->lqe_is_default ? "yes" : "no");
+			 lqe->lqe_edquot, lqe->lqe_is_default ? "yes" : "no",
+			 lqe->lqe_revoke);
 }
 
 /*
diff --git a/lustre/quota/qsd_handler.c b/lustre/quota/qsd_handler.c
index a7e7644..b254b8d 100644
--- a/lustre/quota/qsd_handler.c
+++ b/lustre/quota/qsd_handler.c
@@ -233,7 +233,19 @@ static bool qsd_calc_adjust(struct lquota_entry *lqe, struct quota_body *qbody)
 	/* valid per-ID lock
 	 * Apply good old quota qunit adjustment logic which has been around
 	 * since lustre 1.4:
-	 * 1. release spare quota space? */
+	 * 1. revoke all extra grant
+	 */
+	if (lqe->lqe_revoke) {
+		lqe->lqe_revoke = 0;
+
+		LQUOTA_DEBUG(lqe, "revoke pre-acquired quota: %llu - %llu\n",
+			     granted, usage);
+		qbody->qb_count = granted - usage;
+		qbody->qb_flags = QUOTA_DQACQ_FL_REL;
+		RETURN(true);
+	}
+
+	/* 2. release spare quota space? */
 	if (granted > usage + lqe->lqe_qunit) {
 		/* pre-release quota space */
 		if (qbody == NULL)
@@ -252,7 +264,7 @@ static bool qsd_calc_adjust(struct lquota_entry *lqe, struct quota_body *qbody)
 		RETURN(true);
 	}
 
-	/* 2. Any quota overrun? */
+	/* 3. Any quota overrun? */
 	if (lqe->lqe_usage > lqe->lqe_granted) {
 		/* we overconsumed quota space, we report usage in request so
 		 * that master can adjust it unconditionally */
@@ -263,7 +275,7 @@ static bool qsd_calc_adjust(struct lquota_entry *lqe, struct quota_body *qbody)
 		qbody->qb_flags = QUOTA_DQACQ_FL_REPORT;
 	}
 
-	/* 3. Time to pre-acquire? */
+	/* 4. Time to pre-acquire? */
 	if (!lqe->lqe_edquot && !lqe->lqe_nopreacq && usage > 0 &&
 	    lqe->lqe_qunit != 0 && granted < usage + lqe->lqe_qtune) {
 		/* To pre-acquire quota space, we report how much spare quota
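
Note on the qsd_handler.c hunks above: the revoke rung is deliberately
first in the ladder, and it hands everything above current usage back in
a single DQACQ release instead of relying on the qunit/qtune heuristics
of rung 2. For example, if an administrator drops an inode limit to 2048
while a slave still holds granted = 102400 with usage = 5000, the revoke
path returns 97400 to the master at once. A minimal standalone C sketch
of the renumbered ladder follows; it is a simplification, the flag values
are placeholders (not the wire-protocol constants), and qtune is
approximated as qunit / 2:

/* Simplified model of the slave-side adjustment ladder in
 * qsd_calc_adjust(); illustration only, not the verbatim function. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { FL_REL = 1, FL_REPORT = 2 };	/* placeholder flag values */

struct body { uint64_t qb_count; int qb_flags; };

static bool calc_adjust(bool revoke, uint64_t granted, uint64_t usage,
			uint64_t qunit, struct body *qb)
{
	if (revoke && granted > usage) {	/* 1. revoke all extra grant */
		qb->qb_count = granted - usage;
		qb->qb_flags = FL_REL;
		return true;
	}
	if (granted > usage + qunit) {		/* 2. release spare space */
		qb->qb_count = granted - usage - qunit / 2;
		qb->qb_flags = FL_REL;
		return true;
	}
	/* rungs 3 (overrun report) and 4 (pre-acquire) elided */
	return false;
}

int main(void)
{
	struct body qb;

	/* the worked example above: limit cut while a large grant is held */
	if (calc_adjust(true, 102400, 5000, 1024, &qb))
		printf("release %llu\n", (unsigned long long)qb.qb_count);
	return 0;
}

Clearing lqe_revoke as soon as it is consumed makes the revoke one-shot:
if the release does not go through, the master can signal it again on a
later glimpse while the grant is still outstanding.
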
diff --git a/lustre/quota/qsd_lock.c b/lustre/quota/qsd_lock.c
index 5033f2e..3c63f35 100644
--- a/lustre/quota/qsd_lock.c
+++ b/lustre/quota/qsd_lock.c
@@ -513,6 +513,9 @@ static int qsd_id_glimpse_ast(struct ldlm_lock *lock, void *data)
 	qsd_set_edquot(lqe, !!(desc->gl_flags & LQUOTA_FL_EDQUOT));
 	lqe_write_unlock(lqe);
 
+	if (!!(desc->gl_flags & LQUOTA_FL_EDQUOT))
+		qsd_adjust_schedule(lqe, false, false);
+
 	if (wakeup)
 		wake_up_all(&lqe->lqe_waiters);
 	lqe_putref(lqe);
diff --git a/lustre/quota/qsd_writeback.c b/lustre/quota/qsd_writeback.c
index b322d36..b02521e 100644
--- a/lustre/quota/qsd_writeback.c
+++ b/lustre/quota/qsd_writeback.c
@@ -348,8 +348,13 @@ out_del:
 		lqe->lqe_adjust_time = 0;
 		spin_unlock(&qsd->qsd_adjust_lock);
 
+		if (LQUOTA_FLAG(upd->qur_rec.lqr_glb_rec.qbr_time) &
+		    LQUOTA_FLAG_REVOKE)
+			lqe->lqe_revoke = 1;
+
 		/* Report usage asynchronously */
 		rc = qsd_adjust(env, lqe);
+		lqe->lqe_revoke = 0;
 		if (rc)
 			LQUOTA_ERROR(lqe, "failed to report usage, rc:%d", rc);
 	}
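
Note on the two slave-side hunks above: qsd_id_glimpse_ast() now schedules
an adjustment whenever a glimpse raises the EDQUOT state, so an over-limit
slave reacts promptly instead of waiting for the next local trigger, and
the writeback path turns LQUOTA_FLAG_REVOKE from the global index record
into the transient lqe_revoke bit, clearing it again right after
qsd_adjust() runs. A condensed, compilable C sketch of that one-shot
sequence (the types and helpers here are stand-ins, not Lustre's, and the
48-bit split is the same assumption as in the earlier sketch):

#include <stdint.h>
#include <stdio.h>

#define GRACE_BITS	48	/* assumed split, as above */
#define FLAG_REVOKE	0x0008

struct lqe_sketch { int revoke; };

static void qsd_adjust_sketch(struct lqe_sketch *lqe)
{
	if (lqe->revoke)
		printf("sending DQACQ release for all extra grant\n");
}

static void on_glb_update(struct lqe_sketch *lqe, uint64_t qbr_time)
{
	if ((qbr_time >> GRACE_BITS) & FLAG_REVOKE) /* master wants it back */
		lqe->revoke = 1;
	qsd_adjust_sketch(lqe);	/* consume the request once */
	lqe->revoke = 0;	/* always cleared; a later glimpse re-arms it */
}

int main(void)
{
	struct lqe_sketch lqe = { 0 };

	on_glb_update(&lqe, (uint64_t)FLAG_REVOKE << GRACE_BITS);
	return 0;
}
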
diff --git a/lustre/tests/sanity-quota.sh b/lustre/tests/sanity-quota.sh
index 5a0fac8..d700f05 100755
--- a/lustre/tests/sanity-quota.sh
+++ b/lustre/tests/sanity-quota.sh
@@ -478,50 +478,56 @@ reset_quota_settings() {
 get_quota_on_qsd() {
 	local facet
+	local device
 	local spec
 	local qid
 	local qtype
 	local output
 
 	facet=$1
-	case "$2" in
+	device=$2
+	case "$3" in
 	usr) qtype="limit_user";;
 	grp) qtype="limit_group";;
-	*) error "unknown quota parameter $2";;
+	prj) qtype="limit_project";;
+	*) error "unknown quota parameter $3";;
 	esac
 
-	qid=$3
-	case "$4" in
+	qid=$4
+	case "$5" in
 	hardlimit) spec=4;;
 	softlimit) spec=6;;
-	*) error "unknown quota parameter $4";;
+	*) error "unknown quota parameter $5";;
 	esac
 
-	do_facet $facet $LCTL get_param osd-*.*-OST0000.quota_slave.$qtype |
+	do_facet $facet $LCTL get_param osd-*.*-${device}.quota_slave.$qtype |
 		awk '($3 == '$qid') {getline; print $'$spec'; exit;}' | tr -d ,
 }
 
-wait_quota_setting_synced() {
+wait_quota_synced() {
 	local value
-	local qtype=$1
-	local qid=$2
-	local limit_type=$3
-	local limit_val=$4
+	local facet=$1
+	local device=$2
+	local qtype=$3
+	local qid=$4
+	local limit_type=$5
+	local limit_val=$6
 	local interval=0
 
-	value=$(get_quota_on_qsd ost1 $qtype $qid $limit_type)
+	value=$(get_quota_on_qsd $facet $device $qtype $qid $limit_type)
 	while [[ $value != $limit_val ]]; do
 		(( interval != 0 )) ||
-			do_facet ost1 $LCTL set_param \
-				osd-*.*-OST0000.quota_slave.force_reint=1
+			do_facet $facet $LCTL set_param \
+				osd-*.*-${device}.quota_slave.force_reint=1
 
+		echo $value
 		(( interval <= 20 )) ||
 			error "quota ($value) don't update on QSD, $limit_val"
 		interval=$((interval + 1))
 		sleep 1
-		value=$(get_quota_on_qsd ost1 $qtype $qid $limit_type)
+		value=$(get_quota_on_qsd $facet $device $qtype $qid $limit_type)
 	done
 }
 
@@ -5781,7 +5787,7 @@ test_84()
 		error "set user quota failed"
 
 	$LFS quota -gv $TSTUSR $DIR
-	wait_quota_setting_synced "grp" $TSTID "hardlimit" $((10*1024*1024))
+	wait_quota_synced "ost1" "OST0000" "grp" $TSTID "hardlimit" "10485760"
 
 	mkdir -p $dir1 || error "failed to mkdir"
 	chown $TSTUSR.$TSTUSR $dir1 || error "chown $dir1 failed"
@@ -5888,6 +5894,60 @@ test_84()
 }
 run_test 84 "Reset quota should fix the insane granted quota"
 
+test_preacquired_quota()
+{
+	local test_dir=$1
+	local qtype=$2
+	local qtype_name=$3
+	local qid=$4
+
+	[[ "$qtype" == "-p" ]] && change_project -sp $qid $DIR/$tdir
+
+	$LFS setquota $qtype $qid -i 100K -I 100K $DIR ||
+		error "failed to set file [$qtype] quota"
+
+	$RUNAS createmany -m $test_dir/tfile- 5000 ||
+		error "failed to create files, expected to succeed"
+
+	wait_zfs_commit $SINGLEMDS
+	$LFS setquota $qtype $qid -i 2K -I 2K $DIR ||
+		error "failed to decrease file [$qtype] quota"
+
+	wait_quota_synced "mds1" "MDT0000" $qtype_name $qid "hardlimit" "2048"
+
+	# make sure the lqe->lqe_edquot is set
+	$RUNAS createmany -m $test_dir/tfile2- 10
+	sleep 5
+
+	$RUNAS createmany -m $test_dir/tfile3- 30 &&
+		error "succeeded in creating files, expected to fail"
+
+	rm -f $test_dir/tfile*
+	$LFS setquota $qtype $qid -i 0 -I 0 $DIR ||
+		error "failed to reset file [$qtype] quota"
+}
+
+test_86()
+{
+	(( $MDS1_VERSION >= $(version_code 2.14.0.113) )) ||
+		skip "need MDS >= 2.14.0.113 for quota over limit release fix"
+
+	local test_dir="$DIR/$tdir/test_dir"
+
+	setup_quota_test || error "setup quota failed with $?"
+	set_mdt_qtype $QTYPE || error "enable mdt quota failed"
+
+	$LFS setdirstripe -c 1 -i 0 $test_dir || error "setdirstripe failed"
+	chmod 777 $test_dir
+
+	test_preacquired_quota "$test_dir" "-u" "usr" "$TSTID"
+	test_preacquired_quota "$test_dir" "-g" "grp" "$TSTID"
+
+	is_project_quota_supported || return 0
+	test_preacquired_quota "$test_dir" "-p" "prj" "1000"
+}
+run_test 86 "Pre-acquired quota should be released if quota is over limit"
+
 quota_fini()
 {
 	do_nodes $(comma_list $(nodes_list)) \
-- 
1.8.3.1
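
Usage note: with the standard Lustre test environment configured, the new
case can be run on its own with "ONLY=86 bash lustre/tests/sanity-quota.sh".
It drives the revoke path for user, group and (where supported) project
quotas against MDT0000: createmany first inflates the slave's grant, the
limit is then cut to 2048 inodes, and wait_quota_synced plus the final
failing createmany confirm that the excess grant was actually handed back
to the master rather than left pre-acquired on the slave.
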