LU-16097 quota: release preacquired quota when over limits 76/48576/25
author     Hongchao Zhang <hongchao@whamcloud.com>
           Thu, 19 Oct 2023 06:33:47 +0000 (14:33 +0800)
committer  Oleg Drokin <green@whamcloud.com>
           Wed, 8 Nov 2023 22:01:27 +0000 (22:01 +0000)
The pre-acquired quota on each MDT or OST should be released when
the total granted quota exceeds the limits, for instance after an
administrator has decreased the quota limits for some quota ID.
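
For context, a minimal reproduction sketch of the scenario this change
addresses (mount point, user and file counts are illustrative, not taken
from the patch; createmany is a helper from the Lustre test suite; see the
new test_86 below for the actual regression test):

    MNT=/mnt/lustre
    # generous inode limit: the MDT slave pre-acquires quota while files are created
    lfs setquota -u quota_usr -i 100000 -I 100000 $MNT
    su quota_usr -c "createmany -m $MNT/qtest/f- 5000"
    # shrink the limit below what the slave has already been granted
    lfs setquota -u quota_usr -i 2000 -I 2000 $MNT
    # with this patch the QMT broadcasts LQUOTA_FLAG_REVOKE and the slave releases
    # its spare grant, so creates beyond the new limit fail with EDQUOT
    su quota_usr -c "createmany -m $MNT/qtest/g- 30" && echo "BUG: over-limit create succeeded"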

Test-Parameters: testlist=sanity-quota ossversion=2.15.3
Test-Parameters: testlist=sanity-quota mdsversion=2.15.3
Signed-off-by: Hongchao Zhang <hongchao@whamcloud.com>
Change-Id: I6263b835d4ae6a3fd03f9a2bc4f463949cbc74d4
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/48576
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Alexander Boyko <alexander.boyko@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Reviewed-by: Sergey Cheremencev <scherementsev@ddn.com>
lustre/include/uapi/linux/lustre/lustre_user.h
lustre/quota/lquota_internal.h
lustre/quota/qmt_lock.c
lustre/quota/qsd_entry.c
lustre/quota/qsd_handler.c
lustre/quota/qsd_lock.c
lustre/quota/qsd_writeback.c
lustre/tests/sanity-quota.sh

diff --git a/lustre/include/uapi/linux/lustre/lustre_user.h b/lustre/include/uapi/linux/lustre/lustre_user.h
index 113dc45..a4863a6 100644
@@ -1415,6 +1415,7 @@ static inline __u64 toqb(__kernel_size_t space)
 #define LQUOTA_FLAG_DEFAULT    0x0001
 #define LQUOTA_FLAG_DELETED    0x0002
 #define LQUOTA_FLAG_RESET      0x0004
+#define LQUOTA_FLAG_REVOKE     0x0008
 
 #define LUSTRE_Q_CMD_IS_POOL(cmd)              \
        (cmd == LUSTRE_Q_GETQUOTAPOOL ||        \
diff --git a/lustre/quota/lquota_internal.h b/lustre/quota/lquota_internal.h
index e4a481c..a0b3c6a 100644
@@ -189,7 +189,8 @@ struct lquota_entry {
                        lqe_is_default:1, /* the default quota is used */
                        lqe_is_global:1,  /* lqe belongs to global pool "0x0"*/
                        lqe_is_deleted:1, /* lqe will be deleted soon */
-                       lqe_is_reset:1;   /* lqe has been reset */
+                       lqe_is_reset:1,   /* lqe has been reset */
+                       lqe_revoke:1;     /* all extra grant will be revoked */
 
        /* the lock to protect lqe_glbl_data */
        struct mutex             lqe_glbl_data_lock;
diff --git a/lustre/quota/qmt_lock.c b/lustre/quota/qmt_lock.c
index 9935897..1b42537 100644
@@ -824,6 +824,11 @@ void qmt_glb_lock_notify(const struct lu_env *env, struct lquota_entry *lqe,
                qti->qti_gl_desc.lquota_desc.gl_softlimit = lqe->lqe_softlimit;
                qti->qti_gl_desc.lquota_desc.gl_time = LQUOTA_GRACE_FLAG(0,
                                                        LQUOTA_FLAG_RESET);
+       } else if (lqe->lqe_granted > lqe->lqe_hardlimit) {
+               qti->qti_gl_desc.lquota_desc.gl_hardlimit = lqe->lqe_hardlimit;
+               qti->qti_gl_desc.lquota_desc.gl_softlimit = lqe->lqe_softlimit;
+               qti->qti_gl_desc.lquota_desc.gl_time = LQUOTA_GRACE_FLAG(0,
+                                                       LQUOTA_FLAG_REVOKE);
        } else {
                qti->qti_gl_desc.lquota_desc.gl_hardlimit = lqe->lqe_hardlimit;
                qti->qti_gl_desc.lquota_desc.gl_softlimit = lqe->lqe_softlimit;
diff --git a/lustre/quota/qsd_entry.c b/lustre/quota/qsd_entry.c
index 625f122..a2f0154 100644
@@ -177,14 +177,15 @@ static void qsd_lqe_debug(struct lquota_entry *lqe, void *arg,
        struct qsd_qtype_info   *qqi = (struct qsd_qtype_info *)arg;
 
        libcfs_debug_msg(msgdata,
-                        "%pV qsd:%s qtype:%s id:%llu enforced:%d granted: %llu pending:%llu waiting:%llu req:%d usage: %llu qunit:%llu qtune:%llu edquot:%d default:%s\n",
+                        "%pV qsd:%s qtype:%s id:%llu enforced:%d granted: %llu pending:%llu waiting:%llu req:%d usage: %llu qunit:%llu qtune:%llu edquot:%d default:%s revoke:%d\n",
                         vaf,
                         qqi->qqi_qsd->qsd_svname, qtype_name(qqi->qqi_qtype),
                         lqe->lqe_id.qid_uid, lqe->lqe_enforced,
                         lqe->lqe_granted, lqe->lqe_pending_write,
                         lqe->lqe_waiting_write, lqe->lqe_pending_req,
                         lqe->lqe_usage, lqe->lqe_qunit, lqe->lqe_qtune,
-                        lqe->lqe_edquot, lqe->lqe_is_default ? "yes" : "no");
+                        lqe->lqe_edquot, lqe->lqe_is_default ? "yes" : "no",
+                        lqe->lqe_revoke);
 }
 
 /*
diff --git a/lustre/quota/qsd_handler.c b/lustre/quota/qsd_handler.c
index 57fe0c0..7e19ccf 100644
@@ -233,7 +233,19 @@ static bool qsd_calc_adjust(struct lquota_entry *lqe, struct quota_body *qbody)
        /* valid per-ID lock
         * Apply good old quota qunit adjustment logic which has been around
         * since lustre 1.4:
-        * 1. release spare quota space? */
+        * 1. revoke all extra grant
+        */
+       if (lqe->lqe_revoke) {
+               lqe->lqe_revoke = 0;
+
+               LQUOTA_DEBUG(lqe, "revoke pre-acquired quota: %llu - %llu\n",
+                            granted, usage);
+               qbody->qb_count = granted - usage;
+               qbody->qb_flags = QUOTA_DQACQ_FL_REL;
+               RETURN(true);
+       }
+
+       /* 2. release spare quota space? */
        if (granted > usage + lqe->lqe_qunit) {
                /* pre-release quota space */
                if (qbody == NULL)
@@ -252,7 +264,7 @@ static bool qsd_calc_adjust(struct lquota_entry *lqe, struct quota_body *qbody)
                RETURN(true);
        }
 
-       /* 2. Any quota overrun? */
+       /* 3. Any quota overrun? */
        if (lqe->lqe_usage > lqe->lqe_granted) {
                /* we overconsumed quota space, we report usage in request so
                 * that master can adjust it unconditionally */
@@ -263,7 +275,7 @@ static bool qsd_calc_adjust(struct lquota_entry *lqe, struct quota_body *qbody)
                qbody->qb_flags = QUOTA_DQACQ_FL_REPORT;
        }
 
-       /* 3. Time to pre-acquire? */
+       /* 4. Time to pre-acquire? */
        if (!lqe->lqe_edquot && !lqe->lqe_nopreacq && usage > 0 &&
            lqe->lqe_qunit != 0 && granted < usage + lqe->lqe_qtune) {
                /* To pre-acquire quota space, we report how much spare quota
diff --git a/lustre/quota/qsd_lock.c b/lustre/quota/qsd_lock.c
index 6bb125b..646f2c3 100644
@@ -513,6 +513,9 @@ static int qsd_id_glimpse_ast(struct ldlm_lock *lock, void *data)
        qsd_set_edquot(lqe, !!(desc->gl_flags & LQUOTA_FL_EDQUOT));
        lqe_write_unlock(lqe);
 
+       if (!!(desc->gl_flags & LQUOTA_FL_EDQUOT))
+               qsd_adjust_schedule(lqe, false, false);
+
        if (wakeup)
                wake_up(&lqe->lqe_waiters);
        lqe_putref(lqe);
diff --git a/lustre/quota/qsd_writeback.c b/lustre/quota/qsd_writeback.c
index 21bc53f..823c0bb 100644
@@ -348,8 +348,13 @@ out_del:
                lqe->lqe_adjust_time = 0;
                spin_unlock(&qsd->qsd_adjust_lock);
 
+               if (LQUOTA_FLAG(upd->qur_rec.lqr_glb_rec.qbr_time) &
+                                                       LQUOTA_FLAG_REVOKE)
+                       lqe->lqe_revoke = 1;
+
                /* Report usage asynchronously */
                rc = qsd_adjust(env, lqe);
+               lqe->lqe_revoke = 0;
                if (rc)
                        LQUOTA_ERROR(lqe, "failed to report usage, rc:%d", rc);
        }
diff --git a/lustre/tests/sanity-quota.sh b/lustre/tests/sanity-quota.sh
index c643f09..028e9f2 100755
@@ -499,50 +499,56 @@ reset_quota_settings() {
 
 get_quota_on_qsd() {
        local facet
+       local device
        local spec
        local qid
        local qtype
        local output
 
        facet=$1
-       case "$2" in
+       device=$2
+       case "$3" in
                usr) qtype="limit_user";;
                grp) qtype="limit_group";;
-               *)         error "unknown quota parameter $2";;
+               prj) qtype="limit_project";;
+               *)         error "unknown quota parameter $3";;
        esac
 
-       qid=$3
-       case "$4" in
+       qid=$4
+       case "$5" in
                hardlimit) spec=4;;
                softlimit) spec=6;;
-               *)         error "unknown quota parameter $4";;
+               *)         error "unknown quota parameter $5";;
        esac
 
-       do_facet $facet $LCTL get_param osd-*.*-OST0000.quota_slave.$qtype |
+       do_facet $facet $LCTL get_param osd-*.*-${device}.quota_slave.$qtype |
                awk '($3 == '$qid') {getline; print $'$spec'; exit;}' | tr -d ,
 }
 
-wait_quota_setting_synced() {
+wait_quota_synced() {
        local value
-       local qtype=$1
-       local qid=$2
-       local limit_type=$3
-       local limit_val=$4
+       local facet=$1
+       local device=$2
+       local qtype=$3
+       local qid=$4
+       local limit_type=$5
+       local limit_val=$6
        local interval=0
 
-       value=$(get_quota_on_qsd ost1 $qtype $qid $limit_type)
+       value=$(get_quota_on_qsd $facet $device $qtype $qid $limit_type)
        while [[ $value != $limit_val ]]; do
                (( interval != 0 )) ||
-                       do_facet ost1 $LCTL set_param \
-                               osd-*.*-OST0000.quota_slave.force_reint=1
+                       do_facet $facet $LCTL set_param \
+                               osd-*.*-${device}.quota_slave.force_reint=1
 
+               echo $value
                (( interval <= 20 )) ||
                        error "quota ($value) don't update on QSD, $limit_val"
 
                interval=$((interval + 1))
                sleep 1
 
-               value=$(get_quota_on_qsd ost1 $qtype $qid $limit_type)
+               value=$(get_quota_on_qsd $facet $device $qtype $qid $limit_type)
        done
 }
 
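
For reference, the renamed wait_quota_synced() helper now takes an explicit
facet and device, so it can poll any quota slave rather than only ost1/OST0000;
a usage sketch based on the call sites added in the hunks below:

    # poll the MDT0000 slave on mds1 until it sees the new 2048-inode hard limit
    wait_quota_synced "mds1" "MDT0000" "usr" $TSTID "hardlimit" "2048"
    # the OST form used by test_84
    wait_quota_synced "ost1" "OST0000" "grp" $TSTID "hardlimit" "10485760"
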
@@ -5953,7 +5959,7 @@ test_84()
                 error "set user quota failed"
        $LFS quota -gv $TSTUSR $DIR
 
-       wait_quota_setting_synced "grp" $TSTID "hardlimit" $((10*1024*1024))
+       wait_quota_synced "ost1" "OST0000" "grp" $TSTID "hardlimit" "10485760"
 
        mkdir -p $dir1 || error "failed to mkdir"
        chown $TSTUSR.$TSTUSR $dir1 || error "chown $dir1 failed"
@@ -6102,6 +6108,60 @@ test_85()
 }
 run_test 85 "do not hung at write with the least_qunit"
 
+test_preacquired_quota()
+{
+       local test_dir=$1
+       local qtype=$2
+       local qtype_name=$3
+       local qid=$4
+
+       [[ "$qtype" == "-p" ]] && change_project -sp $qid $DIR/$tdir
+
+       $LFS setquota $qtype $qid -i 100K -I 100K $DIR ||
+               error "failed to set file [$qtype] quota"
+
+       $RUNAS createmany -m $test_dir/tfile- 5000 ||
+               error "failed to create files, expect succeed"
+
+       wait_zfs_commit $SINGLEMDS
+       $LFS setquota $qtype $qid -i 2K -I 2K $DIR ||
+               error "failed to decrease file [$qtype] quota"
+
+       wait_quota_synced "mds1" "MDT0000" $qtype_name $qid "hardlimit" "2048"
+
+       # make sure the lqe->lqe_edquot is set
+       $RUNAS createmany -m $test_dir/tfile2- 10
+       sleep 5
+
+       $RUNAS createmany -m $test_dir/tfile3- 30 &&
+               error "succeed to create files, expect failed"
+
+       rm -f $test_dir/tfile*
+       $LFS setquota $qtype $qid -i 0 -I 0 $DIR ||
+               error "failed to reset file user quota"
+}
+
+test_86()
+{
+       (( $MDS1_VERSION >= $(version_code 2.15.57.41) )) ||
+               skip "need MDS >= 2.15.57.41 for quota over limit release fix"
+
+       local test_dir="$DIR/$tdir/test_dir"
+
+       setup_quota_test || error "setup quota failed with $?"
+       set_mdt_qtype $QTYPE || error "enable mdt quota failed"
+
+       $LFS setdirstripe -c 1 -i 0 $test_dir || error "setdirstripe failed"
+       chmod 777 $test_dir
+
+       test_preacquired_quota "$test_dir" "-u" "usr" "$TSTID"
+       test_preacquired_quota "$test_dir" "-g" "grp" "$TSTID"
+
+       is_project_quota_supported || return 0
+       test_preacquired_quota "$test_dir" "-p" "prj" "1000"
+}
+run_test 86 "Pre-acquired quota should be released if quota is over limit"
+
 quota_fini()
 {
        do_nodes $(comma_list $(nodes_list)) \