X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fquota%2Fqmt_entry.c;h=d3ed2545d0cc95063b583615a0e3d597d0607697;hb=128137adfc539dd2dd92040c14a63ff27f969820;hp=fa47f52d1b45fe16311aa461d57c41b404efb84a;hpb=984f4ce51fd38caaf0bd2b706a130f7f17c51638;p=fs%2Flustre-release.git

diff --git a/lustre/quota/qmt_entry.c b/lustre/quota/qmt_entry.c
index fa47f52..d3ed254 100644
--- a/lustre/quota/qmt_entry.c
+++ b/lustre/quota/qmt_entry.c
@@ -21,17 +21,13 @@
  * GPL HEADER END
  */
 /*
- * Copyright (c) 2012 Intel, Inc.
+ * Copyright (c) 2012, 2016, Intel Corporation.
  * Use is subject to license terms.
  *
  * Author: Johann Lombardi
  * Author: Niu Yawei
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
-
 #define DEBUG_SUBSYSTEM S_LQUOTA
 
 #include "qmt_internal.h"
@@ -47,7 +43,56 @@ static void qmt_lqe_init(struct lquota_entry *lqe, void *arg)
 	LASSERT(lqe_is_master(lqe));
 
 	lqe->lqe_revoke_time = 0;
-	cfs_init_rwsem(&lqe->lqe_sem);
+	init_rwsem(&lqe->lqe_sem);
+}
+
+/* Apply the default quota setting to the specified quota entry
+ *
+ * \param env - is the environment passed by the caller
+ * \param pool - is the quota pool of the quota entry
+ * \param lqe - is the lquota_entry object to apply default quota on
+ * \param create_record - if true, a global quota record will be created and
+ *                        written to disk.
+ *
+ * \retval 0 : success
+ * \retval -ve : other appropriate errors
+ */
+int qmt_lqe_set_default(const struct lu_env *env, struct qmt_pool_info *pool,
+			struct lquota_entry *lqe, bool create_record)
+{
+	struct lquota_entry *lqe_def;
+	int rc = 0;
+
+	ENTRY;
+
+	if (lqe->lqe_id.qid_uid == 0)
+		RETURN(0);
+
+	lqe_def = pool->qpi_grace_lqe[lqe->lqe_site->lqs_qtype];
+
+	LQUOTA_DEBUG(lqe, "inherit default quota");
+
+	lqe->lqe_is_default = true;
+	lqe->lqe_hardlimit = lqe_def->lqe_hardlimit;
+	lqe->lqe_softlimit = lqe_def->lqe_softlimit;
+
+	if (create_record) {
+		lqe->lqe_uptodate = true;
+		rc = qmt_set_with_lqe(env, pool->qpi_qmt, lqe, 0, 0,
+				      LQUOTA_GRACE_FLAG(0, LQUOTA_FLAG_DEFAULT),
+				      QIF_TIMES, true, false);
+
+		if (rc != 0)
+			LQUOTA_ERROR(lqe, "failed to create the global quota"
+				     " record: %d", rc);
+	}
+
+	if (lqe->lqe_hardlimit == 0 && lqe->lqe_softlimit == 0)
+		lqe->lqe_enforced = false;
+	else
+		lqe->lqe_enforced = true;
+
+	RETURN(rc);
 }
 
 /*
@@ -74,22 +119,19 @@ static int qmt_lqe_read(const struct lu_env *env, struct lquota_entry *lqe,
 
 	switch (rc) {
 	case -ENOENT:
-		/* no such entry, assume quota isn't enforced for this user */
-		lqe->lqe_enforced = false;
+		qmt_lqe_set_default(env, pool, lqe, true);
 		break;
 	case 0:
 		/* copy quota settings from on-disk record */
 		lqe->lqe_granted = qti->qti_glb_rec.qbr_granted;
 		lqe->lqe_hardlimit = qti->qti_glb_rec.qbr_hardlimit;
 		lqe->lqe_softlimit = qti->qti_glb_rec.qbr_softlimit;
-		lqe->lqe_gracetime = qti->qti_glb_rec.qbr_time;
-
-		if (lqe->lqe_hardlimit == 0 && lqe->lqe_softlimit == 0)
-			/* {hard,soft}limit=0 means no quota enforced */
-			lqe->lqe_enforced = false;
-		else
-			lqe->lqe_enforced = true;
+		lqe->lqe_gracetime = LQUOTA_GRACE(qti->qti_glb_rec.qbr_time);
+		if (lqe->lqe_hardlimit == 0 && lqe->lqe_softlimit == 0 &&
+		    (LQUOTA_FLAG(qti->qti_glb_rec.qbr_time) &
+		     LQUOTA_FLAG_DEFAULT))
+			qmt_lqe_set_default(env, pool, lqe, false);
 		break;
 	default:
 		LQUOTA_ERROR(lqe, "failed to read quota entry from disk, rc:%d",
@@ -97,6 +139,13 @@ static int qmt_lqe_read(const struct lu_env *env, struct lquota_entry *lqe,
 		RETURN(rc);
 	}
 
+	if (lqe->lqe_id.qid_uid == 0 ||
+	    (lqe->lqe_hardlimit == 0 && lqe->lqe_softlimit == 0))
+		/* {hard,soft}limit=0 means no quota enforced */
+		lqe->lqe_enforced = false;
+	else
+		lqe->lqe_enforced = true;
+
 	LQUOTA_DEBUG(lqe, "read");
 	RETURN(0);
 }
@@ -111,22 +160,21 @@ static int qmt_lqe_read(const struct lu_env *env, struct lquota_entry *lqe,
  */
 static void qmt_lqe_debug(struct lquota_entry *lqe, void *arg,
 			  struct libcfs_debug_msg_data *msgdata,
-			  const char *fmt, va_list args)
+			  struct va_format *vaf)
 {
 	struct qmt_pool_info *pool = (struct qmt_pool_info *)arg;
 
-	libcfs_debug_vmsg2(msgdata, fmt, args,
-			   "qmt:%s pool:%d-%s id:"LPU64" enforced:%d hard:"LPU64
-			   " soft:"LPU64" granted:"LPU64" time:"LPU64" qunit:"
-			   LPU64" edquot:%d may_rel:"LPU64" revoke:"LPU64"\n",
-			   pool->qpi_qmt->qmt_svname,
-			   pool->qpi_key & 0x0000ffff,
-			   RES_NAME(pool->qpi_key >> 16),
-			   lqe->lqe_id.qid_uid, lqe->lqe_enforced,
-			   lqe->lqe_hardlimit, lqe->lqe_softlimit,
-			   lqe->lqe_granted, lqe->lqe_gracetime,
-			   lqe->lqe_qunit, lqe->lqe_edquot, lqe->lqe_may_rel,
-			   lqe->lqe_revoke_time);
+	libcfs_debug_msg(msgdata,
+			 "%pV qmt:%s pool:%s-%s id:%llu enforced:%d hard:%llu soft:%llu granted:%llu time:%llu qunit: %llu edquot:%d may_rel:%llu revoke:%lld default:%s\n",
+			 vaf, pool->qpi_qmt->qmt_svname,
+			 RES_NAME(pool->qpi_rtype),
+			 pool->qpi_name,
+			 lqe->lqe_id.qid_uid, lqe->lqe_enforced,
+			 lqe->lqe_hardlimit, lqe->lqe_softlimit,
+			 lqe->lqe_granted, lqe->lqe_gracetime,
+			 lqe->lqe_qunit, lqe->lqe_edquot, lqe->lqe_may_rel,
+			 lqe->lqe_revoke_time,
+			 lqe->lqe_is_default ? "yes" : "no");
 }
 
 /*
@@ -268,9 +316,15 @@ int qmt_glb_write(const struct lu_env *env, struct thandle *th,
 
 	/* fill global index with updated quota settings */
 	rec->qbr_granted = lqe->lqe_granted;
-	rec->qbr_hardlimit = lqe->lqe_hardlimit;
-	rec->qbr_softlimit = lqe->lqe_softlimit;
-	rec->qbr_time = lqe->lqe_gracetime;
+	if (lqe->lqe_is_default) {
+		rec->qbr_hardlimit = 0;
+		rec->qbr_softlimit = 0;
+		rec->qbr_time = LQUOTA_GRACE_FLAG(0, LQUOTA_FLAG_DEFAULT);
+	} else {
+		rec->qbr_hardlimit = lqe->lqe_hardlimit;
+		rec->qbr_softlimit = lqe->lqe_softlimit;
+		rec->qbr_time = lqe->lqe_gracetime;
+	}
 
 	/* write new quota settings */
 	rc = lquota_disk_write(env, th, LQE_GLB_OBJ(lqe), &lqe->lqe_id,
@@ -331,7 +385,7 @@ int qmt_slv_read(const struct lu_env *env, struct lquota_entry *lqe,
 		RETURN(rc);
 	}
 
-	LQUOTA_DEBUG(lqe, "successful slv read "LPU64, *granted);
+	LQUOTA_DEBUG(lqe, "successful slv read %llu", *granted);
 	RETURN(0);
 }
 
@@ -364,7 +418,7 @@ int qmt_slv_write(const struct lu_env *env, struct thandle *th,
 	LASSERT(lqe_is_master(lqe));
 	LASSERT(lqe_is_locked(lqe));
 
-	LQUOTA_DEBUG(lqe, "write slv "DFID" granted:"LPU64,
+	LQUOTA_DEBUG(lqe, "write slv "DFID" granted:%llu",
 		     PFID(lu_object_fid(&slv_obj->do_lu)), granted);
 
 	/* never delete the entry, otherwise, it'll not be transferred
@@ -379,7 +433,7 @@ int qmt_slv_write(const struct lu_env *env, struct thandle *th,
 			       (struct dt_rec *)rec, flags, ver);
 	if (rc) {
 		LQUOTA_ERROR(lqe, "failed to update slave index "DFID" granted:"
-			     LPU64, PFID(lu_object_fid(&slv_obj->do_lu)),
+			     "%llu", PFID(lu_object_fid(&slv_obj->do_lu)),
 			     granted);
 		RETURN(rc);
 	}
@@ -416,7 +470,7 @@ void qmt_adjust_edquot(struct lquota_entry *lqe, __u64 now)
 	struct qmt_pool_info *pool = lqe2qpi(lqe);
 	ENTRY;
 
-	if (!lqe->lqe_enforced)
+	if (!lqe->lqe_enforced || lqe->lqe_id.qid_uid == 0)
 		RETURN_EXIT;
 
 	if (!lqe->lqe_edquot) {
@@ -427,21 +481,29 @@ void qmt_adjust_edquot(struct lquota_entry *lqe, __u64 now)
 		/* the qmt still has available space */
 		RETURN_EXIT;
 
-	if (lqe->lqe_qunit != pool->qpi_least_qunit)
-		/* we haven't reached the minimal qunit yet, so there is
-		 * still hope that the rebalancing process might free up
-		 * some quota space */
-		RETURN_EXIT;
+	/* See comment in qmt_adjust_qunit(). LU-4139 */
+	if (qmt_hard_exhausted(lqe) ||
+	    pool->qpi_rtype != LQUOTA_RES_DT) {
+		time64_t lapse;
+
+		/* we haven't reached the minimal qunit yet so there is
+		 * still hope that the rebalancing process might free
+		 * up some quota space */
+		if (lqe->lqe_qunit != pool->qpi_least_qunit)
+			RETURN_EXIT;
 
-	if (lqe->lqe_revoke_time == 0)
 		/* least qunit value not sent to all slaves yet */
-		RETURN_EXIT;
+		if (lqe->lqe_revoke_time == 0)
+			RETURN_EXIT;
 
-	if (lqe->lqe_may_rel != 0 &&
-	    cfs_time_before_64(cfs_time_shift_64(-QMT_REBA_TIMEOUT),
-			       lqe->lqe_revoke_time))
 		/* Let's give more time to slave to release space */
-		RETURN_EXIT;
+		lapse = ktime_get_seconds() - QMT_REBA_TIMEOUT;
+		if (lqe->lqe_may_rel != 0 && lqe->lqe_revoke_time > lapse)
+			RETURN_EXIT;
+	} else {
+		if (lqe->lqe_qunit > pool->qpi_soft_least_qunit)
+			RETURN_EXIT;
+	}
 
 	/* set edquot flag */
 	lqe->lqe_edquot = true;
@@ -472,6 +534,30 @@ void qmt_adjust_edquot(struct lquota_entry *lqe, __u64 now)
 	EXIT;
 }
 
+/* Using least_qunit when over the block softlimit would seriously impact
+ * write performance, so we need some special tweaking in that case. */
+static __u64 qmt_calc_softlimit(struct lquota_entry *lqe, bool *oversoft)
+{
+	struct qmt_pool_info *pool = lqe2qpi(lqe);
+
+	LASSERT(lqe->lqe_softlimit != 0);
+	*oversoft = false;
+	/* No need to do special tweaking for inode limit */
+	if (pool->qpi_rtype != LQUOTA_RES_DT)
+		return lqe->lqe_softlimit;
+
+	if (lqe->lqe_granted <= lqe->lqe_softlimit +
+				pool->qpi_soft_least_qunit) {
+		return lqe->lqe_softlimit;
+	} else if (lqe->lqe_hardlimit != 0) {
+		*oversoft = true;
+		return lqe->lqe_hardlimit;
+	} else {
+		*oversoft = true;
+		return 0;
+	}
+}
+
 /*
  * Try to grant more quota space back to slave.
  *
@@ -494,10 +580,16 @@ __u64 qmt_alloc_expand(struct lquota_entry *lqe, __u64 granted, __u64 spare)
 	slv_cnt = lqe2qpi(lqe)->qpi_slv_nr[lqe->lqe_site->lqs_qtype];
 	qunit = lqe->lqe_qunit;
 
-	if (lqe->lqe_softlimit != 0)
-		remaining = lqe->lqe_softlimit;
-	else
+	/* See comment in qmt_adjust_qunit(). LU-4139. */
+	if (lqe->lqe_softlimit != 0) {
+		bool oversoft;
+		remaining = qmt_calc_softlimit(lqe, &oversoft);
+		if (remaining == 0)
+			remaining = lqe->lqe_granted +
+					pool->qpi_soft_least_qunit;
+	} else {
 		remaining = lqe->lqe_hardlimit;
+	}
 
 	if (lqe->lqe_granted >= remaining)
 		RETURN(0);
@@ -540,12 +632,12 @@ void qmt_adjust_qunit(const struct lu_env *env, struct lquota_entry *lqe)
 {
 	struct qmt_pool_info *pool = lqe2qpi(lqe);
 	int slv_cnt;
-	__u64 qunit, limit;
+	__u64 qunit, limit, qunit2 = 0;
 	ENTRY;
 
 	LASSERT(lqe_is_locked(lqe));
 
-	if (!lqe->lqe_enforced)
+	if (!lqe->lqe_enforced || lqe->lqe_id.qid_uid == 0)
 		/* no quota limits */
 		RETURN_EXIT;
 
@@ -560,7 +652,15 @@ void qmt_adjust_qunit(const struct lu_env *env, struct lquota_entry *lqe)
 	 * beyond the soft limit. This will impact performance, but that's the
 	 * price of an accurate grace time management. */
 	if (lqe->lqe_softlimit != 0) {
-		limit = lqe->lqe_softlimit;
+		bool oversoft;
+		/* As a compromise between write performance and grace time
+		 * accuracy, the block qunit size will be shrunk to
+		 * qpi_soft_least_qunit when over softlimit. LU-4139. */
+		limit = qmt_calc_softlimit(lqe, &oversoft);
+		if (oversoft)
+			qunit2 = pool->qpi_soft_least_qunit;
+		if (limit == 0)
+			GOTO(done, qunit = qunit2);
 	} else if (lqe->lqe_hardlimit != 0) {
 		limit = lqe->lqe_hardlimit;
 	} else {
@@ -596,8 +696,14 @@ void qmt_adjust_qunit(const struct lu_env *env, struct lquota_entry *lqe)
 		/* current qunit value still fits, let's see if we can afford to
 		 * increase qunit now ...
 		 * To increase qunit again, we have to be under 25% */
-		while (limit >= lqe->lqe_granted + 6 * qunit * slv_cnt)
+		while (qunit && limit >= lqe->lqe_granted + 6 * qunit * slv_cnt)
 			qunit <<= 2;
+
+		if (!qunit) {
+			qunit = limit;
+			do_div(qunit, 2 * slv_cnt);
+		}
+
 	} else {
 		/* shrink qunit until we find a suitable value */
 		while (qunit > pool->qpi_least_qunit &&
@@ -605,11 +711,14 @@ void qmt_adjust_qunit(const struct lu_env *env, struct lquota_entry *lqe)
 			qunit >>= 2;
 	}
 
+	if (qunit2 && qunit > qunit2)
+		qunit = qunit2;
+done:
 	if (lqe->lqe_qunit == qunit)
 		/* keep current qunit */
 		RETURN_EXIT;
 
-	LQUOTA_DEBUG(lqe, "%s qunit to "LPU64,
+	LQUOTA_DEBUG(lqe, "%s qunit to %llu",
 		     lqe->lqe_qunit < qunit ? "increasing" : "decreasing",
 		     qunit);
 
@@ -624,7 +733,7 @@ void qmt_adjust_qunit(const struct lu_env *env, struct lquota_entry *lqe)
 		qmt_id_lock_notify(pool->qpi_qmt, lqe);
 	else if (lqe->lqe_qunit == pool->qpi_least_qunit)
 		/* initial qunit value is the smallest one */
-		lqe->lqe_revoke_time = cfs_time_current_64();
+		lqe->lqe_revoke_time = ktime_get_seconds();
 
 	EXIT;
 }
@@ -639,6 +748,6 @@ void qmt_revalidate(const struct lu_env *env, struct lquota_entry *lqe)
 		 * were initialized */
 		qmt_adjust_qunit(env, lqe);
 		if (lqe->lqe_qunit != 0)
-			qmt_adjust_edquot(lqe, cfs_time_current_sec());
+			qmt_adjust_edquot(lqe, ktime_get_real_seconds());
 	}
 }
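
The following is a small stand-alone sketch, not Lustre code, of the LU-4139 soft-limit tweak that qmt_calc_softlimit() and qmt_adjust_qunit() implement in the diff above: while the granted space stays within soft_least_qunit of the soft limit, the limit used for qunit scaling is the soft limit; once usage goes beyond that point, the hard limit is used instead (or the limit is treated as unbounded when there is no hard limit), and the caller caps the resulting qunit at soft_least_qunit rather than dropping all the way to least_qunit. Function and variable names here (calc_limit, slack, the sample numbers) are illustrative assumptions, not part of the Lustre API.

/* Illustrative user-space sketch only -- not the Lustre implementation. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Mirror of the qmt_calc_softlimit() idea: pick the limit used for qunit
 * scaling and report whether usage is past the "soft limit + slack" point. */
static uint64_t calc_limit(uint64_t granted, uint64_t softlimit,
			   uint64_t hardlimit, uint64_t soft_least_qunit,
			   bool *oversoft)
{
	*oversoft = false;
	if (granted <= softlimit + soft_least_qunit)
		return softlimit;
	*oversoft = true;
	return hardlimit;	/* 0 means no hard limit */
}

int main(void)
{
	/* illustrative numbers, in filesystem blocks */
	uint64_t soft = 1000, hard = 4000, slack = 64;
	uint64_t granted[] = { 900, 1100, 2000 };
	bool oversoft;

	for (int i = 0; i < 3; i++) {
		uint64_t limit = calc_limit(granted[i], soft, hard, slack,
					    &oversoft);
		/* When oversoft is set, the caller would also cap the
		 * computed qunit at soft_least_qunit (the "slack" here). */
		printf("granted=%llu -> limit=%llu oversoft=%d\n",
		       (unsigned long long)granted[i],
		       (unsigned long long)limit, oversoft);
	}
	return 0;
}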