From d01cae3ec8758fc27cae91266465b8d627ed55dd Mon Sep 17 00:00:00 2001
From: Sergey Cheremencev
Date: Fri, 15 Oct 2021 17:11:47 +0300
Subject: [PATCH] LU-15110 quota: cosmetic changes in PQ

cosmetic changes in PQ:
 - make tgt_pool_free and qmt_sarr_pool_free void
 - remove outdated comment from qmt_pool_lqes_lookup
 - replace tabs with spaces

HPE-bug-id: LUS-9547
Change-Id: If4918b647eed1d971d00c521d010d0c72d349207
Signed-off-by: Sergey Cheremencev
Reviewed-on: https://review.whamcloud.com/45258
Reviewed-by: Petros Koutoupis
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Shaun Tancheff
Reviewed-by: Oleg Drokin
---
 lustre/include/lu_object.h    |  2 +-
 lustre/obdclass/lu_tgt_pool.c | 11 ++++-------
 lustre/quota/qmt_entry.c      |  3 +--
 lustre/quota/qmt_lock.c       |  4 ++--
 lustre/quota/qmt_pool.c       | 26 +++++++-------------------
 lustre/quota/qsd_entry.c      |  2 +-
 lustre/tests/sanity-quota.sh  |  7 +++----
 7 files changed, 19 insertions(+), 36 deletions(-)

diff --git a/lustre/include/lu_object.h b/lustre/include/lu_object.h
index ae6b91b..809afe1 100644
--- a/lustre/include/lu_object.h
+++ b/lustre/include/lu_object.h
@@ -1538,7 +1538,7 @@ struct lu_tgt_pool {
 int lu_tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
 int lu_tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
 int lu_tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
-int lu_tgt_pool_free(struct lu_tgt_pool *op);
+void lu_tgt_pool_free(struct lu_tgt_pool *op);
 int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts);
 int lu_tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
 
diff --git a/lustre/obdclass/lu_tgt_pool.c b/lustre/obdclass/lu_tgt_pool.c
index 7ca36bd..4bf0d16 100644
--- a/lustre/obdclass/lu_tgt_pool.c
+++ b/lustre/obdclass/lu_tgt_pool.c
@@ -200,7 +200,7 @@ EXPORT_SYMBOL(lu_tgt_pool_remove);
 
 int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts)
 {
-	int rc, i;
+	int i, rc = -ENOENT;
 	ENTRY;
 
 	down_read(&osts->op_rw_sem);
@@ -208,7 +208,6 @@ int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts)
 		if (osts->op_array[i] == idx)
 			GOTO(out, rc = 0);
 	}
-	rc = -ENOENT;
 	EXIT;
 out:
 	up_read(&osts->op_rw_sem);
@@ -224,15 +223,13 @@ EXPORT_SYMBOL(lu_tgt_check_index);
  * deleted from memory.
  *
  * \param[in] op	pool to be freed.
- *
- * \retval 0 on success or if pool was already freed
  */
-int lu_tgt_pool_free(struct lu_tgt_pool *op)
+void lu_tgt_pool_free(struct lu_tgt_pool *op)
 {
 	ENTRY;
 
 	if (op->op_size == 0)
-		RETURN(0);
+		RETURN_EXIT;
 
 	down_write(&op->op_rw_sem);
 
@@ -242,6 +239,6 @@ int lu_tgt_pool_free(struct lu_tgt_pool *op)
 	op->op_size = 0;
 	up_write(&op->op_rw_sem);
 
-	RETURN(0);
+	EXIT;
 }
 EXPORT_SYMBOL(lu_tgt_pool_free);
diff --git a/lustre/quota/qmt_entry.c b/lustre/quota/qmt_entry.c
index c858279..4ad1ef0 100644
--- a/lustre/quota/qmt_entry.c
+++ b/lustre/quota/qmt_entry.c
@@ -301,8 +301,7 @@ struct thandle *qmt_trans_start(const struct lu_env *env,
 int qmt_glb_write_lqes(const struct lu_env *env, struct thandle *th,
 		       __u32 flags, __u64 *ver)
 {
-	int i, rc;
-	rc = 0;
+	int i, rc = 0;
 
 	for (i = 0; i < qti_lqes_cnt(env); i++) {
 		rc = qmt_glb_write(env, th, qti_lqes(env)[i], flags, ver);
diff --git a/lustre/quota/qmt_lock.c b/lustre/quota/qmt_lock.c
index f981838..c28b0b2 100644
--- a/lustre/quota/qmt_lock.c
+++ b/lustre/quota/qmt_lock.c
@@ -464,7 +464,7 @@ int qmt_lvbo_fill(struct lu_device *ld, struct ldlm_lock *lock, void *lvb,
 		 * we are thus dealing with an ID lock. */
 		struct lquota_entry *lqe = res->lr_lvb_data;
 		struct qmt_device *qmt;
-		struct obd_uuid	*uuid;
+		struct obd_uuid *uuid;
 		int idx;
 
 		uuid = &(lock)->l_export->exp_client_uuid;
@@ -490,7 +490,7 @@ int qmt_lvbo_fill(struct lu_device *ld, struct ldlm_lock *lock, void *lvb,
 		lqe_putref(lqe);
 	} else {
 		/* global quota lock */
-		struct dt_object	*obj = res->lr_lvb_data;
+		struct dt_object *obj = res->lr_lvb_data;
 
 		/* return current version of global index */
 		qlvb->lvb_glb_ver = dt_version_get(env, obj);
diff --git a/lustre/quota/qmt_pool.c b/lustre/quota/qmt_pool.c
index d3b7704..5f9d98a 100644
--- a/lustre/quota/qmt_pool.c
+++ b/lustre/quota/qmt_pool.c
@@ -58,7 +58,7 @@ static inline int qmt_sarr_pool_init(struct qmt_pool_info *qpi);
 static inline int qmt_sarr_pool_add(struct qmt_pool_info *qpi,
				    int idx, int min);
 static inline int qmt_sarr_pool_rem(struct qmt_pool_info *qpi, int idx);
-static inline int qmt_sarr_pool_free(struct qmt_pool_info *qpi);
+static inline void qmt_sarr_pool_free(struct qmt_pool_info *qpi);
 static inline int qmt_sarr_check_idx(struct qmt_pool_info *qpi, int idx);
 static inline void qmt_stop_pool_recalc(struct qmt_pool_info *qpi);
 
@@ -821,18 +821,6 @@ int qmt_pool_lqes_lookup(const struct lu_env *env,
			qti_lqes_fini(env);
			GOTO(out, rc = PTR_ERR(lqe));
		}
-		/* Only release could be done for not enforced lqe
-		 * (see qmt_dqacq0). However slave could request to
-		 * release more than not global lqe had granted before
-		 * lqe_enforced was cleared. It is legal case,
-		 * because even if current lqe is not enforced,
-		 * lqes from other pools are still active and avilable
-		 * for acquiring. Furthermore, skip not enforced lqe
-		 * to don't make extra allocations. */
-		/*if (!lqe_is_glbl(lqe) && !lqe->lqe_enforced) {
-			lqe_putref(lqe);
-			continue;
-		}*/
		qti_lqes_add(env, lqe);
	}
	LASSERT(qti_lqes_glbl(env)->lqe_is_global);
@@ -1539,19 +1527,19 @@ static inline int qmt_sarr_pool_rem(struct qmt_pool_info *qpi, int idx)
	}
 }
 
-static inline int qmt_sarr_pool_free(struct qmt_pool_info *qpi)
+static inline void qmt_sarr_pool_free(struct qmt_pool_info *qpi)
 {
	if (qmt_pool_global(qpi))
-		return 0;
+		return;
 
	switch (qpi->qpi_rtype) {
	case LQUOTA_RES_DT:
-		if (!qpi->qpi_sarr.osts.op_array)
-			return 0;
-		return lu_tgt_pool_free(&qpi->qpi_sarr.osts);
+		if (qpi->qpi_sarr.osts.op_array)
+			lu_tgt_pool_free(&qpi->qpi_sarr.osts);
+		return;
	case LQUOTA_RES_MD:
	default:
-		return 0;
+		return;
	}
 }
 
diff --git a/lustre/quota/qsd_entry.c b/lustre/quota/qsd_entry.c
index c3a434f..ae724ec 100644
--- a/lustre/quota/qsd_entry.c
+++ b/lustre/quota/qsd_entry.c
@@ -54,7 +54,7 @@ static void qsd_lqe_init(struct lquota_entry *lqe, void *arg)
 
 /*
  * Update a slave quota entry. This is done by reading enforcement status from
- * the copy of the global index and then how much is the slave currenly owns
+ * the copy of the global index and the amount the slave currenly owns
  * for this user from the slave index copy.
  *
  * \param env - the environment passed by the caller
diff --git a/lustre/tests/sanity-quota.sh b/lustre/tests/sanity-quota.sh
index 6734587..b630044 100755
--- a/lustre/tests/sanity-quota.sh
+++ b/lustre/tests/sanity-quota.sh
@@ -39,7 +39,6 @@ BLK_SZ=1024
 MAX_DQ_TIME=604800
 MAX_IQ_TIME=604800
 QTYPE="ugp"
-# QP exists since this version. Should be finally set before landing.
 VERSION_WITH_QP="2.13.53"
 mds_supports_qp() {
	[ $MDS1_VERSION -lt $(version_code $VERSION_WITH_QP) ] &&
@@ -564,7 +563,7 @@ test_1_check_write() {
	cancel_lru_locks osc
	sync; sync_all_data || true
	# sync means client wrote all it's cache, but id doesn't
-	# garantee that slave got new edquot trough glimpse.
+	# guarantee that slave received new edquot through glimpse.
	# so wait a little to be sure slave got it.
	sleep 5
	$RUNAS $DD of=$testfile count=1 seek=$limit &&
@@ -1531,8 +1530,8 @@ test_3c() {
		error "set user quota failed"
	$LFS setquota -u $TSTUSR -b ${limit}M -B 0 --pool $qpool $DIR ||
		error "set user quota failed"
-	# qpool has minimum soft limit, but it's grace is grater than
-	# grace period of qpool2. Thus write shouldn't fail when
+	# qpool has minimum soft limit, but its grace is greater than
+	# the grace period of qpool2. Thus write shouldn't fail when
	# hit qpool soft limit - only when reaches up qpool2 limit
	# after grace2 seconds.
	$LFS setquota -u $TSTUSR -b ${limit2}M -B 0 --pool $qpool2 $DIR ||
-- 
1.8.3.1