/*
* Vector of quota entry operations supported on the master
*/
-struct lquota_entry_operations qmt_lqe_ops = {
+const struct lquota_entry_operations qmt_lqe_ops = {
.lqe_init = qmt_lqe_init,
.lqe_read = qmt_lqe_read,
.lqe_debug = qmt_lqe_debug,
if (slv_obj != NULL) {
/* reserve credits for slave index update */
- rc = lquota_disk_declare_write(env, th, slv_obj, &lqe->lqe_id);
+ rc = lquota_disk_declare_write(env, th, slv_obj,
+ &lqes[0]->lqe_id);
if (rc)
GOTO(out, rc);
}
if (rc) {
dt_trans_stop(env, qmt->qmt_child, th);
th = ERR_PTR(rc);
- LQUOTA_ERROR(lqe, "failed to slv declare write for "DFID
+ LQUOTA_ERROR(lqes[0], "failed to slv declare write for "DFID
", rc:%d", PFID(lu_object_fid(&slv_obj->do_lu)),
rc);
} else {
RETURN(0);
}
+/*
+ * Propagate the earliest revoke time among sibling lquota entries.
+ *
+ * Scans the per-env lqes array (qti_lqes(env)); among entries whose
+ * qunit equals \a least_qunit and whose lqe_revoke_time is non-zero,
+ * finds the minimum (i.e. earliest) revoke time and stores it in
+ * \a lqe->lqe_revoke_time.  If no such entry exists, \a lqe's revoke
+ * time is cleared (set to 0).  Does nothing when the env tracks one
+ * lqe or none.
+ *
+ * NOTE(review): assumes the caller holds whatever lock protects
+ * lqe_revoke_time on these entries -- confirm against the caller
+ * (the qunit-adjust path takes the lqe lock per its comment).
+ */
+static inline void
+qmt_adjust_qunit_set_revoke(const struct lu_env *env, struct lquota_entry *lqe,
+			    unsigned long least_qunit)
+{
+	struct lquota_entry *lqe2;
+	time64_t min = 0;
+	int i;
+
+	/* nothing to aggregate with a single (or no) lqe */
+	if (qti_lqes_cnt(env) <= 1)
+		return;
+
+	for (i = 0; i < qti_lqes_cnt(env); i++) {
+		lqe2 = qti_lqes(env)[i];
+		/* only entries already at the least qunit and with a
+		 * revoke time set are relevant */
+		if ((lqe2->lqe_qunit == least_qunit) && lqe2->lqe_revoke_time) {
+			if (!min) {
+				/* first candidate found */
+				min = lqe2->lqe_revoke_time;
+				continue;
+			}
+			/* keep the earliest revoke time seen so far */
+			min = lqe2->lqe_revoke_time < min ?
+				lqe2->lqe_revoke_time : min;
+		}
+	}
+
+	lqe->lqe_revoke_time = min;
+}
+
+
/*
* Adjust qunit size according to quota limits and total granted count.
* The caller must have locked the lqe.
/* reset revoke time */
lqe->lqe_revoke_time = 0;
- if (lqe->lqe_qunit >= qunit &&
- (lqe->lqe_qunit == pool->qpi_least_qunit)) {
- /* initial qunit value is the smallest one */
- lqe->lqe_revoke_time = ktime_get_seconds();
+ if (lqe->lqe_qunit == pool->qpi_least_qunit) {
+ if (lqe->lqe_qunit >= qunit)
+ /* initial qunit value is the smallest one */
+ lqe->lqe_revoke_time = ktime_get_seconds();
+ /* If there are several lqes and lqe_revoke_time is set for
+ * some of them, it means appropriate OSTs have been already
+ * notified with the least qunit and there is no chance to
+	 * free more space. Find an lqe with the minimum (earliest)
+ * revoke_time and set this time to the current one.
+ */
+ qmt_adjust_qunit_set_revoke(env, lqe, pool->qpi_least_qunit);
}
RETURN(need_reseed);
}
if (!lqe_gl->lqe_glbl_data &&
(req_has_rep(qb_flags) || req_is_rel(qb_flags))) {
if (need_reseed)
- CWARN("%s: can't notify - lge_glbl_data is not set",
- qmt->qmt_svname);
+ CDEBUG(D_QUOTA,
+ "%s: can not notify - lge_glbl_data is not set\n",
+ qmt->qmt_svname);
return need_reseed;
}
{
struct qmt_thread_info *qti = qmt_info(env);
- if (qti->qti_lqes_cnt > qti->qti_lqes_num) {
+ if (qti->qti_lqes_cnt >= qti->qti_lqes_num) {
struct lquota_entry **lqes;
lqes = qti->qti_lqes;
OBD_ALLOC(lqes, sizeof(lqe) * qti->qti_lqes_num * 2);
if (qti->qti_lqes_num > QMT_MAX_POOL_NUM)
OBD_FREE(qti->qti_lqes,
qti->qti_lqes_num * sizeof(struct lquota_entry *));
+
+ qti->qti_lqes_num = 0;
+ qti->qti_lqes_cnt = 0;
}
+/*
+ * Return the smallest non-zero qunit among the lquota entries tracked
+ * in the per-env lqes array.  A qunit of 0 means the lqe is not
+ * enforced, so such entries are skipped by the comparison.
+ *
+ * NOTE(review): if the FIRST lqe's qunit is 0, min starts at 0 and the
+ * "qunit && qunit < min" test can never replace it, so 0 is returned
+ * even when later entries have non-zero qunits -- confirm callers
+ * treat a 0 result as "not enforced".
+ */
-inline int qti_lqes_min_qunit(const struct lu_env *env)
+__u64 qti_lqes_min_qunit(const struct lu_env *env)
{
-	int i, min, qunit;
+	__u64 min, qunit;
+	int i;
	for (i = 1, min = qti_lqe_qunit(env, 0); i < qti_lqes_cnt(env); i++) {
		qunit = qti_lqe_qunit(env, i);
-		if (qunit < min)
+		/* if qunit is 0, lqe is not enforced and we can ignore it */
+		if (qunit && qunit < min)
			min = qunit;
	}
	return min;
}
-inline int qti_lqes_edquot(const struct lu_env *env)
+int qti_lqes_edquot(const struct lu_env *env)
{
int i;
return 0;
}
-inline int qti_lqes_restore_init(const struct lu_env *env)
+int qti_lqes_restore_init(const struct lu_env *env)
{
int rc = 0;
- if (qti_lqes_cnt(env) > QMT_MAX_POOL_NUM) {
+ if (qti_lqes_inited(env) && qti_lqes_cnt(env) > QMT_MAX_POOL_NUM) {
OBD_ALLOC(qmt_info(env)->qti_lqes_rstr,
qti_lqes_cnt(env) * sizeof(struct qmt_lqe_restore));
if (!qti_lqes_rstr(env))
return rc;
}
+/*
+ * Free the per-env array of qmt_lqe_restore slots.
+ *
+ * The free is guarded by the same condition under which
+ * qti_lqes_restore_init() allocates: the lqes array must be
+ * initialized and the entry count must exceed QMT_MAX_POOL_NUM
+ * (otherwise no dynamic buffer was allocated and there is nothing
+ * to release).
+ */
-inline void qti_lqes_restore_fini(const struct lu_env *env)
+void qti_lqes_restore_fini(const struct lu_env *env)
{
-	if (qti_lqes_cnt(env) > QMT_MAX_POOL_NUM)
+	if (qti_lqes_inited(env) && qti_lqes_cnt(env) > QMT_MAX_POOL_NUM)
		OBD_FREE(qmt_info(env)->qti_lqes_rstr,
			 qti_lqes_cnt(env) * sizeof(struct qmt_lqe_restore));
}
-inline void qti_lqes_write_lock(const struct lu_env *env)
+void qti_lqes_write_lock(const struct lu_env *env)
{
int i;
lqe_write_lock(qti_lqes(env)[i]);
}
-inline void qti_lqes_write_unlock(const struct lu_env *env)
+void qti_lqes_write_unlock(const struct lu_env *env)
{
int i;
int i, j, idx;
ENTRY;
+ if (!qti_lqes_cnt(env))
+ RETURN_EXIT;
/* lqes array is sorted by qunit - the first entry has minimum qunit.
* Thus start seeding global qunit's array beginning from the 1st lqe
* and appropriate pool. If pools overlapped, slaves from this