* GPL HEADER END
*/
/*
- * Copyright (c) 2012 Intel, Inc.
+ * Copyright (c) 2012, 2016, Intel Corporation.
* Use is subject to license terms.
*/
-#include "lquota_internal.h"
-
#ifndef _QMT_INTERNAL_H
#define _QMT_INTERNAL_H
+#include "lquota_internal.h"
+
/*
* The Quota Master Target Device.
* The qmt is responsible for:
* Once we support quota on non-default pools, then more pools will
* be added to this hash table and pool master setup would have to be
* handled via configuration logs */
- cfs_hash_t *qmt_pool_hash;
+ struct cfs_hash *qmt_pool_hash;
/* List of pools managed by this master target */
- cfs_list_t qmt_pool_list;
+ struct list_head qmt_pool_list;
/* procfs root directory for this qmt */
- cfs_proc_dir_entry_t *qmt_proc;
+ struct proc_dir_entry *qmt_proc;
+
+ /* dedicated thread in charge of space rebalancing */
+ struct ptlrpc_thread qmt_reba_thread;
+
+ /* list of lqe entry which need space rebalancing */
+ struct list_head qmt_reba_list;
+
+ /* lock protecting rebalancing list */
+ spinlock_t qmt_reba_lock;
unsigned long qmt_stopping:1; /* qmt is stopping */
*/
struct qmt_pool_info {
/* link to qmt's pool hash */
- cfs_hlist_node_t qpi_hash;
+ struct hlist_node qpi_hash;
/* chained list of all pools managed by the same qmt */
- cfs_list_t qpi_linkage;
+ struct list_head qpi_linkage;
/* Pool key composed of pool_id | (pool_type << 16)
* Only pool ID 0 is supported for now and the pool type is either
__u32 qpi_key;
/* track users of this pool instance */
- cfs_atomic_t qpi_ref;
+ atomic_t qpi_ref;
/* back pointer to master target
* immutable after creation. */
/* pointer to dt object associated with global indexes for both user
* and group quota */
- struct dt_object *qpi_glb_obj[MAXQUOTAS];
+ struct dt_object *qpi_glb_obj[LL_MAXQUOTAS];
/* A pool supports two different quota types: user and group quota.
* Each quota type has its own global index and lquota_entry hash table.
*/
- struct lquota_site *qpi_site[MAXQUOTAS];
+ struct lquota_site *qpi_site[LL_MAXQUOTAS];
/* number of slaves registered for each quota types */
- int qpi_slv_nr[MAXQUOTAS];
+ int qpi_slv_nr[LL_MAXQUOTAS];
+
+ /* reference on lqe (ID 0) storing grace time. */
+ struct lquota_entry *qpi_grace_lqe[LL_MAXQUOTAS];
/* procfs root directory for this pool */
- cfs_proc_dir_entry_t *qpi_proc;
+ struct proc_dir_entry *qpi_proc;
/* pool directory where all indexes related to this pool instance are
* stored */
/* Global quota parameters which apply to all quota type */
/* the least value of qunit */
unsigned long qpi_least_qunit;
+
+ /* Least value of qunit when soft limit is exceeded.
+ *
+ * When soft limit is exceeded, qunit will be shrunk to least_qunit
+ * (1M for block limit), that results in significant write performance
+ * drop since the client will turn to sync write from now on.
+ *
+ * To retain the write performance at an acceptable level, we choose
+ * to sacrifice grace time accuracy a bit and use a larger least_qunit
+ * when soft limit is exceeded. It's (qpi_least_qunit * 4) by default,
+ * and user may enlarge it via procfs to get even better performance
+ * (with the cost of losing more grace time accuracy).
+ *
+ * See qmt_calc_softlimit().
+ */
+ unsigned long qpi_soft_least_qunit;
};
/*
static inline bool lqe_is_locked(struct lquota_entry *lqe)
{
LASSERT(lqe_is_master(lqe));
- if (cfs_down_write_trylock(&lqe->lqe_sem) == 0)
+ if (down_write_trylock(&lqe->lqe_sem) == 0)
return true;
lqe_write_unlock(lqe);
return false;
struct qmt_thread_info {
union lquota_rec qti_rec;
union lquota_id qti_id;
- union lquota_id qti_id_bis;
char qti_buf[MTI_NAME_MAXLEN];
struct lu_fid qti_fid;
struct ldlm_res_id qti_resid;
union ldlm_gl_desc qti_gl_desc;
struct quota_body qti_body;
- struct quota_body qti_repbody;
struct qmt_lqe_restore qti_restore;
};
#define LQE_ROOT(lqe) (lqe2qpi(lqe)->qpi_root)
#define LQE_GLB_OBJ(lqe) (lqe2qpi(lqe)->qpi_glb_obj[lqe->lqe_site->lqs_qtype])
+/* helper function returning grace time to use for a given lquota entry */
+static inline __u64 qmt_lqe_grace(struct lquota_entry *lqe)
+{
+ struct qmt_pool_info *pool = lqe2qpi(lqe);
+ struct lquota_entry *grace_lqe;
+
+ /* the pool-wide grace time for this quota type lives in the lqe of
+  * ID 0 (see qpi_grace_lqe); it must already be set up by the time
+  * this helper is called */
+ grace_lqe = pool->qpi_grace_lqe[lqe->lqe_site->lqs_qtype];
+ LASSERT(grace_lqe != NULL);
+
+ return grace_lqe->lqe_gracetime;
+}
+
static inline void qmt_restore(struct lquota_entry *lqe,
struct qmt_lqe_restore *restore)
{
lqe->lqe_qunit = restore->qlr_qunit;
}
+/* Grant \a cnt units of quota space: bump both the total amount granted
+ * on the master entry (lqe_granted) and the per-slave granted counter
+ * \a slv. NB: \a cnt is expanded twice, so callers must not pass an
+ * expression with side effects. */
+#define QMT_GRANT(lqe, slv, cnt) \
+ do { \
+ (lqe)->lqe_granted += (cnt); \
+ (slv) += (cnt); \
+ } while (0)
+/* Release \a cnt units of quota space; exact mirror of QMT_GRANT, with
+ * the same double-evaluation caveat for \a cnt. */
+#define QMT_REL(lqe, slv, cnt) \
+ do { \
+ (lqe)->lqe_granted -= (cnt); \
+ (slv) -= (cnt); \
+ } while (0)
+
+/* helper routine returning true when reached hardlimit */
+static inline bool qmt_hard_exhausted(struct lquota_entry *lqe)
+{
+ /* a hardlimit of 0 disables the check entirely (unlimited) */
+ if (lqe->lqe_hardlimit != 0 && lqe->lqe_granted >= lqe->lqe_hardlimit)
+ return true;
+ return false;
+}
+
+/* helper routine returning true when reached softlimit */
+static inline bool qmt_soft_exhausted(struct lquota_entry *lqe, __u64 now)
+{
+ /* a softlimit of 0 disables the check; an id over its softlimit is
+  * only exhausted once \a now has reached lqe_gracetime (a gracetime
+  * of 0 is skipped too — presumably "no grace deadline armed yet") */
+ if (lqe->lqe_softlimit != 0 && lqe->lqe_granted > lqe->lqe_softlimit &&
+ lqe->lqe_gracetime != 0 && now >= lqe->lqe_gracetime)
+ return true;
+ return false;
+}
+
+/* helper routine returning true when the id has run out of quota space:
+ * - reached hardlimit
+ * OR
+ * - reached softlimit and grace time expired already
+ * \a now is compared directly against lqe_gracetime, so it must be in
+ * the same clock/units (NOTE(review): presumably seconds — confirm in
+ * callers) */
+static inline bool qmt_space_exhausted(struct lquota_entry *lqe, __u64 now)
+{
+ return (qmt_hard_exhausted(lqe) || qmt_soft_exhausted(lqe, now));
+}
+
+/* number of seconds to wait for slaves to release quota space after
+ * rebalancing */
+#define QMT_REBA_TIMEOUT 2
+
/* qmt_pool.c */
void qmt_pool_fini(const struct lu_env *, struct qmt_device *);
int qmt_pool_init(const struct lu_env *, struct qmt_device *);
int qmt_slv_read(const struct lu_env *, struct lquota_entry *,
struct dt_object *, __u64 *);
int qmt_validate_limits(struct lquota_entry *, __u64, __u64);
+void qmt_adjust_qunit(const struct lu_env *, struct lquota_entry *);
+void qmt_adjust_edquot(struct lquota_entry *, __u64);
+void qmt_revalidate(const struct lu_env *, struct lquota_entry *);
+__u64 qmt_alloc_expand(struct lquota_entry *, __u64, __u64);
+
+/* qmt_handler.c */
+int qmt_dqacq0(const struct lu_env *, struct lquota_entry *,
+ struct qmt_device *, struct obd_uuid *, __u32, __u64, __u64,
+ struct quota_body *);
/* qmt_lock.c */
int qmt_intent_policy(const struct lu_env *, struct lu_device *,
int qmt_lvbo_size(struct lu_device *, struct ldlm_lock *);
int qmt_lvbo_fill(struct lu_device *, struct ldlm_lock *, void *, int);
int qmt_lvbo_free(struct lu_device *, struct ldlm_resource *);
+int qmt_start_reba_thread(struct qmt_device *);
+void qmt_stop_reba_thread(struct qmt_device *);
+void qmt_glb_lock_notify(const struct lu_env *, struct lquota_entry *, __u64);
+void qmt_id_lock_notify(struct qmt_device *, struct lquota_entry *);
#endif /* _QMT_INTERNAL_H */