* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2016, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
* Use is subject to license terms.
*/
/* List of pools managed by this master target */
struct list_head qmt_pool_list;
+ /* rw spinlock to protect pool list */
+ rwlock_t qmt_pool_lock;
/* procfs root directory for this qmt */
struct proc_dir_entry *qmt_proc;
};
+#define QPI_MAXNAME (LOV_MAXPOOLNAME + 1)
+
/*
* Per-pool quota information.
* The qmt creates one such structure for each pool
- * with quota enforced. All the structures are kept in a hash which is used to
- * determine whether or not quota is enforced for a given pool.
- * We currently only support the default data pool and default metadata pool
- * with the pool_id 0.
+ * with quota enforced. All the structures are kept in a list.
+ * We currently only support the default data pool and default metadata pool.
*/
struct qmt_pool_info {
- /* link to qmt's pool hash */
- struct hlist_node qpi_hash;
-
/* chained list of all pools managed by the same qmt */
struct list_head qpi_linkage;
- /* Pool key composed of pool_id | (pool_type << 16)
- * Only pool ID 0 is supported for now and the pool type is either
- * QUOTA_RES_MD or QUOTA_RES_DT.
- * immutable after creation. */
- __u32 qpi_key;
+ /* Could be LQUOTA_RES_MD or LQUOTA_RES_DT */
+ int qpi_rtype;
+ char qpi_name[QPI_MAXNAME];
/* track users of this pool instance */
atomic_t qpi_ref;
/* Return the per-environment qmt_thread_info for @env.
 * NOTE(review): lu_env_info() appears to encapsulate the context-key lookup
 * plus lu_env_refill() retry that was previously open-coded below — confirm
 * it preserves the LASSERT-on-NULL guarantee. */
static inline
struct qmt_thread_info *qmt_info(const struct lu_env *env)
{
- struct qmt_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &qmt_thread_key);
- if (info == NULL) {
- lu_env_refill((struct lu_env *)env);
- info = lu_context_key_get(&env->le_ctx, &qmt_thread_key);
- }
- LASSERT(info);
- return info;
+ return lu_env_info(env, &qmt_thread_key);
}
/* helper routine to convert a lu_device into a qmt_device */
return (qmt_hard_exhausted(lqe) || qmt_soft_exhausted(lqe, now));
}
+/* helper routine clearing the default quota setting */
+static inline void qmt_lqe_clear_default(struct lquota_entry *lqe)
+{
+ lqe->lqe_is_default = false;
+ /* The DEFAULT flag is also encoded in lqe_gracetime, shifted above the
+ * low LQUOTA_GRACE_BITS bits that hold the grace time itself; clear
+ * that copy too so the on-flag state stays consistent. */
+ lqe->lqe_gracetime &= ~((__u64)LQUOTA_FLAG_DEFAULT <<
+ LQUOTA_GRACE_BITS);
+}
+
/* number of seconds to wait for slaves to release quota space after
* rebalancing */
#define QMT_REBA_TIMEOUT 2
struct lu_fid *, struct lu_fid *, __u64 *,
struct obd_uuid *);
struct lquota_entry *qmt_pool_lqe_lookup(const struct lu_env *,
- struct qmt_device *, int, int, int,
+ struct qmt_device *, int, int,
union lquota_id *);
/* qmt_entry.c */
extern struct lquota_entry_operations qmt_lqe_ops;
+int qmt_lqe_set_default(const struct lu_env *env, struct qmt_pool_info *pool,
+ struct lquota_entry *lqe, bool create_record);
struct thandle *qmt_trans_start_with_slv(const struct lu_env *,
struct lquota_entry *,
struct dt_object *,
__u64 qmt_alloc_expand(struct lquota_entry *, __u64, __u64);
/* qmt_handler.c */
+int qmt_set_with_lqe(const struct lu_env *env, struct qmt_device *qmt,
+ struct lquota_entry *lqe, __u64 hard, __u64 soft,
+ __u64 time, __u32 valid, bool is_default, bool is_updated);
int qmt_dqacq0(const struct lu_env *, struct lquota_entry *,
struct qmt_device *, struct obd_uuid *, __u32, __u64, __u64,
struct quota_body *);