* GPL HEADER END
*/
/*
- * Copyright (c) 2012 Intel, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
* Use is subject to license terms.
*/
-#include "lquota_internal.h"
-
#ifndef _QMT_INTERNAL_H
#define _QMT_INTERNAL_H
+#include "lquota_internal.h"
+
/*
* The Quota Master Target Device.
* The qmt is responsible for:
* Once we support quota on non-default pools, then more pools will
* be added to this hash table and pool master setup would have to be
* handled via configuration logs */
- cfs_hash_t *qmt_pool_hash;
+ struct cfs_hash *qmt_pool_hash;
/* List of pools managed by this master target */
- cfs_list_t qmt_pool_list;
+ struct list_head qmt_pool_list;
/* procfs root directory for this qmt */
- cfs_proc_dir_entry_t *qmt_proc;
+ struct proc_dir_entry *qmt_proc;
/* dedicated thread in charge of space rebalancing */
struct ptlrpc_thread qmt_reba_thread;
/* list of lqe entry which need space rebalancing */
- cfs_list_t qmt_reba_list;
+ struct list_head qmt_reba_list;
/* lock protecting rebalancing list */
spinlock_t qmt_reba_lock;
*/
struct qmt_pool_info {
/* link to qmt's pool hash */
- cfs_hlist_node_t qpi_hash;
+ struct hlist_node qpi_hash;
/* chained list of all pools managed by the same qmt */
- cfs_list_t qpi_linkage;
+ struct list_head qpi_linkage;
/* Pool key composed of pool_id | (pool_type << 16)
* Only pool ID 0 is supported for now and the pool type is either
__u32 qpi_key;
/* track users of this pool instance */
- cfs_atomic_t qpi_ref;
+ atomic_t qpi_ref;
/* back pointer to master target
* immutable after creation. */
struct lquota_entry *qpi_grace_lqe[MAXQUOTAS];
/* procfs root directory for this pool */
- cfs_proc_dir_entry_t *qpi_proc;
+ struct proc_dir_entry *qpi_proc;
/* pool directory where all indexes related to this pool instance are
* stored */
(slv) -= (cnt); \
} while (0)
-/* helper routine returning true when the id has run out of quota space, which
- * means that it has either:
- * - reached hardlimit
- * OR
- * - reached softlimit and grace time expired already */
-static inline bool qmt_space_exhausted(struct lquota_entry *lqe, __u64 now)
+/* helper routine returning true when the id has reached its hardlimit */
+static inline bool qmt_hard_exhausted(struct lquota_entry *lqe)
{
	if (lqe->lqe_hardlimit != 0 && lqe->lqe_granted >= lqe->lqe_hardlimit)
		return true;
+	return false;
+}
+
+/* helper routine returning true when the id exceeded its softlimit and the
+ * grace time expired already */
+static inline bool qmt_soft_exhausted(struct lquota_entry *lqe, __u64 now)
+{
	if (lqe->lqe_softlimit != 0 && lqe->lqe_granted > lqe->lqe_softlimit &&
	    lqe->lqe_gracetime != 0 && now >= lqe->lqe_gracetime)
		return true;
	return false;
}
+/* helper routine returning true when the id has run out of quota space:
+ * - reached hardlimit
+ * OR
+ * - reached softlimit and grace time expired already */
+static inline bool qmt_space_exhausted(struct lquota_entry *lqe, __u64 now)
+{
+	return (qmt_hard_exhausted(lqe) || qmt_soft_exhausted(lqe, now));
+}
+
+
/* number of seconds to wait for slaves to release quota space after
* rebalancing */
#define QMT_REBA_TIMEOUT 2