* GPL HEADER END
*/
/*
- * Copyright (c) 2012 Whamcloud, Inc.
+ * Copyright (c) 2012, 2013, Intel Corporation.
* Use is subject to license terms.
*/
#define _QSD_INTERNAL_H
struct qsd_type_info;
+struct qsd_fsinfo;
/*
* A QSD instance implements quota enforcement support for a given OSD.
* are exported */
cfs_proc_dir_entry_t *qsd_proc;
+ /* export used for the connection to quota master */
+ struct obd_export *qsd_exp;
+
+ /* ldlm namespace used for quota locks */
+ struct ldlm_namespace *qsd_ns;
+
/* on-disk directory where to store index files for this qsd instance */
struct dt_object *qsd_root;
* future. For the time being, we can just use an array. */
struct qsd_qtype_info *qsd_type_array[MAXQUOTAS];
+ /* per-filesystem quota information */
+ struct qsd_fsinfo *qsd_fsinfo;
+
+ /* link into qfs_qsd_list of qfs_fsinfo */
+ cfs_list_t qsd_link;
+
+ /* list of lqe entry which might need quota space adjustment */
+ cfs_list_t qsd_adjust_list;
+
+ /* lock protecting adjust list */
+ spinlock_t qsd_adjust_lock;
+
+ /* dedicated thread for updating slave index files. */
+ struct ptlrpc_thread qsd_upd_thread;
+
+ /* list of update tasks */
+ cfs_list_t qsd_upd_list;
+
+ /* r/w spinlock protecting:
+ * - the state flags
+ * - the qsd update list
+ * - the deferred list
+ * - flags of the qsd_qtype_info */
+ rwlock_t qsd_lock;
+
+ /* Default quota settings which apply to all identifiers */
+ /* when blk qunit reaches this value, later write reqs from client
+ * should be sync. b=16642 */
+ unsigned long qsd_sync_threshold;
+
+ /* how long a service thread can wait for quota space.
+ * value dynamically computed from obd_timeout and at_max if not
+ * enforced here (via procfs) */
+ int qsd_timeout;
+
unsigned long qsd_is_md:1, /* managing quota for mdt */
- qsd_stopping:1; /* qsd_instance is stopping */
+ qsd_started:1, /* instance is now started */
+ qsd_prepared:1, /* qsd_prepare() successfully
+ * called */
+ qsd_exp_valid:1,/* qsd_exp is now valid */
+ qsd_stopping:1, /* qsd_instance is stopping */
+ qsd_acct_failed:1; /* failed to set up acct
+ * for one quota type */
};
/*
/* Global index FID to use for this quota type */
struct lu_fid qqi_fid;
+ /* Slave index FID allocated by the master */
+ struct lu_fid qqi_slv_fid;
+
/* back pointer to qsd device
* immutable after creation. */
struct qsd_instance *qqi_qsd;
+ /* handle of global quota lock */
+ struct lustre_handle qqi_lockh;
+
/* Local index files storing quota settings for this quota type */
struct dt_object *qqi_acct_obj; /* accounting object */
struct dt_object *qqi_slv_obj; /* slave index copy */
/* Current object versions */
__u64 qqi_slv_ver; /* slave index version */
__u64 qqi_glb_ver; /* global index version */
+
+ /* per quota ID information. All lquota entry are kept in a hash table
+ * and read from disk on cache miss. */
+ struct lquota_site *qqi_site;
+
+ /* Reintegration thread */
+ struct ptlrpc_thread qqi_reint_thread;
+
+ /* statistics on operations performed by this slave */
+ struct lprocfs_stats *qqi_stats;
+
+ /* deferred update for the global index copy */
+ cfs_list_t qqi_deferred_glb;
+ /* deferred update for the slave index copy */
+ cfs_list_t qqi_deferred_slv;
+
+ /* Various flags representing the current state of the slave for this
+ * quota type. */
+ unsigned long qqi_glb_uptodate:1, /* global index uptodate
+ with master */
+ qqi_slv_uptodate:1, /* slave index uptodate
+ with master */
+ qqi_reint:1; /* in reintegration or not */
+
+ /* A list of references to this instance, for debugging */
+ struct lu_ref qqi_reference;
+};
+
+/*
+ * Per-filesystem quota information
+ * Structure tracking quota enforcement status on a per-filesystem basis
+ */
+struct qsd_fsinfo {
+ /* filesystem name */
+ char qfs_name[MTI_NAME_MAXLEN];
+
+ /* what type of quota is enabled for each resource type. */
+ unsigned int qfs_enabled[LQUOTA_NR_RES];
+
+ /* list of all qsd_instance for this fs */
+ cfs_list_t qfs_qsd_list;
+ /* NOTE(review): qfs_sem presumably serializes access to qfs_qsd_list,
+ * qfs_enabled and qfs_ref — confirm against the users in qsd_config.c */
+ struct semaphore qfs_sem;
+
+ /* link to the global quota fsinfo list. */
+ cfs_list_t qfs_link;
+
+ /* reference count */
+ int qfs_ref;
+};
/*
* Helper functions & prototypes
*/
+/* helper routine to find the qsd_qtype_info associated with a lquota_entry;
+ * only valid on the slave side, hence the assertion on !lqe_is_master() */
+static inline struct qsd_qtype_info *lqe2qqi(struct lquota_entry *lqe)
+{
+ LASSERT(!lqe_is_master(lqe));
+ /* on a slave, the site's parent back-pointer is the owning qqi */
+ return (struct qsd_qtype_info *)lqe->lqe_site->lqs_parent;
+}
+
/* qqi_getref/putref is used to track users of a qqi structure */
static inline void qqi_getref(struct qsd_qtype_info *qqi)
{
#define QSD_RES_TYPE(qsd) ((qsd)->qsd_is_md ? LQUOTA_RES_MD : LQUOTA_RES_DT)
+/* update record for slave & global index copy */
+struct qsd_upd_rec {
+ cfs_list_t qur_link; /* link into qsd_upd_list */
+ union lquota_id qur_qid; /* quota ID the update applies to */
+ union lquota_rec qur_rec; /* record to be written to the index */
+ struct qsd_qtype_info *qur_qqi; /* quota type owning this update */
+ struct lquota_entry *qur_lqe; /* associated lquota entry, if any */
+ __u64 qur_ver; /* index version of the update */
+ bool qur_global; /* presumably true for the global index
+ * copy, false for the slave copy —
+ * TODO confirm in qsd_writeback.c */
+};
+
/* Common data shared by qsd-level handlers. This is allocated per-thread to
* reduce stack consumption. */
struct qsd_thread_info {
struct ldlm_enqueue_info qti_einfo;
struct lustre_handle qti_lockh;
__u64 qti_slv_ver;
- union ldlm_wire_lvb qti_lvb;
+ struct lquota_lvb qti_lvb;
union {
struct quota_body qti_body;
struct idx_info qti_ii;
struct qsd_thread_info *info;
info = lu_context_key_get(&env->le_ctx, &qsd_thread_key);
+ if (info == NULL) {
+ lu_env_refill((struct lu_env *)env);
+ info = lu_context_key_get(&env->le_ctx, &qsd_thread_key);
+ }
LASSERT(info);
return info;
}
+/* helper function to check whether a given quota type is enabled.
+ * Returns non-zero when quota enforcement for \type is enabled on the
+ * resource (MD or DT) managed by this qsd instance, 0 otherwise. */
+static inline int qsd_type_enabled(struct qsd_instance *qsd, int type)
+{
+ int enabled, pool;
+
+ LASSERT(qsd != NULL);
+ LASSERT(type < MAXQUOTAS);
+
+ /* no per-filesystem quota information attached yet */
+ if (qsd->qsd_fsinfo == NULL)
+ return 0;
+
+ /* pick the resource type this instance manages */
+ pool = qsd->qsd_is_md ? LQUOTA_RES_MD : LQUOTA_RES_DT;
+ enabled = qsd->qsd_fsinfo->qfs_enabled[pool - LQUOTA_FIRST_RES];
+
+ /* qfs_enabled is a bitmask with one bit per quota type */
+ return enabled & (1 << type);
+}
+
+/* helper function to set new qunit and compute associated qtune value.
+ * No-op when the qunit value is unchanged. */
+static inline void qsd_set_qunit(struct lquota_entry *lqe, __u64 qunit)
+{
+ if (lqe->lqe_qunit == qunit)
+ return;
+
+ lqe->lqe_qunit = qunit;
+
+ /* With very large qunit support, we can't afford to have a static
+ * qtune value, e.g. with a 1PB qunit and qtune set to 50%, we would
+ * start pre-allocation when 512TB of free quota space remains.
+ * Therefore, we adapt qtune depending on the actual qunit value */
+ if (qunit == 0) /* qunit reset to 0 */
+ lqe->lqe_qtune = 0; /* qtune = 0 */
+ else if (qunit == 1024) /* if 1MB or 1K inodes */
+ lqe->lqe_qtune = qunit >> 1; /* => 50% */
+ else if (qunit <= 1024 * 1024) /* up to 1GB or 1M inodes */
+ lqe->lqe_qtune = qunit >> 2; /* => 25% */
+ else if (qunit <= 4 * 1024 * 1024) /* NOTE(review): comment said
+ * "16GB or 16M inodes" but
+ * 4*1024*1024 == 4GB/4M by the
+ * scale used above — verify */
+ lqe->lqe_qtune = qunit >> 3; /* => 12.5% */
+ else /* above 4GB/4M */
+ lqe->lqe_qtune = 1024 * 1024; /* value capped to 1GB/1M */
+
+ LQUOTA_DEBUG(lqe, "changing qunit & qtune");
+
+ /* turn on pre-acquire when qunit is modified */
+ lqe->lqe_nopreacq = false;
+}
+
+#define QSD_WB_INTERVAL 60 /* 60 seconds */
+
+/* helper function calculating how long a service thread should be waiting for
+ * quota space */
+static inline int qsd_wait_timeout(struct qsd_instance *qsd)
+{
+ /* an explicit timeout (set via procfs) takes precedence */
+ if (qsd->qsd_timeout != 0)
+ return qsd->qsd_timeout;
+ /* otherwise derive the timeout from at_max and obd_timeout */
+ return min_t(int, at_max / 2, obd_timeout / 2);
+}
+
+/* qsd_entry.c */
+extern struct lquota_entry_operations qsd_lqe_ops;
+int qsd_refresh_usage(const struct lu_env *, struct lquota_entry *);
+int qsd_update_index(const struct lu_env *, struct qsd_qtype_info *,
+ union lquota_id *, bool, __u64, void *);
+int qsd_update_lqe(const struct lu_env *, struct lquota_entry *, bool,
+ void *);
+int qsd_write_version(const struct lu_env *, struct qsd_qtype_info *,
+ __u64, bool);
+
+/* qsd_lock.c */
+extern struct ldlm_enqueue_info qsd_glb_einfo;
+extern struct ldlm_enqueue_info qsd_id_einfo;
+int qsd_id_lock_match(struct lustre_handle *, struct lustre_handle *);
+int qsd_id_lock_cancel(const struct lu_env *, struct lquota_entry *);
+
+/* qsd_reint.c */
+int qsd_start_reint_thread(struct qsd_qtype_info *);
+void qsd_stop_reint_thread(struct qsd_qtype_info *);
+
/* qsd_request.c */
typedef void (*qsd_req_completion_t) (const struct lu_env *,
struct qsd_qtype_info *,
struct quota_body *, struct quota_body *,
struct lustre_handle *,
- union ldlm_wire_lvb *, void *, int);
+ struct lquota_lvb *, void *, int);
int qsd_send_dqacq(const struct lu_env *, struct obd_export *,
struct quota_body *, bool, qsd_req_completion_t,
struct qsd_qtype_info *, struct lustre_handle *,
struct lquota_entry *);
int qsd_intent_lock(const struct lu_env *, struct obd_export *,
struct quota_body *, bool, int, qsd_req_completion_t,
- struct qsd_qtype_info *, union ldlm_wire_lvb *, void *);
+ struct qsd_qtype_info *, struct lquota_lvb *, void *);
int qsd_fetch_index(const struct lu_env *, struct obd_export *,
- struct idx_info *, unsigned int, cfs_page_t **, bool *);
+ struct idx_info *, unsigned int, struct page **, bool *);
+
+/* qsd_writeback.c */
+void qsd_bump_version(struct qsd_qtype_info *, __u64, bool);
+void qsd_upd_schedule(struct qsd_qtype_info *, struct lquota_entry *,
+ union lquota_id *, union lquota_rec *, __u64, bool);
+int qsd_start_upd_thread(struct qsd_instance *);
+void qsd_stop_upd_thread(struct qsd_instance *);
+void qsd_adjust_schedule(struct lquota_entry *, bool, bool);
+
+/* qsd_config.c */
+struct qsd_fsinfo *qsd_get_fsinfo(char *, bool);
+void qsd_put_fsinfo(struct qsd_fsinfo *);
+int qsd_process_config(struct lustre_cfg *);
+
+/* qsd_handler.c */
+int qsd_adjust(const struct lu_env *, struct lquota_entry *);
#endif /* _QSD_INTERNAL_H */