+
/* update record for slave & global index copy */
struct qsd_upd_rec {
	cfs_list_t		 qur_link;   /* link into qsd_upd_list */
	union lquota_id		 qur_qid;    /* quota ID the update applies to */
	union lquota_rec	 qur_rec;    /* record payload to be written */
	struct qsd_qtype_info	*qur_qqi;    /* per-quota-type info back-pointer */
	struct lquota_entry	*qur_lqe;    /* associated quota entry;
					      * NOTE(review): presumably may be
					      * NULL for global-only updates —
					      * confirm against qsd_writeback.c */
	__u64			 qur_ver;    /* index version carried by this
					      * update */
	bool			 qur_global; /* true when targeting the global
					      * index copy */
};
+
/* Common data shared by qsd-level handlers. This is allocated per-thread to
 * reduce stack consumption. */
struct qsd_thread_info {
	union lquota_rec	 qti_rec;     /* scratch quota record */
	union lquota_id		 qti_id;      /* scratch quota identifier */
	struct lu_fid		 qti_fid;     /* scratch FID */
	struct ldlm_res_id	 qti_resid;   /* scratch ldlm resource ID */
	struct ldlm_enqueue_info qti_einfo;   /* scratch ldlm enqueue info */
	struct lustre_handle	 qti_lockh;   /* scratch lock handle */
	__u64			 qti_slv_ver; /* slave index version */
	struct lquota_lvb	 qti_lvb;     /* scratch lock value block */
	/* request/reply body and index-fetch descriptor are never needed at
	 * the same time, so they share storage */
	union {
		struct quota_body qti_body;
		struct idx_info	  qti_ii;
	};
	char			 qti_buf[MTI_NAME_MAXLEN]; /* scratch name
							    * buffer */
};
+
+extern struct lu_context_key qsd_thread_key;
+
+static inline
+struct qsd_thread_info *qsd_info(const struct lu_env *env)
+{
+ struct qsd_thread_info *info;
+
+ info = lu_context_key_get(&env->le_ctx, &qsd_thread_key);
+ if (info == NULL) {
+ lu_env_refill((struct lu_env *)env);
+ info = lu_context_key_get(&env->le_ctx, &qsd_thread_key);
+ }
+ LASSERT(info);
+ return info;
+}
+
+/* helper function to check whether a given quota type is enabled */
+static inline int qsd_type_enabled(struct qsd_instance *qsd, int type)
+{
+ int enabled, pool;
+
+ LASSERT(qsd != NULL);
+ LASSERT(type < MAXQUOTAS);
+
+ if (qsd->qsd_fsinfo == NULL)
+ return 0;
+
+ pool = qsd->qsd_is_md ? LQUOTA_RES_MD : LQUOTA_RES_DT;
+ enabled = qsd->qsd_fsinfo->qfs_enabled[pool - LQUOTA_FIRST_RES];
+
+ return enabled & (1 << type);
+}
+
+/* helper function to set new qunit and compute associated qtune value */
+static inline void qsd_set_qunit(struct lquota_entry *lqe, __u64 qunit)
+{
+ if (lqe->lqe_qunit == qunit)
+ return;
+
+ lqe->lqe_qunit = qunit;
+
+ /* With very large qunit support, we can't afford to have a static
+ * qtune value, e.g. with a 1PB qunit and qtune set to 50%, we would
+ * start pre-allocation when 512TB of free quota space remains.
+ * Therefore, we adapt qtune depending on the actual qunit value */
+ if (qunit == 0) /* if qunit is NULL */
+ lqe->lqe_qtune = 0; /* qtune = 0 */
+ else if (qunit == 1024) /* if 1MB or 1K inodes */
+ lqe->lqe_qtune = qunit >> 1; /* => 50% */
+ else if (qunit <= 1024 * 1024) /* up to 1GB or 1M inodes */
+ lqe->lqe_qtune = qunit >> 2; /* => 25% */
+ else if (qunit <= 4 * 1024 * 1024) /* up to 16GB or 16M inodes */
+ lqe->lqe_qtune = qunit >> 3; /* => 12.5% */
+ else /* above 4GB/4M */
+ lqe->lqe_qtune = 1024 * 1024; /* value capped to 1GB/1M */
+
+ LQUOTA_DEBUG(lqe, "changing qunit & qtune");
+
+ /* turn on pre-acquire when qunit is modified */
+ lqe->lqe_nopreacq = false;
+}
+
+#define QSD_WB_INTERVAL 60 /* 60 seconds */
+
+/* helper function calculating how long a service thread should be waiting for
+ * quota space */
+static inline int qsd_wait_timeout(struct qsd_instance *qsd)
+{
+ if (qsd->qsd_timeout != 0)
+ return qsd->qsd_timeout;
+ return min_t(int, at_max / 2, obd_timeout / 2);
+}
+
+/* qsd_entry.c */
+extern struct lquota_entry_operations qsd_lqe_ops;
+int qsd_refresh_usage(const struct lu_env *, struct lquota_entry *);
+int qsd_update_index(const struct lu_env *, struct qsd_qtype_info *,
+ union lquota_id *, bool, __u64, void *);
+int qsd_update_lqe(const struct lu_env *, struct lquota_entry *, bool,
+ void *);
+int qsd_write_version(const struct lu_env *, struct qsd_qtype_info *,
+ __u64, bool);
+
+/* qsd_lock.c */
+extern struct ldlm_enqueue_info qsd_glb_einfo;
+extern struct ldlm_enqueue_info qsd_id_einfo;
+int qsd_id_lock_match(struct lustre_handle *, struct lustre_handle *);
+int qsd_id_lock_cancel(const struct lu_env *, struct lquota_entry *);
+
+/* qsd_reint.c */
+int qsd_start_reint_thread(struct qsd_qtype_info *);
+void qsd_stop_reint_thread(struct qsd_qtype_info *);
+
+/* qsd_request.c */
+typedef void (*qsd_req_completion_t) (const struct lu_env *,
+ struct qsd_qtype_info *,
+ struct quota_body *, struct quota_body *,
+ struct lustre_handle *,
+ struct lquota_lvb *, void *, int);
+int qsd_send_dqacq(const struct lu_env *, struct obd_export *,
+ struct quota_body *, bool, qsd_req_completion_t,
+ struct qsd_qtype_info *, struct lustre_handle *,
+ struct lquota_entry *);
+int qsd_intent_lock(const struct lu_env *, struct obd_export *,
+ struct quota_body *, bool, int, qsd_req_completion_t,
+ struct qsd_qtype_info *, struct lquota_lvb *, void *);
+int qsd_fetch_index(const struct lu_env *, struct obd_export *,
+ struct idx_info *, unsigned int, cfs_page_t **, bool *);
+
+/* qsd_writeback.c */
+void qsd_bump_version(struct qsd_qtype_info *, __u64, bool);
+void qsd_upd_schedule(struct qsd_qtype_info *, struct lquota_entry *,
+ union lquota_id *, union lquota_rec *, __u64, bool);
+/* qsd_config.c */
+struct qsd_fsinfo *qsd_get_fsinfo(char *, bool);
+void qsd_put_fsinfo(struct qsd_fsinfo *);
+int qsd_process_config(struct lustre_cfg *);
+
+/* qsd_handler.c */
+int qsd_adjust(const struct lu_env *, struct lquota_entry *);
+
+/* qsd_writeback.c */
+void qsd_upd_schedule(struct qsd_qtype_info *, struct lquota_entry *,
+ union lquota_id *, union lquota_rec *, __u64, bool);
+void qsd_bump_version(struct qsd_qtype_info *, __u64, bool);
+int qsd_start_upd_thread(struct qsd_instance *);
+void qsd_stop_upd_thread(struct qsd_instance *);
+void qsd_adjust_schedule(struct lquota_entry *, bool, bool);