* GPL HEADER END
*/
/*
- * Copyright (c) 2012 Whamcloud, Inc.
+ * Copyright (c) 2012, 2014, Intel Corporation.
* Use is subject to license terms.
*/
/* procfs directory where information related to the underlying slaves
* are exported */
- cfs_proc_dir_entry_t *qsd_proc;
+ struct proc_dir_entry *qsd_proc;
/* export used for the connection to quota master */
- struct obd_export *qsd_exp;
+ struct obd_export *qsd_exp;
/* ldlm namespace used for quota locks */
- struct ldlm_namespace *qsd_ns;
+ struct ldlm_namespace *qsd_ns;
/* on-disk directory where to store index files for this qsd instance */
struct dt_object *qsd_root;
struct qsd_fsinfo *qsd_fsinfo;
/* link into qfs_qsd_list of qfs_fsinfo */
- cfs_list_t qsd_link;
+ struct list_head qsd_link;
/* list of lqe entry which might need quota space adjustment */
- cfs_list_t qsd_adjust_list;
+ struct list_head qsd_adjust_list;
/* lock protecting adjust list */
- cfs_spinlock_t qsd_adjust_lock;
+ spinlock_t qsd_adjust_lock;
/* dedicated thread for updating slave index files. */
struct ptlrpc_thread qsd_upd_thread;
/* list of update tasks */
- cfs_list_t qsd_upd_list;
+ struct list_head qsd_upd_list;
/* r/w spinlock protecting:
* - the state flags
* - the qsd update list
* - the deferred list
* - flags of the qsd_qtype_info */
- cfs_rwlock_t qsd_lock;
+ rwlock_t qsd_lock;
/* Default quota settings which apply to all identifiers */
/* when blk qunit reaches this value, later write reqs from client
* should be sync. b=16642 */
unsigned long qsd_sync_threshold;
+ /* how long a service thread can wait for quota space.
+ * value dynamically computed from obd_timeout and at_max if not
+ * enforced here (via procfs) */
+ int qsd_timeout;
+
unsigned long qsd_is_md:1, /* managing quota for mdt */
qsd_started:1, /* instance is now started */
qsd_prepared:1, /* qsd_prepare() successfully
* called */
qsd_exp_valid:1,/* qsd_exp is now valid */
- qsd_stopping:1; /* qsd_instance is stopping */
+ qsd_stopping:1, /* qsd_instance is stopping */
+ qsd_acct_failed:1; /* failed to set up acct
+ * for one quota type */
};
/*
*/
struct qsd_qtype_info {
/* reference count incremented by each user of this structure */
- cfs_atomic_t qqi_ref;
+ atomic_t qqi_ref;
/* quota type, either USRQUOTA or GRPQUOTA
* immutable after creation. */
struct lprocfs_stats *qqi_stats;
/* deferred update for the global index copy */
- cfs_list_t qqi_deferred_glb;
+ struct list_head qqi_deferred_glb;
/* deferred update for the slave index copy */
- cfs_list_t qqi_deferred_slv;
+ struct list_head qqi_deferred_slv;
/* Various flags representing the current state of the slave for this
* quota type. */
unsigned int qfs_enabled[LQUOTA_NR_RES];
/* list of all qsd_instance for this fs */
- cfs_list_t qfs_qsd_list;
- cfs_semaphore_t qfs_sem;
+ struct list_head qfs_qsd_list;
+ struct mutex qfs_mutex;
/* link to the global quota fsinfo list. */
- cfs_list_t qfs_link;
+ struct list_head qfs_link;
/* reference count */
int qfs_ref;
/* qqi_getref/putref is used to track users of a qqi structure */
static inline void qqi_getref(struct qsd_qtype_info *qqi)
{
- cfs_atomic_inc(&qqi->qqi_ref);
+ atomic_inc(&qqi->qqi_ref);
}
static inline void qqi_putref(struct qsd_qtype_info *qqi)
{
- LASSERT(cfs_atomic_read(&qqi->qqi_ref) > 0);
- cfs_atomic_dec(&qqi->qqi_ref);
+ LASSERT(atomic_read(&qqi->qqi_ref) > 0);
+ atomic_dec(&qqi->qqi_ref);
}
-/* all kind of operations supported by qsd_dqacq() */
-enum qsd_ops {
- QSD_ADJ, /* adjust quota space based on current qunit */
- QSD_ACQ, /* acquire space for requests */
- QSD_REL, /* release all space quota space uncondionnally */
- QSD_REP, /* report space usage during reintegration */
-};
-
#define QSD_RES_TYPE(qsd) ((qsd)->qsd_is_md ? LQUOTA_RES_MD : LQUOTA_RES_DT)
 /* update record for slave & global index copy */
struct qsd_upd_rec {
- cfs_list_t qur_link; /* link into qsd_upd_list */
+ struct list_head qur_link; /* link into qsd_upd_list */
union lquota_id qur_qid;
union lquota_rec qur_rec;
struct qsd_qtype_info *qur_qqi;
struct ldlm_enqueue_info qti_einfo;
struct lustre_handle qti_lockh;
__u64 qti_slv_ver;
- union ldlm_wire_lvb qti_lvb;
+ struct lquota_lvb qti_lvb;
union {
struct quota_body qti_body;
struct idx_info qti_ii;
lqe->lqe_nopreacq = false;
}
+/* helper function to set/clear edquot flag */
+static inline void qsd_set_edquot(struct lquota_entry *lqe, bool edquot)
+{
+ lqe->lqe_edquot = edquot;
+ if (edquot)
+ lqe->lqe_edquot_time = cfs_time_current_64();
+}
+
#define QSD_WB_INTERVAL 60 /* 60 seconds */
+/* helper function calculating how long a service thread should be waiting for
+ * quota space */
+static inline int qsd_wait_timeout(struct qsd_instance *qsd)
+{
+ if (qsd->qsd_timeout != 0)
+ return qsd->qsd_timeout;
+ return min_t(int, at_max / 2, obd_timeout / 2);
+}
+
/* qsd_entry.c */
extern struct lquota_entry_operations qsd_lqe_ops;
int qsd_refresh_usage(const struct lu_env *, struct lquota_entry *);
struct qsd_qtype_info *,
struct quota_body *, struct quota_body *,
struct lustre_handle *,
- union ldlm_wire_lvb *, void *, int);
+ struct lquota_lvb *, void *, int);
int qsd_send_dqacq(const struct lu_env *, struct obd_export *,
struct quota_body *, bool, qsd_req_completion_t,
struct qsd_qtype_info *, struct lustre_handle *,
struct lquota_entry *);
int qsd_intent_lock(const struct lu_env *, struct obd_export *,
struct quota_body *, bool, int, qsd_req_completion_t,
- struct qsd_qtype_info *, union ldlm_wire_lvb *, void *);
+ struct qsd_qtype_info *, struct lquota_lvb *, void *);
int qsd_fetch_index(const struct lu_env *, struct obd_export *,
- struct idx_info *, unsigned int, cfs_page_t **, bool *);
+ struct idx_info *, unsigned int, struct page **, bool *);
/* qsd_writeback.c */
void qsd_bump_version(struct qsd_qtype_info *, __u64, bool);
int qsd_process_config(struct lustre_cfg *);
/* qsd_handler.c */
-int qsd_dqacq(const struct lu_env *, struct lquota_entry *, enum qsd_ops);
-__u64 qsd_calc_grants(struct lquota_entry *, __u64, __u32);
+int qsd_adjust(const struct lu_env *, struct lquota_entry *);
/* qsd_writeback.c */
void qsd_upd_schedule(struct qsd_qtype_info *, struct lquota_entry *,