struct lquota_entry *lqe;
lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
- LASSERT(atomic_read(&lqe->lqe_ref) > 0);
+ LASSERT(kref_read(&lqe->lqe_ref) > 0);
/* Only one reference held by hash table, and nobody else can
* grab the entry at this moment, it's safe to remove it from
* the hash and free it. */
- if (atomic_read(&lqe->lqe_ref) == 1) {
+ if (kref_read(&lqe->lqe_ref) == 1) {
if (!lqe_is_master(lqe)) {
LASSERT(lqe->lqe_pending_write == 0);
LASSERT(lqe->lqe_pending_req == 0);
RETURN(ERR_PTR(-ENOMEM));
}
- atomic_set(&new->lqe_ref, 1); /* hold 1 for caller */
+ kref_init(&new->lqe_ref); /* hold 1 for caller */
new->lqe_id = *qid;
new->lqe_site = site;
INIT_LIST_HEAD(&new->lqe_link);
struct lquota_site *lqe_site;
/* reference counter */
- atomic_t lqe_ref;
+ struct kref lqe_ref;
/* linked to list of lqes which:
* - need quota space adjustment on slave
extern struct kmem_cache *lqe_kmem;
+/* lquota_lib.c */
+void lqe_ref_free(struct kref *kref);
+
/* helper routine to get/put reference on lquota_entry */
static inline void lqe_getref(struct lquota_entry *lqe)
{
LASSERT(lqe != NULL);
- atomic_inc(&lqe->lqe_ref);
+ kref_get(&lqe->lqe_ref);
}
static inline void lqe_putref(struct lquota_entry *lqe)
{
LASSERT(lqe != NULL);
- LASSERT(atomic_read(&lqe->lqe_ref) > 0);
- if (atomic_dec_and_test(&lqe->lqe_ref))
- OBD_SLAB_FREE_PTR(lqe, lqe_kmem);
+ LASSERT(kref_read(&lqe->lqe_ref) > 0);
+ kref_put(&lqe->lqe_ref, lqe_ref_free);
}
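For reference, a minimal sketch of the kref pattern these helpers now follow
(not part of the patch; the "foo" names are hypothetical). kref_init() starts
the count at 1, kref_get() takes an additional reference, and kref_put()
invokes the release function only when the count drops to zero:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref	f_ref;
};

/* release callback, invoked by kref_put() on the final reference */
static void foo_release(struct kref *kref)
{
	struct foo *foo = container_of(kref, struct foo, f_ref);

	kfree(foo);
}

static struct foo *foo_alloc(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (foo)
		kref_init(&foo->f_ref);	/* count starts at 1, held by caller */
	return foo;
}

static void foo_get(struct foo *foo)
{
	kref_get(&foo->f_ref);
}

static void foo_put(struct foo *foo)
{
	/* frees foo via foo_release() once the last reference is dropped */
	kref_put(&foo->f_ref, foo_release);
}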
static inline int lqe_is_master(struct lquota_entry *lqe)
LU_CONTEXT_KEY_DEFINE(lquota, LCT_MD_THREAD | LCT_DT_THREAD | LCT_LOCAL);
LU_KEY_INIT_GENERIC(lquota);
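+/* kref release callback: free the lquota entry once the last reference
+ * has been dropped */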
+void lqe_ref_free(struct kref *kref)
+{
+ struct lquota_entry *lqe = container_of(kref, struct lquota_entry,
+ lqe_ref);
+
+ OBD_SLAB_FREE_PTR(lqe, lqe_kmem);
+}
+
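With the release callback in place, the kref_put(&lqe->lqe_ref, lqe_ref_free)
call in lqe_putref() frees the entry through this single path once the last
reference is dropped, replacing the open-coded atomic_dec_and_test() plus
OBD_SLAB_FREE_PTR() sequence the header helper used to carry.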
static inline __u32 qtype2acct_oid(int qtype)
{
switch (qtype) {
struct lquota_entry *lqe;
lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
- LASSERT(atomic_read(&lqe->lqe_ref) > 0);
+ LASSERT(kref_read(&lqe->lqe_ref) > 0);
if (lqe->lqe_id.qid_uid == 0 || !lqe->lqe_is_default)
return 0;
* so no need to send glimpse callbacks.
*/
if (!kthread_should_stop() &&
- atomic_read(&lqe->lqe_ref) > 1)
+ kref_read(&lqe->lqe_ref) > 1)
qmt_id_lock_glimpse(env, qmt, lqe, NULL);
lqe_putref(lqe);
int old_num = 0, rc;
lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
- LASSERT(atomic_read(&lqe->lqe_ref) > 0);
+ LASSERT(kref_read(&lqe->lqe_ref) > 0);
rc = 0;
CDEBUG(D_QUOTA, "lgd %px\n", lqe->lqe_glbl_data);
struct lu_env *env = data;
lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
- LASSERT(atomic_read(&lqe->lqe_ref) > 0);
+ LASSERT(kref_read(&lqe->lqe_ref) > 0);
lqe_write_lock(lqe);
if (lqe->lqe_granted != lqe->lqe_recalc_granted) {
struct lquota_entry *lqe;
lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
- LASSERT(atomic_read(&lqe->lqe_ref) > 0);
+ LASSERT(kref_read(&lqe->lqe_ref) > 0);
if (lqe->lqe_id.qid_uid == 0 || !lqe->lqe_is_default)
return 0;
int *pending = (int *)data;
lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
- LASSERT(atomic_read(&lqe->lqe_ref) > 0);
+ LASSERT(kref_read(&lqe->lqe_ref) > 0);
lqe_read_lock(lqe);
*pending += lqe->lqe_pending_req;