qctxt->lqc_lqs_hash = cfs_hash_create("LQS_HASH",
hash_lqs_cur_bits,
HASH_LQS_MAX_BITS,
- &lqs_hash_ops, CFS_HASH_REHASH);
+ min(hash_lqs_cur_bits,
+ HASH_LQS_BKT_BITS),
+ 0, CFS_HASH_MIN_THETA,
+ CFS_HASH_MAX_THETA,
+ &lqs_hash_ops, CFS_HASH_DEFAULT);
if (!qctxt->lqc_lqs_hash) {
CERROR("initialize hash lqs for %s error!\n", obd->obd_name);
RETURN(-ENOMEM);
RETURN(rc);
}
-
-void hash_put_lqs(void *obj, void *data)
+/* cfs_hash_for_each_safe() callback used by qctxt_cleanup(): unlink the
+ * entry under the bucket lock.  The hash's hs_put_locked callback
+ * (lqs_put_locked) then drops the reference the hash table held on the
+ * entry. */
+int qctxt_del_lqs(cfs_hash_t *hs, cfs_hash_bd_t *bd,
+                  cfs_hlist_node_t *hnode, void *data)
{
-	lqs_putref((struct lustre_qunit_size *)obj);
+	/* remove from hash and -1 refcount */
+	cfs_hash_bd_del_locked(hs, bd, hnode);
+	return 0;
}
void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
cfs_time_seconds(1));
}
- cfs_hash_for_each_safe(qctxt->lqc_lqs_hash, hash_put_lqs, NULL);
+	/* release the refcount on each lustre_qunit_size held by lqs_hash */
+ cfs_hash_for_each_safe(qctxt->lqc_lqs_hash, qctxt_del_lqs, NULL);
+
l_wait_event(qctxt->lqc_lqs_waitq, check_lqs(qctxt), &lwi);
cfs_down_write(&obt->obt_rwsem);
cfs_hash_putref(qctxt->lqc_lqs_hash);
EXIT;
}
-int quota_is_on(struct lustre_quota_ctxt *qctxt, struct obd_quotactl *oqctl)
+/* Return 1 iff every quota type selected by oqctl->qc_type is enabled in
+ * qctxt->lqc_flags: UGQUOTA2LQC() maps qc_type to its flag bit(s), so the
+ * masked flags must equal the full mask. */
+inline int quota_is_on(struct lustre_quota_ctxt *qctxt,
+                       struct obd_quotactl *oqctl)
{
-	unsigned int type;
-
-	for (type = USRQUOTA; type < MAXQUOTAS; type++) {
-		if (!Q_TYPESET(oqctl, type))
-			continue;
-		if (!(qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type)))
-			return 0;
-	}
-	return 1;
+	return ((qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type)) ==
+		UGQUOTA2LQC(oqctl->qc_type));
}
-int quota_is_off(struct lustre_quota_ctxt *qctxt, struct obd_quotactl *oqctl)
+/* Return 1 iff none of the quota types selected by oqctl->qc_type is
+ * enabled in qctxt->lqc_flags.  Note this is not the complement of
+ * quota_is_on(): with both bits requested, "partially on" makes both
+ * predicates return 0. */
+inline int quota_is_off(struct lustre_quota_ctxt *qctxt,
+                        struct obd_quotactl *oqctl)
{
-	unsigned int type;
-
-	for (type = USRQUOTA; type < MAXQUOTAS; type++) {
-		if (!Q_TYPESET(oqctl, type))
-			continue;
-		if (qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type))
-			return 0;
-	}
-	return 1;
+	return !(qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type));
}
/**
}
static int
-lqs_compare(void *key, cfs_hlist_node_t *hnode)
+lqs_keycmp(void *key, cfs_hlist_node_t *hnode)
{
struct lustre_qunit_size *q =
cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
}
+/* hs_object callback: map a hash-list node back to its containing
+ * lustre_qunit_size; takes no reference (refcounting is done by the
+ * hs_get/hs_put_locked callbacks). */
static void *
-lqs_get(cfs_hlist_node_t *hnode)
+lqs_object(cfs_hlist_node_t *hnode)
{
-	struct lustre_qunit_size *q =
-	    cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
-	ENTRY;
-
-	__lqs_getref(q);
-
-	RETURN(q);
+	return cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
}
+/* hs_get callback: take a reference on the lqs when the hash table hands
+ * the entry out. */
-static void *
-lqs_put(cfs_hlist_node_t *hnode)
+static void
+lqs_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
	struct lustre_qunit_size *q =
	    cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
-	ENTRY;
-
-	__lqs_putref(q);
-	RETURN(q);
+	lqs_getref(q);
}
+/* hs_put_locked callback: drop a reference on the lqs; invoked by the
+ * hash with the bucket lock held (e.g. from cfs_hash_bd_del_locked()). */
static void
-lqs_exit(cfs_hlist_node_t *hnode)
+lqs_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
	struct lustre_qunit_size *q =
	    cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
-	ENTRY;
-	/*
-	 * Nothing should be left. User of lqs put it and
-	 * lqs also was deleted from table by this time
-	 * so we should have 0 refs.
-	 */
-	LASSERTF(cfs_atomic_read(&q->lqs_refcount) == 0,
-		 "Busy lqs %p with %d refs\n", q,
-		 cfs_atomic_read(&q->lqs_refcount));
-	OBD_FREE_PTR(q);
-	EXIT;
+	lqs_putref(q);
+}
+
+/* hs_exit callback: qctxt_cleanup() empties the hash before the final
+ * cfs_hash_putref(), so no entry should ever reach this point; log an
+ * error (but do not free — ownership is unclear here) if one does. */
+static void
+lqs_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+	CERROR("It should not have any item left to be handled by this!\n");
+}
+/* cfs_hash callbacks wiring struct lustre_qunit_size into lqc_lqs_hash */
static cfs_hash_ops_t lqs_hash_ops = {
-        .hs_hash    = lqs_hash,
-        .hs_key     = lqs_key,
-        .hs_compare = lqs_compare,
-        .hs_get     = lqs_get,
-        .hs_put     = lqs_put,
-        .hs_exit    = lqs_exit
+        .hs_hash       = lqs_hash,
+        .hs_key        = lqs_key,
+        .hs_keycmp     = lqs_keycmp,
+        .hs_object     = lqs_object,
+        .hs_get        = lqs_get,
+        .hs_put_locked = lqs_put_locked,
+        .hs_exit       = lqs_exit
};
#endif /* HAVE_QUOTA_SUPPORT */