+int quota_is_on(struct lustre_quota_ctxt *qctxt, struct obd_quotactl *oqctl)
+{
+ unsigned int type;
+
+ for (type = USRQUOTA; type < MAXQUOTAS; type++) {
+ if (!Q_TYPESET(oqctl, type))
+ continue;
+ if (!(qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type)))
+ return 0;
+ }
+ return 1;
+}
+
+int quota_is_off(struct lustre_quota_ctxt *qctxt, struct obd_quotactl *oqctl)
+{
+ unsigned int type;
+
+ for (type = USRQUOTA; type < MAXQUOTAS; type++) {
+ if (!Q_TYPESET(oqctl, type))
+ continue;
+ if (qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type))
+ return 0;
+ }
+ return 1;
+}
+
/**
 * When quotaon, build a lqs for every uid/gid who has been set limitation
 * for quota. After quota_search_lqs, it will hold one ref for the lqs.
 * It will be released when qctxt_cleanup() is executed b=18574
 *
 * Should be called with obt->obt_quotachecking held. b=20152
 *
 * Errors are best-effort: a failure for one quota type or one id is
 * logged and the remaining ids/types are still processed.
 */
void build_lqs(struct obd_device *obd)
{
        struct obd_device_target *obt = &obd->u.obt;
        struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
        struct list_head id_list;
        int i, rc;

        LASSERT_SEM_LOCKED(&obt->obt_quotachecking);
        INIT_LIST_HEAD(&id_list);
        /* iterate over quota types: i == USRQUOTA, then group */
        for (i = 0; i < MAXQUOTAS; i++) {
                struct dquot_id *dqid, *tmp;

                /* skip quota types whose admin quota file is not set up */
                if (sb_dqopt(qctxt->lqc_sb)->files[i] == NULL)
                        continue;

/* fsfilt_qids() collects all limited ids of type i into id_list; the quota
 * file is passed either in the 2nd or 3rd argument depending on whether the
 * kernel has its own quota-read support */
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, sb_dqopt(qctxt->lqc_sb)->files[i], NULL,
                                 i, &id_list);
#else
                rc = fsfilt_qids(obd, NULL, sb_dqopt(qctxt->lqc_sb)->files[i],
                                 i, &id_list);
#endif
                if (rc) {
                        /* log and keep going with the other quota type */
                        CERROR("%s: failed to get %s qids!\n", obd->obd_name,
                               i ? "group" : "user");
                        continue;
                }

                /* consume id_list, freeing each entry as we go */
                list_for_each_entry_safe(dqid, tmp, &id_list,
                                         di_link) {
                        struct lustre_qunit_size *lqs;

                        list_del_init(&dqid->di_link);
                        /* find or create (3rd arg == 1) the lqs for this id;
                         * the search reference is dropped below, while the
                         * creation reference lives until qctxt_cleanup()
                         * (see header comment, b=18574) */
                        lqs = quota_search_lqs(LQS_KEY(i, dqid->di_id),
                                               qctxt, 1);
                        if (lqs && !IS_ERR(lqs)) {
                                lqs->lqs_flags |= dqid->di_flag;
                                lqs_putref(lqs);
                        } else {
                                CERROR("%s: failed to create a lqs for %sid %u"
                                       "\n", obd->obd_name, i ? "g" : "u",
                                       dqid->di_id);
                        }

                        OBD_FREE_PTR(dqid);
                }
        }
}
+
+/**
+ * lqs<->qctxt hash operations
+ */
+
+/**
+ * string hashing using djb2 hash algorithm
+ */
+static unsigned
+lqs_hash(cfs_hash_t *hs, void *key, unsigned mask)
+{
+ struct quota_adjust_qunit *lqs_key;
+ unsigned hash;
+ ENTRY;
+
+ LASSERT(key);
+ lqs_key = (struct quota_adjust_qunit *)key;
+ hash = (QAQ_IS_GRP(lqs_key) ? 5381 : 5387) * lqs_key->qaq_id;
+
+ RETURN(hash & mask);
+}
+
+static int
+lqs_compare(void *key, struct hlist_node *hnode)
+{
+ struct lustre_qunit_size *q;
+ int rc;
+ ENTRY;
+
+ LASSERT(key);
+ q = hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+
+ spin_lock(&q->lqs_lock);
+ rc = (q->lqs_key == *((unsigned long long *)key));
+ spin_unlock(&q->lqs_lock);
+
+ RETURN(rc);
+}
+
+static void *
+lqs_get(struct hlist_node *hnode)
+{
+ struct lustre_qunit_size *q =
+ hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+ ENTRY;
+
+ __lqs_getref(q);
+
+ RETURN(q);
+}
+
+static void *
+lqs_put(struct hlist_node *hnode)
+{
+ struct lustre_qunit_size *q =
+ hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+ ENTRY;
+
+ __lqs_putref(q);
+
+ RETURN(q);
+}
+
+static void
+lqs_exit(struct hlist_node *hnode)
+{
+ struct lustre_qunit_size *q =
+ hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+ ENTRY;
+
+ /*
+ * Nothing should be left. User of lqs put it and
+ * lqs also was deleted from table by this time
+ * so we should have 0 refs.
+ */
+ LASSERTF(atomic_read(&q->lqs_refcount) == 0,
+ "Busy lqs %p with %d refs\n", q,
+ atomic_read(&q->lqs_refcount));
+ OBD_FREE_PTR(q);
+ EXIT;
+}
+
/* callback table wiring struct lustre_qunit_size entries into a generic
 * cfs_hash table (see the lqs_* callbacks above) */
static cfs_hash_ops_t lqs_hash_ops = {
        .hs_hash = lqs_hash,
        .hs_compare = lqs_compare,
        .hs_get = lqs_get,
        .hs_put = lqs_put,
        .hs_exit = lqs_exit
};
+#endif /* HAVE_QUOTA_SUPPORT */