+
+inline int quota_is_on(struct lustre_quota_ctxt *qctxt,
+                       struct obd_quotactl *oqctl)
+{
+        /* Quota is "on" for this type only if every flag bit that
+         * UGQUOTA2LQC() maps the type to is set in lqc_flags; i.e. no
+         * required bit may be missing. */
+        return !(~qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type));
+}
+
+inline int quota_is_off(struct lustre_quota_ctxt *qctxt,
+                        struct obd_quotactl *oqctl)
+{
+        /* Quota is "off" for this type when none of its flag bits is set. */
+        return (qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type)) == 0;
+}
+
+/**
+ * When quota is turned on, build an lqs for every uid/gid that has had a
+ * quota limit set. After quota_search_lqs(), one reference to the lqs is
+ * held; it is released when qctxt_cleanup() is executed. b=18574
+ *
+ * Should be called with obt->obt_quotachecking held. b=20152
+ */
+void build_lqs(struct obd_device *obd)
+{
+ struct obd_device_target *obt = &obd->u.obt;
+ struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
+ cfs_list_t id_list;
+ int i, rc;
+
+ /* Caller must hold obt_quotachecking (b=20152). */
+ LASSERT_SEM_LOCKED(&obt->obt_quotachecking);
+ CFS_INIT_LIST_HEAD(&id_list);
+ /* Walk both quota types (user, group); i==0 is user, i==1 is group. */
+ for (i = 0; i < MAXQUOTAS; i++) {
+ struct dquot_id *dqid, *tmp;
+
+ /* No admin quota file for this type -> nothing to build. */
+ if (sb_dqopt(qctxt->lqc_sb)->files[i] == NULL)
+ continue;
+
+ /* fsfilt_qids() takes the quota file either as a file handle or
+ * as an inode, depending on whether the kernel supports
+ * quota_read; collect all ids with limits into id_list. */
+#ifndef KERNEL_SUPPORTS_QUOTA_READ
+ rc = fsfilt_qids(obd, sb_dqopt(qctxt->lqc_sb)->files[i], NULL,
+ i, &id_list);
+#else
+ rc = fsfilt_qids(obd, NULL, sb_dqopt(qctxt->lqc_sb)->files[i],
+ i, &id_list);
+#endif
+ if (rc) {
+ /* Best-effort: a failure for one type does not stop the other. */
+ CERROR("%s: failed to get %s qids!\n", obd->obd_name,
+ i ? "group" : "user");
+ continue;
+ }
+
+ /* _safe variant: each dqid is unlinked and freed inside the loop. */
+ cfs_list_for_each_entry_safe(dqid, tmp, &id_list,
+ di_link) {
+ struct lustre_qunit_size *lqs;
+
+ cfs_list_del_init(&dqid->di_link);
+ /* create==1: allocate the lqs if absent; on success it is
+ * returned with a reference held (released in qctxt_cleanup,
+ * b=18574). */
+ lqs = quota_search_lqs(LQS_KEY(i, dqid->di_id),
+ qctxt, 1);
+ if (lqs && !IS_ERR(lqs)) {
+ lqs->lqs_flags |= dqid->di_flag;
+ lqs_putref(lqs);
+ } else {
+ CERROR("%s: failed to create a lqs for %sid %u"
+ "\n", obd->obd_name, i ? "g" : "u",
+ dqid->di_id);
+ }
+
+ /* dqid was allocated by fsfilt_qids(); we own and free it. */
+ OBD_FREE_PTR(dqid);
+ }
+ }
+}
+
+/**
+ * lqs<->qctxt hash operations
+ */
+
+/**
+ * string hashing using djb2 hash algorithm
+ */
+static unsigned
+lqs_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+        unsigned long long lqs_key;
+        unsigned h;
+        ENTRY;
+
+        LASSERT(key);
+        lqs_key = *((unsigned long long *)key);
+        /* Distinct odd multipliers (djb2-style) keep group and user ids
+         * from hashing identically. */
+        if (LQS_KEY_GRP(lqs_key))
+                h = 5381 * (unsigned)LQS_KEY_ID(lqs_key);
+        else
+                h = 5387 * (unsigned)LQS_KEY_ID(lqs_key);
+
+        RETURN(h & mask);
+}
+
+static void *
+lqs_key(cfs_hlist_node_t *hnode)
+{
+        struct lustre_qunit_size *q;
+        ENTRY;
+
+        /* Recover the lqs from its embedded hash node and hand back the
+         * address of its key field. */
+        q = cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+        RETURN(&q->lqs_key);
+}
+
+static int
+lqs_keycmp(const void *key, cfs_hlist_node_t *hnode)
+{
+        struct lustre_qunit_size *lqs =
+                cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+        unsigned long long id = *((unsigned long long *)key);
+
+        /* Exact 64-bit key match. */
+        RETURN(lqs->lqs_key == id);
+}
+
+static void *
+lqs_object(cfs_hlist_node_t *hnode)
+{
+        /* Map a hash node back to the lqs that embeds it. */
+        struct lustre_qunit_size *lqs =
+                cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+
+        return lqs;
+}
+
+static void
+lqs_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+        /* Hash table takes a reference on the embedded lqs. */
+        lqs_getref(cfs_hlist_entry(hnode, struct lustre_qunit_size,
+                                   lqs_hash));
+}
+
+static void
+lqs_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+        /* Hash table drops its reference on the embedded lqs. */
+        lqs_putref(cfs_hlist_entry(hnode, struct lustre_qunit_size,
+                                   lqs_hash));
+}
+
+static void
+lqs_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+        /* By the time the hash is torn down every lqs reference should
+         * already have been dropped; reaching this callback means an lqs
+         * was leaked. Log it loudly but do not crash.
+         *
+         * Fix: the CERROR message was missing its terminating newline,
+         * unlike every other CERROR in this file. */
+        CERROR("It should not have any item left to be handled by this!\n");
+}
+
+static cfs_hash_ops_t lqs_hash_ops = {
+ .hs_hash = lqs_hash, /* key -> bucket index */
+ .hs_key = lqs_key, /* node -> key pointer */
+ .hs_keycmp = lqs_keycmp, /* key vs node equality */
+ .hs_object = lqs_object, /* node -> containing lqs */
+ .hs_get = lqs_get, /* take an lqs reference */
+ .hs_put_locked = lqs_put_locked, /* drop an lqs reference (bucket locked) */
+ .hs_exit = lqs_exit /* teardown: leftover nodes are a leak */
+};
+#endif /* HAVE_QUOTA_SUPPORT */