spin_unlock(&lqs->lqs_lock);
lqs_putref(lqs);
+
EXIT;
out:
OBD_FREE_PTR(qctl);
RETURN(rc);
cfs_waitq_init(&qctxt->lqc_wait_for_qmaster);
+ cfs_waitq_init(&qctxt->lqc_lqs_waitq);
+ atomic_set(&qctxt->lqc_lqs, 0);
spin_lock_init(&qctxt->lqc_lock);
spin_lock(&qctxt->lqc_lock);
qctxt->lqc_handler = handler;
RETURN(rc);
}
+static int check_lqs(struct lustre_quota_ctxt *qctxt)
+{
+        int no_users;
+        ENTRY;
+
+        /* true once every lqs reference held against this context is gone */
+        no_users = (atomic_read(&qctxt->lqc_lqs) == 0);
+
+        RETURN(no_users);
+}
+
void hash_put_lqs(void *obj, void *data)
{
{
struct lustre_qunit *qunit, *tmp;
struct list_head tmp_list;
+ struct l_wait_info lwi = { 0 };
struct obd_device_target *obt = qctxt->lqc_obt;
int i;
ENTRY;
qunit_put(qunit);
}
- lustre_hash_for_each_safe(qctxt->lqc_lqs_hash, hash_put_lqs, NULL);
- down_write(&obt->obt_rwsem);
- lustre_hash_exit(qctxt->lqc_lqs_hash);
- qctxt->lqc_lqs_hash = NULL;
- up_write(&obt->obt_rwsem);
-
/* after qctxt_cleanup, qctxt might be freed, and then check_qm() would
 * behave unpredictably. So we must wait until lqc_wait_for_qmaster is empty */
while (cfs_waitq_active(&qctxt->lqc_wait_for_qmaster)) {
cfs_time_seconds(1));
}
+ lustre_hash_for_each_safe(qctxt->lqc_lqs_hash, hash_put_lqs, NULL);
+ l_wait_event(qctxt->lqc_lqs_waitq, check_lqs(qctxt), &lwi);
+ down_write(&obt->obt_rwsem);
+ lustre_hash_exit(qctxt->lqc_lqs_hash);
+ qctxt->lqc_lqs_hash = NULL;
+ up_write(&obt->obt_rwsem);
+
ptlrpcd_decref();
#ifdef LPROCFS
int rc = 0;
ENTRY;
- ptlrpc_daemonize("qslave_recovd");
+ cfs_daemonize_ctxt("qslave_recovd");
/* for obdfilter */
class_incref(obd, "qslave_recovd_filter", obd);
EXIT;
}
+/**
+ * Check whether quota is enabled for every quota type requested by
+ * \a oqctl (user, group, or both with UGQUOTA).
+ *
+ * \retval 1  all requested types are enabled in \a qctxt
+ * \retval 0  at least one requested type is disabled
+ */
+int quota_is_on(struct lustre_quota_ctxt *qctxt, struct obd_quotactl *oqctl)
+{
+        unsigned int type;
+
+        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
+                if (!Q_TYPESET(oqctl, type))
+                        continue;
+                /* test the flag of the type being iterated, not
+                 * oqctl->qc_type: with UGQUOTA the latter expands to both
+                 * flags at once, so a single enabled type would wrongly
+                 * make the whole request look "on" */
+                if (!(qctxt->lqc_flags & UGQUOTA2LQC(type)))
+                        return 0;
+        }
+        return 1;
+}
+
+/**
+ * Check whether quota is disabled for every quota type requested by
+ * \a oqctl (user, group, or both with UGQUOTA).
+ *
+ * \retval 1  none of the requested types is enabled in \a qctxt
+ * \retval 0  at least one requested type is enabled
+ */
+int quota_is_off(struct lustre_quota_ctxt *qctxt, struct obd_quotactl *oqctl)
+{
+        unsigned int type;
+
+        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
+                if (!Q_TYPESET(oqctl, type))
+                        continue;
+                /* check per iterated type for symmetry with quota_is_on();
+                 * the previous UGQUOTA2LQC(oqctl->qc_type) left the loop
+                 * variable unused */
+                if (qctxt->lqc_flags & UGQUOTA2LQC(type))
+                        return 0;
+        }
+        return 1;
+}
+
+/**
+ * At quotaon time, build a lqs (lustre qunit size) entry for every uid/gid
+ * that has had a quota limit set.  quota_search_lqs() takes a reference on
+ * each lqs; that reference is released when qctxt_cleanup() is executed.
+ * b=18574
+ *
+ * Must be called with obt->obt_quotachecking held.  b=20152
+ */
+void build_lqs(struct obd_device *obd)
+{
+        struct obd_device_target *obt = &obd->u.obt;
+        struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
+        struct list_head id_list;
+        int i, rc;
+
+        LASSERT_SEM_LOCKED(&obt->obt_quotachecking);
+        INIT_LIST_HEAD(&id_list);
+        /* walk both quota types: i == 0 is user, i == 1 is group */
+        for (i = 0; i < MAXQUOTAS; i++) {
+                struct dquot_id *dqid, *tmp;
+
+                /* collect every id with a limit set for this type; which
+                 * argument carries the admin quota file depends on whether
+                 * the kernel can read quota files itself */
+#ifndef KERNEL_SUPPORTS_QUOTA_READ
+                rc = fsfilt_qids(obd, sb_dqopt(qctxt->lqc_sb)->files[i], NULL,
+                                 i, &id_list);
+#else
+                rc = fsfilt_qids(obd, NULL, sb_dqopt(qctxt->lqc_sb)->files[i],
+                                 i, &id_list);
+#endif
+                if (rc) {
+                        CDEBUG(D_ERROR, "fail to get %s qids!\n",
+                               i ? "group" : "user");
+                        continue;
+                }
+
+                /* _safe variant: each node is unlinked and freed inside
+                 * the loop */
+                list_for_each_entry_safe(dqid, tmp, &id_list,
+                                         di_link) {
+                        struct lustre_qunit_size *lqs;
+
+                        list_del_init(&dqid->di_link);
+                        /* create (third arg == 1) the lqs for this id */
+                        lqs = quota_search_lqs(LQS_KEY(i, dqid->di_id),
+                                               qctxt, 1);
+                        if (lqs && !IS_ERR(lqs)) {
+                                lqs->lqs_flags |= dqid->di_flag;
+                                lqs_putref(lqs);
+                        } else {
+                                CDEBUG(D_ERROR, "fail to create a lqs"
+                                       "(%s id: %u)!\n", i ? "group" : "user",
+                                       dqid->di_id);
+                        }
+
+                        OBD_FREE_PTR(dqid);
+                }
+        }
+}
/**
* lqs<->qctxt hash operations
lqs_get(struct hlist_node *hnode)
{
struct lustre_qunit_size *q =
- hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+ hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
ENTRY;
- atomic_inc(&q->lqs_refcount);
- CDEBUG(D_QUOTA, "lqs=%p refcount %d\n",
- q, atomic_read(&q->lqs_refcount));
+ __lqs_getref(q);
RETURN(q);
}
lqs_put(struct hlist_node *hnode)
{
struct lustre_qunit_size *q =
- hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
+ hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
ENTRY;
- LASSERT(atomic_read(&q->lqs_refcount) > 0);
- atomic_dec(&q->lqs_refcount);
- CDEBUG(D_QUOTA, "lqs=%p refcount %d\n",
- q, atomic_read(&q->lqs_refcount));
+ __lqs_putref(q, 0);
RETURN(q);
}
static void
lqs_exit(struct hlist_node *hnode)
{
- struct lustre_qunit_size *q;
+ struct lustre_qunit_size *q =
+ hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
ENTRY;
- q = hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
/*
* Nothing should be left. User of lqs put it and
* lqs also was deleted from table by this time