struct lquota_entry *lqe_gl;
int rc;
- lqe_gl = lqe->lqe_is_global ? lqe : NULL;
+ if (lqe->lqe_is_global && !lqe->lqe_enforced)
+ RETURN_EXIT;
+
rc = qmt_pool_lqes_lookup_spec(env, qmt, lqe_rtype(lqe),
lqe_qtype(lqe), &lqe->lqe_id);
- if (!qti_lqes_cnt(env))
+ if (rc)
GOTO(lqes_fini, rc);
- if (!lqe_gl && qti_lqes_glbl(env)->lqe_is_global)
- lqe_gl = qti_lqes_glbl(env);
-
- if (!lqe_gl)
- GOTO(lqes_fini, rc);
+ lqe_gl = qti_lqes_glbl(env);
+ /* If global lqe is not enforced, it is not added to qti_lqes array */
+ if (!lqe_gl->lqe_is_global)
+ GOTO(lqes_fini, 0);
+ mutex_lock(&lqe_gl->lqe_glbl_data_lock);
if (lqe_gl->lqe_glbl_data)
- qmt_seed_glbe(env, lqe_gl->lqe_glbl_data);
+ qmt_seed_glbe(env, lqe_gl->lqe_glbl_data, false);
+ mutex_unlock(&lqe_gl->lqe_glbl_data_lock);
+
/* Even if slaves haven't enqueued quota lock yet,
* it is needed to set lqe_revoke_time in qmt_id_lock_glimpse
* in case of reaching qpi_least_qunit */
th = qmt_trans_start(env, lqe);
if (IS_ERR(th))
GOTO(out_nolock, rc = PTR_ERR(th));
+
+ if (CFS_FAIL_CHECK(OBD_FAIL_QUOTA_NOSYNC))
+ th->th_sync = 0;
}
now = ktime_get_real_seconds();
/* change quota limits */
lqe->lqe_hardlimit = hard;
lqe->lqe_softlimit = soft;
+ if (is_default) {
+ dirtied = true;
+ GOTO(quota_write, 0);
+ }
quota_set:
- /* recompute qunit in case it was never initialized */
- if (qmt_revalidate(env, lqe))
- need_id_notify = true;
-
/* clear grace time */
if (lqe->lqe_softlimit == 0 ||
lqe->lqe_granted <= lqe->lqe_softlimit)
dirtied = true;
}
+quota_write:
if (dirtied) {
if (!is_updated) {
/* write new quota settings to disk */
qmt_restore(lqe, &qti_lqes_rstr(env)[0]);
GOTO(out, rc);
}
- } else {
+ } else if (lqe->lqe_is_global) {
ver = dt_version_get(env, LQE_GLB_OBJ(lqe));
}
}
if (rc == 0 && dirtied) {
- qmt_glb_lock_notify(env, lqe, ver);
+ if (lqe->lqe_is_global)
+ qmt_glb_lock_notify(env, lqe, ver);
if (lqe->lqe_id.qid_uid == 0) {
struct qmt_entry_iter_data iter_data;
if (IS_ERR(lqe))
RETURN(PTR_ERR(lqe));
+ lqe->lqe_is_deleted = 0;
+ lqe->lqe_is_reset = 0;
rc = qmt_set_with_lqe(env, qmt, lqe, hard, soft, time, valid,
is_default, is_updated);
+ if (rc == 0)
+ lqe->lqe_is_deleted = 0;
+
lqe_putref(lqe);
RETURN(rc);
}
/*
+ * Delete the quota setting of the specified quota ID
+ *
+ * \param env - is the environment passed by the caller
+ * \param qmt - is the quota master target
+ * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or
+ * inode (i.e. LQUOTA_RES_MD)
+ * \param qtype - is the quota type
+ * \param qid - is the quota identifier for which we want to delete its
+ * quota settings.
+ */
+static int qmt_delete_qid(const struct lu_env *env, struct qmt_device *qmt,
+			  __u8 restype, __u8 qtype, __u64 qid)
+{
+	struct qmt_thread_info *qti = qmt_info(env);
+	union lquota_id *quota_id = &qti->qti_id;
+	struct thandle *th = NULL;
+	struct qmt_pool_info *qpi = NULL;
+	struct lquota_entry *lqe = NULL;
+	__u64 ver = 0;
+	int rc;
+
+	ENTRY;
+
+	quota_id->qid_uid = qid;
+	lqe = qmt_pool_lqe_lookup(env, qmt, restype, qtype, quota_id, NULL);
+	if (IS_ERR(lqe))
+		RETURN(PTR_ERR(lqe));
+
+	qpi = qmt_pool_lookup_glb(env, qmt, restype);
+	if (IS_ERR(qpi))
+		/* propagate the real lookup error rather than a
+		 * hard-coded -ENOMEM */
+		GOTO(out, rc = PTR_ERR(qpi));
+
+	th = qmt_trans_start(env, lqe);
+	if (IS_ERR(th))
+		GOTO(out, rc = PTR_ERR(th));
+
+	if (CFS_FAIL_CHECK(OBD_FAIL_QUOTA_NOSYNC))
+		th->th_sync = 0;
+
+	/* remove the ID's record from the global index under the entry
+	 * write lock so concurrent setquota can't race the deletion */
+	lqe_write_lock(lqe);
+	rc = lquota_disk_delete(env, th,
+				qpi->qpi_glb_obj[qtype], qid, &ver);
+
+	lqe_write_unlock(lqe);
+	dt_trans_stop(env, qmt->qmt_child, th);
+
+	if (rc == 0) {
+		lqe_set_deleted(lqe);
+		/* glimpse so slaves notice the settings are gone */
+		qmt_glb_lock_notify(env, lqe, ver);
+	} else if (rc == -ENOENT) {
+		/* no record on disk: nothing to delete, not an error */
+		rc = 0;
+	}
+
+out:
+	if (!IS_ERR_OR_NULL(qpi))
+		qpi_putref(env, qpi);
+
+	lqe_putref(lqe);
+
+	RETURN(rc);
+}
+
+/*
+ * lquota_disk_for_each_slv() callback: zero the granted space recorded in
+ * one slave index copy for the ID stored in qti->qti_id.  Per-slave errors
+ * are logged but not propagated so the remaining slaves still get reset.
+ */
+static int qmt_reset_slv_cb(const struct lu_env *env, struct lu_fid *glb_fid,
+			    char *slv_name, struct lu_fid *slv_fid, void *arg)
+{
+	struct qmt_device *qmt = (struct qmt_device *)arg;
+	struct qmt_thread_info *qti = qmt_info(env);
+	struct dt_object *slv_obj = NULL;
+	struct lquota_slv_rec rec;
+	struct thandle *th = NULL;
+	int rc;
+
+	slv_obj = dt_locate(env, qmt->qmt_child, slv_fid);
+	if (IS_ERR(slv_obj))
+		GOTO(out, rc = PTR_ERR(slv_obj));
+
+	if (slv_obj->do_index_ops == NULL) {
+		rc = slv_obj->do_ops->do_index_try(env, slv_obj,
+						   &dt_quota_slv_features);
+		if (rc) {
+			CERROR("%s: fail to setup slave idx for %s: rc = %d\n",
+			       qmt->qmt_child->dd_lu_dev.ld_obd->obd_name,
+			       slv_name, rc);
+			GOTO(out, rc);
+		}
+	}
+
+	th = qmt_trans_start(env, qti_lqes(env)[0]);
+	if (IS_ERR(th))
+		GOTO(out, rc = PTR_ERR(th));
+
+	/* write back a zeroed grant for this slave */
+	rec.qsr_granted = 0;
+	rc = lquota_disk_write(env, th, slv_obj, &qti->qti_id,
+			       (struct dt_rec *)&rec, 0, NULL);
+	if (rc)
+		CERROR("%s: failed to reset slave grant for %s: rc = %d\n",
+		       qmt->qmt_child->dd_lu_dev.ld_obd->obd_name, slv_name,
+		       rc);
+out:
+	if (!IS_ERR_OR_NULL(th))
+		dt_trans_stop(env, qmt->qmt_child, th);
+
+	/* dt_locate() failure leaves an ERR_PTR in slv_obj which must not
+	 * be passed to dt_object_put() */
+	if (!IS_ERR_OR_NULL(slv_obj))
+		dt_object_put(env, slv_obj);
+
+	/* always return 0 so iteration continues over the other slaves */
+	return 0;
+}
+
+/*
+ * Reset the quota of the quota ID, it will reset the soft/hard limit and grant
+ *
+ * \param env - is the environment passed by the caller
+ * \param qmt - is the quota master target
+ * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or
+ * inode (i.e. LQUOTA_RES_MD)
+ * \param qtype - is the quota type
+ * \param qid - is the quota identifier for which we want to reset its
+ * quota settings and grant.
+ */
+static int qmt_reset_qid(const struct lu_env *env, struct qmt_device *qmt,
+			 __u8 restype, __u8 qtype, __u64 qid)
+{
+	struct qmt_thread_info *qti = qmt_info(env);
+	union lquota_id *quota_id = &qti->qti_id;
+	struct qmt_pool_info *qpi = NULL;
+	struct lquota_entry *lqe = NULL;
+	struct thandle *th = NULL;
+	__u64 softlimit = 0, hardlimit = 0;
+	__u64 ver = 0;
+	int rc;
+
+	ENTRY;
+
+	quota_id->qid_uid = qid;
+	lqe = qmt_pool_lqe_lookup(env, qmt, restype, qtype, quota_id, NULL);
+	if (IS_ERR(lqe))
+		RETURN(PTR_ERR(lqe));
+
+	/* NOTE(review): unlike qmt_delete_qid(), lqe2qpi() appears to be a
+	 * plain back-pointer lookup — no reference is taken and no
+	 * qpi_putref() is done on the out path; confirm it can really
+	 * return an ERR_PTR, otherwise this check is dead code */
+	qpi = lqe2qpi(lqe);
+	if (IS_ERR(qpi))
+		GOTO(out, rc = -ENOMEM);
+
+	th = qmt_trans_start(env, lqe);
+	if (IS_ERR(th))
+		GOTO(out, rc = PTR_ERR(th));
+
+	lqe_write_lock(lqe);
+
+	/* save the old limits so they can be restored if the disk write
+	 * below fails */
+	softlimit = lqe->lqe_softlimit;
+	hardlimit = lqe->lqe_hardlimit;
+
+	/* clear limits, grant and qunit in memory and persist the zeroed
+	 * record, bumping the version so slaves notice the change */
+	lqe->lqe_softlimit = 0;
+	lqe->lqe_hardlimit = 0;
+	lqe->lqe_granted = 0;
+	lqe->lqe_edquot = 0;
+	lqe->lqe_qunit = 0;
+	lqe->lqe_is_default = 0;
+	lqe->lqe_is_deleted = 0;
+	lqe->lqe_is_reset = 1;
+	rc = qmt_glb_write(env, th, lqe, LQUOTA_BUMP_VER, &ver);
+	if (rc) {
+		LQUOTA_ERROR(lqe, "failed to write quota global rec\n");
+
+		/* roll back the in-memory limits (only the non-zero ones)
+		 * and drop the reset flag; granted/qunit stay zeroed */
+		if (softlimit != 0)
+			lqe->lqe_softlimit = softlimit;
+		if (hardlimit != 0)
+			lqe->lqe_hardlimit = hardlimit;
+		lqe->lqe_is_reset = 0;
+	}
+
+	lqe_write_unlock(lqe);
+	dt_trans_stop(env, qmt->qmt_child, th);
+	if (rc)
+		GOTO(out, rc);
+
+	lquota_generate_fid(&qti->qti_fid, restype, qtype);
+	/* qmt_reset_slv_cb() starts its transaction from the lqe stashed
+	 * in slot 0 of the env lqe array */
+	qti_lqes(env)[0] = lqe;
+	/* best effort: per-slave write errors are logged inside the
+	 * callback and do not fail the overall reset */
+	lquota_disk_for_each_slv(env, qpi->qpi_root, &qti->qti_fid,
+				 qmt_reset_slv_cb, qmt);
+
+	/* glimpse so slaves drop their locally granted space for this ID */
+	qmt_glb_lock_notify(env, lqe, ver);
+
+out:
+	lqe_putref(lqe);
+
+	RETURN(rc);
+}
+/*
* Handle quotactl request.
*
* \param env - is the environment passed by the caller
* \param oqctl - is the quotactl request
*/
static int qmt_quotactl(const struct lu_env *env, struct lu_device *ld,
- struct obd_quotactl *oqctl)
+ struct obd_quotactl *oqctl, char *buffer, int size)
{
struct qmt_thread_info *qti = qmt_info(env);
union lquota_id *id = &qti->qti_id;
struct qmt_device *qmt = lu2qmt_dev(ld);
+ struct dt_object *glb_obj;
struct obd_dqblk *dqb = &oqctl->qc_dqblk;
+ struct qmt_pool_info *pool;
char *poolname;
int rc = 0;
bool is_default = false;
+ bool is_first_iter = false;
ENTRY;
LASSERT(qmt != NULL);
poolname);
break;
+ case LUSTRE_Q_ITERQUOTA:
+ if (oqctl->qc_iter_md_offset == 0 &&
+ oqctl->qc_iter_dt_offset == 0)
+ is_first_iter = true;
+
+ if (is_first_iter || oqctl->qc_iter_md_offset != 0) {
+ pool = qmt_pool_lookup_name(env, qmt, LQUOTA_RES_MD,
+ NULL);
+ if (IS_ERR(pool))
+ RETURN(PTR_ERR(pool));
+
+ glb_obj = pool->qpi_glb_obj[oqctl->qc_type];
+ rc = lquota_obj_iter(env, lu2dt_dev(ld), glb_obj,
+ oqctl, buffer, size / 2, true, true);
+
+ qpi_putref(env, pool);
+
+ if (rc < 0 && rc != -ENOENT)
+ break;
+
+ rc = 0;
+ } else {
+ oqctl->qc_iter_md_buflen = 0;
+ }
+
+ if (is_first_iter || oqctl->qc_iter_dt_offset != 0) {
+ pool = qmt_pool_lookup_name(env, qmt, LQUOTA_RES_DT,
+ NULL);
+ if (IS_ERR(pool))
+ RETURN(PTR_ERR(pool));
+
+ glb_obj = pool->qpi_glb_obj[oqctl->qc_type];
+ rc = lquota_obj_iter(env, lu2dt_dev(ld), glb_obj,
+ oqctl, buffer + size / 2, size / 2,
+ true, false);
+ qpi_putref(env, pool);
+
+ if (rc < 0 && rc != -ENOENT)
+ break;
+
+ rc = 0;
+ } else {
+ oqctl->qc_iter_dt_buflen = 0;
+ }
+ break;
+
case LUSTRE_Q_GETDEFAULT:
case LUSTRE_Q_GETDEFAULT_POOL:
is_default = true;
false, poolname);
break;
+ case LUSTRE_Q_DELETEQID:
+ rc = qmt_delete_qid(env, qmt, LQUOTA_RES_MD, oqctl->qc_type,
+ oqctl->qc_id);
+ if (rc)
+ break;
+
+ rc = qmt_delete_qid(env, qmt, LQUOTA_RES_DT, oqctl->qc_type,
+ oqctl->qc_id);
+ break;
+
+ case LUSTRE_Q_RESETQID:
+ if (oqctl->qc_id == 0)
+ RETURN(-EINVAL);
+
+ id->qid_uid = oqctl->qc_id;
+ /* save the quota setting before resetting */
+ rc = qmt_get(env, qmt, LQUOTA_RES_MD, oqctl->qc_type, id,
+ &dqb->dqb_ihardlimit, &dqb->dqb_isoftlimit,
+ &dqb->dqb_itime, false, NULL);
+ if (rc)
+ break;
+ else
+ dqb->dqb_valid |= QIF_ILIMITS | QIF_ITIME;
+
+ rc = qmt_get(env, qmt, LQUOTA_RES_DT, oqctl->qc_type, id,
+ &dqb->dqb_bhardlimit, &dqb->dqb_bsoftlimit,
+ &dqb->dqb_btime, false, NULL);
+ if (rc)
+ break;
+
+ dqb->dqb_valid |= QIF_BLIMITS | QIF_BTIME;
+ dqb->dqb_curinodes = 0;
+ dqb->dqb_curspace = 0;
+
+ /* reset the corresponding quota ID */
+ rc = qmt_reset_qid(env, qmt, LQUOTA_RES_MD, oqctl->qc_type,
+ oqctl->qc_id);
+ if (rc)
+ break;
+
+ rc = qmt_reset_qid(env, qmt, LQUOTA_RES_DT, oqctl->qc_type,
+ oqctl->qc_id);
+ break;
+
+
default:
CERROR("%s: unsupported quotactl command: %d\n",
qmt->qmt_svname, oqctl->qc_cmd);
return can_release;
}
-static inline void qmt_rel_lqes(const struct lu_env *env, __u64 *slv, __u64 cnt)
+/* Return @cnt of granted space from every lqe in the env array and from the
+ * slave counter @slv.  When @reset is true the granted counters and @slv are
+ * zeroed outright instead of being decremented by @cnt. */
+static inline void qmt_rel_lqes(const struct lu_env *env, __u64 *slv, __u64 cnt,
+				bool reset)
 {
 	int i;
-	for (i = 0; i < qti_lqes_cnt(env); i++)
-		qti_lqe_granted(env, i) -= cnt;
+	for (i = 0; i < qti_lqes_cnt(env); i++) {
+		if (reset)
+			qti_lqe_granted(env, i) = 0;
+		else
+			qti_lqe_granted(env, i) -= cnt;
+	}
-	*slv -= cnt;
+	if (reset)
+		*slv = 0;
+	else
+		*slv -= cnt;
 }
static inline bool qmt_lqes_cannot_grant(const struct lu_env *env, __u64 cnt)
* acquire/release
* \param qb_usage - is the current space usage on the slave
* \param repbody - is the quota_body of reply
+ * \param idx - is the index of a slave target
*
* \retval 0 : success
* \retval -EDQUOT : out of quota
*/
int qmt_dqacq0(const struct lu_env *env, struct qmt_device *qmt,
struct obd_uuid *uuid, __u32 qb_flags, __u64 qb_count,
- __u64 qb_usage, struct quota_body *repbody)
+ __u64 qb_usage, struct quota_body *repbody, int idx)
{
- __u64 now, count;
+ __u64 now, count = 0;
struct dt_object *slv_obj = NULL;
__u64 slv_granted, slv_granted_bck;
struct thandle *th = NULL;
memset(repbody, 0, sizeof(*repbody));
memcpy(&repbody->qb_id, &lqe->lqe_id, sizeof(repbody->qb_id));
- if (OBD_FAIL_CHECK(OBD_FAIL_QUOTA_RECOVERABLE_ERR))
+ if (CFS_FAIL_CHECK(OBD_FAIL_QUOTA_RECOVERABLE_ERR))
RETURN(-cfs_fail_val);
- if (OBD_FAIL_CHECK(OBD_FAIL_QUOTA_PREACQ) &&
+ if (CFS_FAIL_CHECK(OBD_FAIL_QUOTA_PREACQ) &&
(req_is_preacq(qb_flags) || req_is_rel(qb_flags)))
RETURN(-EAGAIN);
if (req_is_acq(qb_flags) && qb_count == 0)
GOTO(out_locked, rc = 0);
+ if (lqe->lqe_is_reset) {
+ lqe->lqe_granted = 0;
+ repbody->qb_count = qb_count;
+ qmt_rel_lqes(env, &slv_granted, qb_count, lqe->lqe_is_reset);
+ GOTO(out_locked, rc = 0);
+ }
+
/* fetch how much quota space is already granted to this slave */
rc = qmt_slv_read(env, &lqe->lqe_id, slv_obj, &slv_granted);
if (rc) {
repbody->qb_count = qb_count;
/* put released space back to global pool */
- qmt_rel_lqes(env, &slv_granted, qb_count);
+ qmt_rel_lqes(env, &slv_granted, qb_count, lqe->lqe_is_reset);
GOTO(out_write, rc = 0);
}
/* start/stop grace timer if required */
qmt_lqes_tune_grace(env, now);
+ if (CFS_FAIL_CHECK(OBD_FAIL_QUOTA_GRANT))
+ slv_granted = 0xFFFFFFFFFFF00000;
+
/* Update slave index first since it is easier to roll back */
ret = qmt_slv_write(env, th, lqe, slv_obj, LQUOTA_BUMP_VER,
&repbody->qb_slv_ver, slv_granted);
* size according to the total granted & limits. */
/* clear/set edquot flag and notify slaves via glimpse if needed */
- qmt_adjust_and_notify(env, qmt, now, qb_flags);
+ qmt_adjust_notify_nu(env, qmt, now, qb_flags, idx);
out_locked:
LQUOTA_DEBUG_LQES(env, "dqacq ends count:%llu ver:%llu rc:%d",
repbody->qb_count, repbody->qb_slv_ver, rc);
dt_object_put(env, slv_obj);
if ((req_is_acq(qb_flags) || req_is_preacq(qb_flags)) &&
- OBD_FAIL_CHECK(OBD_FAIL_QUOTA_EDQUOT)) {
+ CFS_FAIL_CHECK(OBD_FAIL_QUOTA_EDQUOT)) {
/* introduce inconsistency between granted value in slave index
* and slave index copy of slave */
repbody->qb_count = 0;
* \retval slave type(QMT_STYPE_MDT or QMT_STYPE_OST)
* \retval -EINVAL wrong uuid
*/
-int qmt_uuid2idx(struct obd_uuid *uuid, int *idx)
+enum qmt_stype qmt_uuid2idx(struct obd_uuid *uuid, int *idx)
{
char *uuid_str, *name, *dash;
int rc = -EINVAL;
struct quota_body *qbody, *repbody;
struct obd_uuid *uuid;
struct ldlm_lock *lock;
- int rtype, qtype;
- int rc, idx, stype;
+ enum lquota_res_type rtype;
+ enum lquota_type qtype;
+ enum qmt_stype stype;
+ int rc, idx;
+ struct obd_device *obd = NULL;
+
ENTRY;
+ if (req->rq_export)
+ obd = req->rq_export->exp_obd;
+
qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
if (qbody == NULL)
RETURN(err_serious(-EPROTO));
timeout_t timeout;
svc = req->rq_rqbd->rqbd_svcpt;
- timeout = at_est2timeout(at_get(&svc->scp_at_estimate));
+ timeout = at_est2timeout(
+ obd_at_get(obd, &svc->scp_at_estimate));
+
timeout += (ldlm_bl_timeout(lock) >> 1);
/* lock is being cancelled, prolong timeout */
RETURN(rc);
rc = qmt_dqacq0(env, qmt, uuid, qbody->qb_flags,
- qbody->qb_count, qbody->qb_usage, repbody);
+ qbody->qb_count, qbody->qb_usage, repbody,
+ qmt_dom(rtype, stype) ? -1 : idx);
if (lustre_handle_is_used(&qbody->qb_lockh))
/* return current qunit value only to slaves owning an per-ID