 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 * Use is subject to license terms.
 *
 * Author: Johann Lombardi <johann.lombardi@intel.com>
 * Author: Niu Yawei <yawei.niu@intel.com>
 */
#define DEBUG_SUBSYSTEM S_LQUOTA

#include <obd_class.h>
#include "qmt_internal.h"
/*
 * Retrieve quota settings for a given identifier.
 *
 * \param env - is the environment passed by the caller
 * \param qmt - is the quota master target
 * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or
 *                  inode (i.e. LQUOTA_RES_MD)
 * \param qtype - is the quota type
 * \param id - is the quota identifier for which we want to access the quota
 *             settings
 * \param hard - is the output variable where to copy the hard limit
 * \param soft - is the output variable where to copy the soft limit
 * \param time - is the output variable where to copy the grace time
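 *
 * Note that \a hard, \a soft and \a time may each be NULL when the caller is
 * not interested in that particular value; qmt_quotactl() below passes NULL
 * limit pointers when it only needs to read the grace times.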
 */
static int qmt_get(const struct lu_env *env, struct qmt_device *qmt,
                   __u8 restype, __u8 qtype, union lquota_id *id,
                   __u64 *hard, __u64 *soft, __u64 *time, bool is_default,
                   char *pool_name)
{
        struct lquota_entry *lqe;

        LASSERT(!is_default || id->qid_uid == 0);
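        /* Default quota settings are stored in the lqe of ID 0, so a request
         * for the default settings must come in with qid 0; the assertion
         * above enforces this. */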
        if (pool_name && !strnlen(pool_name, LOV_MAXPOOLNAME))
                pool_name = NULL;

        /* look-up lqe structure containing quota settings */
        lqe = qmt_pool_lqe_lookup(env, qmt, restype, qtype, id, pool_name);
        if (IS_ERR(lqe))
                RETURN(PTR_ERR(lqe));

        /* copy quota settings */
        LQUOTA_DEBUG(lqe, "fetch settings");
        if (hard != NULL)
                *hard = lqe->lqe_hardlimit;
        if (soft != NULL)
                *soft = lqe->lqe_softlimit;
        if (time != NULL) {
                *time = lqe->lqe_gracetime;
                if (lqe->lqe_is_default)
                        *time |= (__u64)LQUOTA_FLAG_DEFAULT <<
                                 LQUOTA_GRACE_BITS;
        }
struct qmt_entry_iter_data {
        const struct lu_env *qeid_env;
        struct qmt_device *qeid_qmt;
};

static int qmt_entry_iter_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                             struct hlist_node *hnode, void *d)
{
        struct qmt_entry_iter_data *iter = (struct qmt_entry_iter_data *)d;
        struct lquota_entry *lqe;

        lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        LASSERT(atomic_read(&lqe->lqe_ref) > 0);

        if (lqe->lqe_id.qid_uid == 0 || !lqe->lqe_is_default)
                return 0;

        return qmt_set_with_lqe(iter->qeid_env, iter->qeid_qmt, lqe, 0, 0, 0, 0,
                                true, false);
static void qmt_set_id_notify(const struct lu_env *env, struct qmt_device *qmt,
                              struct lquota_entry *lqe)
{
        struct lquota_entry *lqe_gl;
        int rc;

        lqe_gl = lqe->lqe_is_global ? lqe : NULL;
        rc = qmt_pool_lqes_lookup_spec(env, qmt, lqe_rtype(lqe),
                                       lqe_qtype(lqe), &lqe->lqe_id);
        if (!qti_lqes_cnt(env))

        if (!lqe_gl && qti_lqes_glbl(env)->lqe_is_global)
                lqe_gl = qti_lqes_glbl(env);

        if (lqe_gl->lqe_glbl_data)
                qmt_seed_glbe(env, lqe_gl->lqe_glbl_data);
        /* Even if slaves haven't enqueued the quota lock yet, lqe_revoke_time
         * still needs to be set in qmt_id_lock_glimpse in case qpi_least_qunit
         * is reached */
        qmt_id_lock_notify(qmt, lqe_gl);
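        /* the glimpse queued by qmt_id_lock_notify() above is what tells the
         * slaves about the updated qunit/edquot state for this ID */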
/*
 * Update quota settings for a given lqe.
 *
 * \param env - is the environment passed by the caller
 * \param qmt - is the quota master target
 * \param lqe - is the lquota_entry for which we want to modify the quota
 *              settings
 * \param hard - is the new hard limit
 * \param soft - is the new soft limit
 * \param time - is the new grace time
 * \param valid - is the list of settings to change
 * \param is_default - true for default quota setting
 * \param is_updated - true if the lqe is updated and no need to write back
 */
int qmt_set_with_lqe(const struct lu_env *env, struct qmt_device *qmt,
                     struct lquota_entry *lqe, __u64 hard, __u64 soft,
                     __u64 time, __u32 valid, bool is_default, bool is_updated)
{
        struct thandle *th = NULL;
        time64_t now;
        __u64 ver;
        bool dirtied = false;
        int rc = 0;
        int need_id_notify = 0;

        /* need to write back to global quota file? */
        /* By default we should have only one lqe here, so no allocations
         * should be done. */
        if (qti_lqes_restore_init(env))
                GOTO(out_nolock, rc = -ENOMEM);
        /* allocate & start transaction with enough credits to update
         * quota settings in the global index file */
        th = qmt_trans_start(env, lqe);
        if (IS_ERR(th))
                GOTO(out_nolock, rc = PTR_ERR(th));

        now = ktime_get_real_seconds();

        LQUOTA_DEBUG(lqe,
                     "changing quota settings valid:%x hard:%llu soft:%llu time:%llu",
                     valid, hard, soft, time);
        if (is_default && lqe->lqe_id.qid_uid != 0) {
                LQUOTA_DEBUG(lqe, "set qid %llu to use default quota setting",
                             lqe->lqe_id.qid_uid);

                qmt_lqe_set_default(env, lqe->lqe_site->lqs_parent, lqe, false);

        if ((valid & QIF_TIMES) != 0 && lqe->lqe_gracetime != time) {
                /* change time settings */
                lqe->lqe_gracetime = time;

        if ((valid & QIF_LIMITS) != 0 &&
            (lqe->lqe_hardlimit != hard || lqe->lqe_softlimit != soft)) {
                rc = qmt_validate_limits(lqe, hard, soft);

                /* change quota limits */
                lqe->lqe_hardlimit = hard;
                lqe->lqe_softlimit = soft;

                /* recompute qunit in case it was never initialized */
                if (qmt_revalidate(env, lqe))
                        need_id_notify = 1;

                /* clear grace time */
                if (lqe->lqe_softlimit == 0 ||
                    lqe->lqe_granted <= lqe->lqe_softlimit)
                        /* no soft limit or below soft limit, let's clear grace
                         * time */
                        lqe->lqe_gracetime = 0;
                else if ((valid & QIF_TIMES) == 0)
                        /* set grace only if the user hasn't provided one */
                        lqe->lqe_gracetime = now + qmt_lqe_grace(lqe);

                /* change enforced status based on new parameters */
                if (lqe->lqe_id.qid_uid == 0 || (lqe->lqe_hardlimit == 0 &&
                    lqe->lqe_softlimit == 0))
                        lqe->lqe_enforced = false;
                else
                        lqe->lqe_enforced = true;
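                /* note: ID 0 holds the default settings and grace times, which
                 * is why it is never enforced above; for other IDs enforcement
                 * simply follows from a non-zero hard or soft limit */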
        if (!is_default && lqe->lqe_is_default) {
                LQUOTA_DEBUG(lqe, "quota for qid %llu has been set explicitly, "
                             "clear the default flag",
                             lqe->lqe_id.qid_uid);

                qmt_lqe_clear_default(lqe);
        /* write new quota settings to disk */
        rc = qmt_glb_write(env, th, lqe, LQUOTA_BUMP_VER, &ver);
        if (rc) {
                /* restore initial quota settings */
                qmt_restore(lqe, &qti_lqes_rstr(env)[0]);
                GOTO(out, rc);
        }

        ver = dt_version_get(env, LQE_GLB_OBJ(lqe));

        /* compute new qunit value now that we have modified the quota
         * settings or clear/set edquot flag if needed */
        if (qmt_adjust_qunit(env, lqe) || qmt_adjust_edquot(lqe, now))
                need_id_notify = 1;

        lqe_write_unlock(lqe);

        qti_lqes_restore_fini(env);

        if (th != NULL && !IS_ERR(th))
                dt_trans_stop(env, qmt->qmt_child, th);

        if (rc == 0 && dirtied) {
                qmt_glb_lock_notify(env, lqe, ver);
                if (lqe->lqe_id.qid_uid == 0) {
                        struct qmt_entry_iter_data iter_data;

                        LQUOTA_DEBUG(lqe, "notify all lqes using default quota");
                        iter_data.qeid_env = env;
                        iter_data.qeid_qmt = qmt;
                        cfs_hash_for_each_safe(lqe->lqe_site->lqs_hash,
                                               qmt_entry_iter_cb, &iter_data);
                        /* Always notify slaves with default values. Don't
                         * worry about the overhead: only unchanged values will
                         * be sent (see qmt_id_lock_cb for details). */

        if (need_id_notify && !is_updated)
                qmt_set_id_notify(env, qmt, lqe);
/*
 * Update quota settings for a given identifier.
 *
 * \param env - is the environment passed by the caller
 * \param qmt - is the quota master target
 * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or
 *                  inode (i.e. LQUOTA_RES_MD)
 * \param qtype - is the quota type
 * \param id - is the quota identifier for which we want to modify the quota
 *             settings
 * \param hard - is the new hard limit
 * \param soft - is the new soft limit
 * \param time - is the new grace time
 * \param valid - is the list of settings to change
 * \param is_default - true for default quota setting
 * \param is_updated - true if the lqe is updated and no need to write back
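 *
 * For example, the Q_SETINFO handler in qmt_quotactl() below only updates the
 * grace times by calling this function with \a valid set to QIF_TIMES and zero
 * limits, while Q_SETQUOTA passes the new limits with QIF_IFLAGS or QIF_BFLAGS
 * in \a valid.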
 */
static int qmt_set(const struct lu_env *env, struct qmt_device *qmt,
                   __u8 restype, __u8 qtype, union lquota_id *id,
                   __u64 hard, __u64 soft, __u64 time, __u32 valid,
                   bool is_default, bool is_updated, char *pool_name)
{
        struct lquota_entry *lqe;
        int rc;

        if (pool_name && !strnlen(pool_name, LOV_MAXPOOLNAME))
                pool_name = NULL;

        /* look-up quota entry associated with this ID */
        lqe = qmt_pool_lqe_lookup(env, qmt, restype, qtype, id, pool_name);
        if (IS_ERR(lqe))
                RETURN(PTR_ERR(lqe));

        rc = qmt_set_with_lqe(env, qmt, lqe, hard, soft, time, valid,
                              is_default, is_updated);
/*
 * Handle quotactl request.
 *
 * \param env - is the environment passed by the caller
 * \param ld - is the lu device associated with the qmt
 * \param oqctl - is the quotactl request
 */
static int qmt_quotactl(const struct lu_env *env, struct lu_device *ld,
                        struct obd_quotactl *oqctl)
{
        struct qmt_thread_info *qti = qmt_info(env);
        union lquota_id *id = &qti->qti_id;
        struct qmt_device *qmt = lu2qmt_dev(ld);
        struct obd_dqblk *dqb = &oqctl->qc_dqblk;
        char *poolname;
        int rc = 0;
        bool is_default = false;

        LASSERT(qmt != NULL);

        if (oqctl->qc_type >= LL_MAXQUOTAS)
                /* invalid quota type */

        poolname = LUSTRE_Q_CMD_IS_POOL(oqctl->qc_cmd) ?
                        oqctl->qc_poolname : NULL;

        switch (oqctl->qc_cmd) {
        case Q_GETINFO: /* read grace times */
        case LUSTRE_Q_GETINFOPOOL:
                /* Global grace time is stored in quota settings of ID 0. */
                id->qid_uid = 0;

                /* read inode grace time */
                rc = qmt_get(env, qmt, LQUOTA_RES_MD, oqctl->qc_type, id, NULL,
                             NULL, &oqctl->qc_dqinfo.dqi_igrace,
                             false, poolname);
                /* There could be no MD pool, so try to find DT pool */
                if (rc && rc != -ENOENT)
                        RETURN(rc);

                /* read block grace time */
                rc = qmt_get(env, qmt, LQUOTA_RES_DT, oqctl->qc_type, id, NULL,
                             NULL, &oqctl->qc_dqinfo.dqi_bgrace,
                             false, poolname);
                break;

        case Q_SETINFO: /* modify grace times */
        case LUSTRE_Q_SETINFOPOOL:
                /* setinfo should be using dqi->dqi_valid, but lfs incorrectly
                 * sets the valid flags in dqb->dqb_valid instead, try to live
                 * with it */
                /* Global grace time is stored in quota settings of ID 0. */
                id->qid_uid = 0;

                if ((dqb->dqb_valid & QIF_ITIME) != 0) {
                        /* set inode grace time */
                        rc = qmt_set(env, qmt, LQUOTA_RES_MD, oqctl->qc_type,
                                     id, 0, 0, oqctl->qc_dqinfo.dqi_igrace,
                                     QIF_TIMES, false, false,
                                     poolname);
                        if (rc)
                                RETURN(rc);
                }

                if ((dqb->dqb_valid & QIF_BTIME) != 0)
                        /* set block grace time */
                        rc = qmt_set(env, qmt, LQUOTA_RES_DT, oqctl->qc_type,
                                     id, 0, 0, oqctl->qc_dqinfo.dqi_bgrace,
                                     QIF_TIMES, false, false,
                                     poolname);
                break;
        case LUSTRE_Q_GETDEFAULT:
                is_default = true;
                /* fallthrough */

        case Q_GETQUOTA: /* consult quota limit */
        case LUSTRE_Q_GETQUOTAPOOL:
                /* extract quota ID from quotactl request */
                id->qid_uid = oqctl->qc_id;

                /* look-up inode quota settings */
                rc = qmt_get(env, qmt, LQUOTA_RES_MD, oqctl->qc_type, id,
                             &dqb->dqb_ihardlimit, &dqb->dqb_isoftlimit,
                             &dqb->dqb_itime, is_default, poolname);
                /* There could be no MD pool, so try to find DT pool */
                if (rc && rc != -ENOENT)
                        RETURN(rc);

                dqb->dqb_valid |= QIF_ILIMITS | QIF_ITIME;

                /* master isn't aware of actual inode usage */
                dqb->dqb_curinodes = 0;

                /* look-up block quota settings */
                rc = qmt_get(env, qmt, LQUOTA_RES_DT, oqctl->qc_type, id,
                             &dqb->dqb_bhardlimit, &dqb->dqb_bsoftlimit,
                             &dqb->dqb_btime, is_default, poolname);
                if (rc)
                        RETURN(rc);

                dqb->dqb_valid |= QIF_BLIMITS | QIF_BTIME;
                /* master doesn't know the actual block usage */
                dqb->dqb_curspace = 0;
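                /* note that qmt_get() may fold LQUOTA_FLAG_DEFAULT into the
                 * high bits of dqb_itime/dqb_btime when the ID inherits the
                 * default settings */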
                break;

        case LUSTRE_Q_SETDEFAULT:
                is_default = true;
                /* fallthrough */

        case Q_SETQUOTA: /* change quota limits */
        case LUSTRE_Q_SETQUOTAPOOL:
                /* extract quota ID from quotactl request */
                id->qid_uid = oqctl->qc_id;

                if ((dqb->dqb_valid & QIF_IFLAGS) != 0) {
                        /* update inode quota settings */
                        rc = qmt_set(env, qmt, LQUOTA_RES_MD, oqctl->qc_type,
                                     id, dqb->dqb_ihardlimit,
                                     dqb->dqb_isoftlimit, dqb->dqb_itime,
                                     dqb->dqb_valid & QIF_IFLAGS, is_default,
                                     false, poolname);
                        if (rc)
                                RETURN(rc);
                }

                if ((dqb->dqb_valid & QIF_BFLAGS) != 0)
                        /* update block quota settings */
                        rc = qmt_set(env, qmt, LQUOTA_RES_DT, oqctl->qc_type,
                                     id, dqb->dqb_bhardlimit,
                                     dqb->dqb_bsoftlimit, dqb->dqb_btime,
                                     dqb->dqb_valid & QIF_BFLAGS, is_default,
                                     false, poolname);
                break;

        default:
                CERROR("%s: unsupported quotactl command: %d\n",
                       qmt->qmt_svname, oqctl->qc_cmd);
void qmt_grant_lqes(const struct lu_env *env, __u64 *slv, __u64 cnt)
{
        int i;

        for (i = 0; i < qti_lqes_cnt(env); i++)
                qti_lqe_granted(env, i) += cnt;
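        /* note: with quota pools, the same ID may be covered by several lquota
         * entries (the global one plus one per pool); the qti_lqes array
         * gathers them all so the grant is applied to each of them
         * consistently */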
static inline bool qmt_lqes_can_rel(const struct lu_env *env, __u64 cnt)
{
        bool can_release = true;
        int i;

        for (i = 0; i < qti_lqes_cnt(env); i++) {
                if (cnt > qti_lqe_granted(env, i)) {
                        LQUOTA_ERROR(qti_lqes(env)[i],
                                     "Can't release %llu that is larger than lqe_granted.\n",
                                     cnt);

static inline void qmt_rel_lqes(const struct lu_env *env, __u64 *slv, __u64 cnt)
{
        int i;

        for (i = 0; i < qti_lqes_cnt(env); i++)
                qti_lqe_granted(env, i) -= cnt;

static inline bool qmt_lqes_cannot_grant(const struct lu_env *env, __u64 cnt)
{
        bool cannot_grant = false;
        int i;

        for (i = 0; i < qti_lqes_cnt(env); i++) {
                if (qti_lqe_hard(env, i) != 0 &&
                    qti_lqe_granted(env, i) + cnt > qti_lqe_hard(env, i)) {
static inline __u64 qmt_lqes_grant_some_quota(const struct lu_env *env)
{
        __u64 min_count, tmp;
        int i;

        for (i = 0, min_count = 0; i < qti_lqes_cnt(env); i++) {
                if (!qti_lqes(env)[i]->lqe_enforced &&
                    !qti_lqes(env)[i]->lqe_is_global)
                        continue;

                tmp = qti_lqe_hard(env, i) - qti_lqe_granted(env, i);
                min_count = tmp < min_count ? tmp : min_count;

static inline __u64 qmt_lqes_alloc_expand(const struct lu_env *env,
                                          __u64 slv_granted, __u64 spare)
{
        __u64 min_count, tmp;
        int i;

        for (i = 0, min_count = 0; i < qti_lqes_cnt(env); i++) {
                /* Don't take into account non-enforced lqes that belong to a
                 * non-global pool. Such lqes are only present in the array to
                 * keep lqe_granted accurate even for lqes without limits. */
                if (!qti_lqes(env)[i]->lqe_enforced &&
                    !qti_lqes(env)[i]->lqe_is_global)
                        continue;

                tmp = qmt_alloc_expand(qti_lqes(env)[i], slv_granted, spare);
                min_count = tmp < min_count ? tmp : min_count;
static inline void qmt_lqes_tune_grace(const struct lu_env *env, __u64 now)
{
        int i;

        for (i = 0; i < qti_lqes_cnt(env); i++) {
                struct lquota_entry *lqe;

                lqe = qti_lqes(env)[i];
                if (lqe->lqe_softlimit != 0) {
                        if (lqe->lqe_granted > lqe->lqe_softlimit &&
                            lqe->lqe_gracetime == 0) {
                                /* First time over the soft limit, let's start
                                 * the grace timer */
                                lqe->lqe_gracetime = now + qmt_lqe_grace(lqe);
                        } else if (lqe->lqe_granted <= lqe->lqe_softlimit &&
                                   lqe->lqe_gracetime != 0) {
                                /* Clear grace timer */
                                lqe->lqe_gracetime = 0;
/*
 * Helper function to handle a quota request from a slave.
 *
 * \param env - is the environment passed by the caller
 * \param qmt - is the master device
 * \param uuid - is the uuid associated with the slave
 * \param qb_flags - are the quota request flags as packed in the quota_body
 * \param qb_count - is the amount of quota space the slave wants to
 *                   acquire or release
 * \param qb_usage - is the current space usage on the slave
 * \param repbody - is the quota_body of reply
 *
 * \retval 0            : success
 * \retval -EDQUOT      : out of quota
 *         -EINPROGRESS : inform client to retry write/create
 *         -ve          : other appropriate errors
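 *
 * This helper is called by qmt_dqacq() below once the lquota entries have been
 * looked up; the intent policy handler referenced in qmt_hdls is expected to
 * use it in the same way for per-ID lock enqueues.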
 */
int qmt_dqacq0(const struct lu_env *env, struct qmt_device *qmt,
               struct obd_uuid *uuid, __u32 qb_flags, __u64 qb_count,
               __u64 qb_usage, struct quota_body *repbody)
{
        struct dt_object *slv_obj = NULL;
        __u64 slv_granted, slv_granted_bck;
        struct thandle *th = NULL;
        struct lquota_entry *lqe = qti_lqes_glbl(env);

        LASSERT(uuid != NULL);

        /* initialize reply */
        memset(repbody, 0, sizeof(*repbody));
        memcpy(&repbody->qb_id, &lqe->lqe_id, sizeof(repbody->qb_id));
        if (OBD_FAIL_CHECK(OBD_FAIL_QUOTA_RECOVERABLE_ERR))
                RETURN(-cfs_fail_val);

        if (qti_lqes_restore_init(env))
                RETURN(-ENOMEM);

        /* look-up index file associated with acquiring slave */
        slv_obj = lquota_disk_slv_find(env, qmt->qmt_child, LQE_ROOT(lqe),
                                       lu_object_fid(&LQE_GLB_OBJ(lqe)->do_lu),
                                       uuid);
        if (IS_ERR(slv_obj))
                GOTO(out, rc = PTR_ERR(slv_obj));

        /* pack slave fid in reply just for sanity check */
        memcpy(&repbody->qb_slv_fid, lu_object_fid(&slv_obj->do_lu),
               sizeof(struct lu_fid));
        /* allocate & start transaction with enough credits to update
         * global & slave indexes */
        th = qmt_trans_start_with_slv(env, NULL, slv_obj, false);
        if (IS_ERR(th))
                GOTO(out, rc = PTR_ERR(th));

        qti_lqes_write_lock(env);

        LQUOTA_DEBUG_LQES(env, "dqacq starts uuid:%s flags:0x%x wanted:%llu"
                          " usage:%llu", obd_uuid2str(uuid), qb_flags, qb_count,
                          qb_usage);

        /* Legal race: limits have been removed on the master, but the slave
         * didn't receive the change yet. Just return EINPROGRESS until the
         * slave gets notified. */
        if (!lqe->lqe_enforced && !req_is_rel(qb_flags))
                GOTO(out_locked, rc = -ESRCH);

        /* recompute qunit in case it was never initialized */
        qmt_revalidate_lqes(env, qmt, qb_flags);

        /* slave just wants to acquire per-ID lock */
        if (req_is_acq(qb_flags) && qb_count == 0)
                GOTO(out_locked, rc = 0);

        /* fetch how much quota space is already granted to this slave */
        rc = qmt_slv_read(env, &lqe->lqe_id, slv_obj, &slv_granted);
        if (rc) {
                LQUOTA_ERROR(lqe, "Failed to get granted for slave %s, rc=%d",
                             obd_uuid2str(uuid), rc);
                GOTO(out_locked, rc);
        }

        /* recall how much space this slave currently owns in order to restore
         * it in case of failure */
        slv_granted_bck = slv_granted;

        /* record current time for soft limit & grace time management */
        now = ktime_get_real_seconds();
        if (req_is_rel(qb_flags)) {
                /* Slave would like to release quota space */
                if (slv_granted < qb_count ||
                    !qmt_lqes_can_rel(env, qb_count)) {
                        /* can't release more than granted */
                        LQUOTA_ERROR_LQES(env,
                                          "Release too much! uuid:%s release: %llu granted:%llu, total:%llu",
                                          obd_uuid2str(uuid), qb_count,
                                          slv_granted, lqe->lqe_granted);
                        GOTO(out_locked, rc = -EINVAL);
                }

                repbody->qb_count = qb_count;
                /* put released space back to global pool */
                qmt_rel_lqes(env, &slv_granted, qb_count);
                GOTO(out_write, rc = 0);
        }

        if (req_has_rep(qb_flags) && slv_granted < qb_usage) {
                /* Slave is reporting space usage in the quota request and it
                 * turns out to be using more quota space than owned, so we
                 * adjust the granted space regardless of the current state of
                 * affairs */
                repbody->qb_count = qb_usage - slv_granted;
                qmt_grant_lqes(env, &slv_granted, repbody->qb_count);
        }

        if (!req_is_acq(qb_flags) && !req_is_preacq(qb_flags))
                GOTO(out_write, rc = 0);

        qmt_adjust_edquot_notify(env, qmt, now, qb_flags);
        if (qti_lqes_edquot(env))
                /* no hope to claim further space back */
                GOTO(out_write, rc = -EDQUOT);

        if (qmt_space_exhausted_lqes(env, now)) {
                /* might have some free space once rebalancing is completed */
                rc = req_is_acq(qb_flags) ? -EINPROGRESS : -EDQUOT;
                GOTO(out_write, rc);
        }
        if (req_is_preacq(qb_flags)) {
                /* slave would like to pre-acquire quota space. To do so, it
                 * reports in qb_count how much spare quota space it owns and
                 * we can grant back quota space which is consistent with the
                 * qunit value. */
                if (qb_count >= qti_lqes_min_qunit(env))
                        /* slave already owns the maximum it should */
                        GOTO(out_write, rc = 0);

                count = qmt_lqes_alloc_expand(env, slv_granted, qb_count);
                if (count == 0)
                        GOTO(out_write, rc = -EDQUOT);

                repbody->qb_count += count;
                qmt_grant_lqes(env, &slv_granted, count);
                GOTO(out_write, rc = 0);
        }

        /* processing acquire request with clients waiting */
        if (qmt_lqes_cannot_grant(env, qb_count)) {
                /* cannot grant as much as asked, but can still afford to grant
                 * some quota space back */
                count = qmt_lqes_grant_some_quota(env);
                repbody->qb_count += count;
                qmt_grant_lqes(env, &slv_granted, count);
                GOTO(out_write, rc = 0);
        }

        /* Woohoo! We can satisfy the slave request! */
        repbody->qb_count += qb_count;
        qmt_grant_lqes(env, &slv_granted, qb_count);

        /* Try to expand the acquired count for DQACQ */
        count = qmt_lqes_alloc_expand(env, slv_granted, 0);
        if (count != 0) {
                /* can even grant more than asked, it is like xmas ... */
                repbody->qb_count += count;
                qmt_grant_lqes(env, &slv_granted, count);
                GOTO(out_write, rc = 0);
        }

        GOTO(out_write, rc = 0);
        if (repbody->qb_count == 0)
                GOTO(out_locked, rc);

        /* start/stop grace timer if required */
        qmt_lqes_tune_grace(env, now);

        /* Update slave index first since it is easier to roll back */
        ret = qmt_slv_write(env, th, lqe, slv_obj, LQUOTA_BUMP_VER,
                            &repbody->qb_slv_ver, slv_granted);
        if (ret) {
                /* restore initial quota settings */
                qmt_restore_lqes(env);

                repbody->qb_count = 0;
                GOTO(out_locked, rc = ret);
        }

        /* Update global index, no version bump needed */
        ret = qmt_glb_write_lqes(env, th, 0, NULL);
        if (ret) {
                /* restore initial quota settings */
                qmt_restore_lqes(env);

                repbody->qb_count = 0;

                /* restore previous granted value */
                ret = qmt_slv_write(env, th, lqe, slv_obj, 0, NULL,
                                    slv_granted_bck);
                if (ret)
                        LQUOTA_ERROR(lqe, "failed to restore initial slave "
                                     "value rc:%d ret:%d", rc, ret);

                qmt_adjust_edquot_notify(env, qmt, now, qb_flags);
                GOTO(out_locked, rc);
        }
        /* Total granted has been changed, let's try to adjust the qunit
         * size according to the total granted & limits. */

        /* clear/set edquot flag and notify slaves via glimpse if needed */
        qmt_adjust_and_notify(env, qmt, now, qb_flags);

        LQUOTA_DEBUG_LQES(env, "dqacq ends count:%llu ver:%llu rc:%d",
                          repbody->qb_count, repbody->qb_slv_ver, rc);
        qti_lqes_write_unlock(env);

        qti_lqes_restore_fini(env);

        if (th != NULL && !IS_ERR(th))
                dt_trans_stop(env, qmt->qmt_child, th);

        if (slv_obj != NULL && !IS_ERR(slv_obj))
                dt_object_put(env, slv_obj);

        if ((req_is_acq(qb_flags) || req_is_preacq(qb_flags)) &&
            OBD_FAIL_CHECK(OBD_FAIL_QUOTA_EDQUOT)) {
                /* introduce an inconsistency between the granted value in the
                 * slave index and the slave's own copy of it */
                repbody->qb_count = 0;
/*
 * Extract the index from a uuid or quota index file name.
 *
 * \param[in] uuid   uuid or quota index file name (e.g. 0x1020000-OST0001_UUID)
 * \param[out] idx   pointer to save the index
 *
 * \retval slave type (QMT_STYPE_MDT or QMT_STYPE_OST)
 * \retval -EINVAL   wrong uuid
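 *
 * For example, "0x1020000-OST0001_UUID" yields *idx = 1 and a return value of
 * QMT_STYPE_OST, while an MDT uuid of the form "<fsname>-MDT0000_UUID" maps to
 * *idx = 0 and QMT_STYPE_MDT.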
 */
int qmt_uuid2idx(struct obd_uuid *uuid, int *idx)
{
        char *uuid_str, *name, *dash;
        int rc = -EINVAL;

        uuid_str = (char *)uuid->uuid;

        if (strnlen(uuid_str, UUID_MAX) >= UUID_MAX) {
                CERROR("quota: UUID '%.*s' missing trailing NUL: rc = %d\n",
                       UUID_MAX, uuid_str, rc);

        dash = strrchr(uuid_str, '-');
        name = dash + 1;

        /* Going to get the index from MDTXXXX/OSTXXXX. Thus the uuid should
         * have at least 8 bytes after '-': 3 for MDT/OST, 4 for the index
         * and 1 byte for the null character. */
        if (*dash != '-' || ((uuid_str + UUID_MAX - name) < 8)) {
                CERROR("quota: wrong UUID format '%s': rc = %d\n",
                       uuid_str, rc);

        rc = target_name2index(name, idx, NULL);
        switch (rc) {
        case LDD_F_SV_TYPE_MDT:
                rc = QMT_STYPE_MDT;
                break;
        case LDD_F_SV_TYPE_OST:
                rc = QMT_STYPE_OST;
                break;
        default:
                CERROR("quota: wrong UUID type '%s': rc = %d\n", uuid_str, rc);
/*
 * Handle quota request from slave.
 *
 * \param env - is the environment passed by the caller
 * \param ld - is the lu device associated with the qmt
 * \param req - is the quota acquire request
 */
static int qmt_dqacq(const struct lu_env *env, struct lu_device *ld,
                     struct ptlrpc_request *req)
{
        struct qmt_device *qmt = lu2qmt_dev(ld);
        struct quota_body *qbody, *repbody;
        struct obd_uuid *uuid;
        struct ldlm_lock *lock;

        qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
        if (qbody == NULL)
                RETURN(err_serious(-EPROTO));

        repbody = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_BODY);
        if (repbody == NULL)
                RETURN(err_serious(-EFAULT));

        /* verify if global lock is stale */
        if (!lustre_handle_is_used(&qbody->qb_glb_lockh))

        lock = ldlm_handle2lock(&qbody->qb_glb_lockh);

        uuid = &req->rq_export->exp_client_uuid;
        stype = qmt_uuid2idx(uuid, &idx);

        if (req_is_rel(qbody->qb_flags) + req_is_acq(qbody->qb_flags) +
            req_is_preacq(qbody->qb_flags) > 1) {
                CERROR("%s: malformed quota request with conflicting flags set "
                       "(%x) from slave %s\n", qmt->qmt_svname,
                       qbody->qb_flags, obd_uuid2str(uuid));
        if (req_is_acq(qbody->qb_flags) || req_is_preacq(qbody->qb_flags)) {
                /* acquire and pre-acquire should use a valid ID lock */

                if (!lustre_handle_is_used(&qbody->qb_lockh))

                lock = ldlm_handle2lock(&qbody->qb_lockh);
                        /* no lock associated with this handle */

                LDLM_DEBUG(lock, "%sacquire request",
                           req_is_preacq(qbody->qb_flags) ? "pre" : "");

                if (!obd_uuid_equals(&lock->l_export->exp_client_uuid, uuid)) {
                        /* sorry, no way to cheat ... */

                if (ldlm_is_ast_sent(lock)) {
                        struct ptlrpc_service_part *svc;

                        svc = req->rq_rqbd->rqbd_svcpt;
                        timeout = at_est2timeout(at_get(&svc->scp_at_estimate));
                        timeout += (ldlm_bl_timeout(lock) >> 1);

                        /* lock is being cancelled, prolong timeout */
                        ldlm_refresh_waiting_lock(lock, timeout);
        /* extract quota information from the global index FID packed in the
         * request */
        rc = lquota_extract_fid(&qbody->qb_fid, &rtype, &qtype);
        if (rc)
                RETURN(rc);

        /* Find the quota entry associated with the quota id */
        rc = qmt_pool_lqes_lookup(env, qmt, rtype, stype, qtype,
                                  &qbody->qb_id, NULL, idx);
        if (rc)
                RETURN(rc);

        rc = qmt_dqacq0(env, qmt, uuid, qbody->qb_flags,
                        qbody->qb_count, qbody->qb_usage, repbody);

        if (lustre_handle_is_used(&qbody->qb_lockh))
                /* return the current qunit value only to slaves owning a
                 * per-ID quota lock. For enqueue, the qunit value will be
                 * returned in the LVB */
                repbody->qb_qunit = qti_lqes_min_qunit(env);
        CDEBUG(D_QUOTA, "qmt_dqacq return qb_qunit %llu qb_count %llu\n",
               repbody->qb_qunit, repbody->qb_count);
/* Vector of quota request handlers. This vector is used by the MDT to forward
 * requests to the quota master. */
struct qmt_handlers qmt_hdls = {
        /* quota request handlers */
        .qmth_quotactl     = qmt_quotactl,
        .qmth_dqacq        = qmt_dqacq,

        .qmth_intent_policy = qmt_intent_policy,
        .qmth_lvbo_init    = qmt_lvbo_init,
        .qmth_lvbo_update  = qmt_lvbo_update,
        .qmth_lvbo_size    = qmt_lvbo_size,
        .qmth_lvbo_fill    = qmt_lvbo_fill,
        .qmth_lvbo_free    = qmt_lvbo_free,
};
EXPORT_SYMBOL(qmt_hdls);
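
/*
 * A minimal usage sketch, assuming the standard MDT-side hookup described in
 * the comment above: rather than calling the QMT functions directly, the MDT
 * is expected to go through this exported vector, e.g.
 *
 *     rc = qmt_hdls.qmth_quotactl(env, qmt_lu_dev, oqctl);
 *     rc = qmt_hdls.qmth_dqacq(env, qmt_lu_dev, req);
 *
 * where "qmt_lu_dev" stands for the lu_device of the quota master target.
 */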