4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA
24 * Copyright (c) 2012, 2017, Intel Corporation.
25 * Use is subject to license terms.
27 * Author: Johann Lombardi <johann.lombardi@intel.com>
28 * Author: Niu Yawei <yawei.niu@intel.com>
31 #define DEBUG_SUBSYSTEM S_LQUOTA
33 #include <obd_class.h>
34 #include "qmt_internal.h"
37 * Retrieve quota settings for a given identifier.
39 * \param env - is the environment passed by the caller
40 * \param qmt - is the quota master target
41 * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or inode
42 * (i.e. LQUOTA_RES_MD)
43 * \param qtype - is the quota type
44 * \param id - is the quota identifier for which we want to access quota
46 * \param hard - is the output variable where to copy the hard limit
47 * \param soft - is the output variable where to copy the soft limit
48 * \param time - is the output variable where to copy the grace time
/* NOTE(review): this excerpt elides several original lines (the trailing
 * "char *pool_name" parameter and opening brace, error-path returns, the
 * shift amount applied to LQUOTA_FLAG_DEFAULT, lqe release and the final
 * return) — the comments below describe only the visible code. */
static int qmt_get(const struct lu_env *env, struct qmt_device *qmt,
		   __u8 restype, __u8 qtype, union lquota_id *id,
		   __u64 *hard, __u64 *soft, __u64 *time, bool is_default,
	struct lquota_entry *lqe;

	/* a default-quota request only makes sense for the global ID 0 */
	LASSERT(!is_default || id->qid_uid == 0);
	/* reject an empty pool name */
	if (pool_name && !strnlen(pool_name, LOV_MAXPOOLNAME))

	/* look-up lqe structure containing quota settings */
	lqe = qmt_pool_lqe_lookup(env, qmt, restype, qtype, id, pool_name);

	/* copy quota settings */
	LQUOTA_DEBUG(lqe, "fetch settings");
	*hard = lqe->lqe_hardlimit;
	*soft = lqe->lqe_softlimit;
	*time = lqe->lqe_gracetime;
	/* flag the returned grace time when this ID relies on the default
	 * quota settings, so the caller can tell default from explicit */
	if (lqe->lqe_is_default)
		*time |= (__u64)LQUOTA_FLAG_DEFAULT <<
/* Context handed to qmt_entry_iter_cb() when iterating over the lqe hash
 * table to re-apply new default settings (see qmt_set_with_lqe()). */
struct qmt_entry_iter_data {
	const struct lu_env	*qeid_env;	/* caller's environment */
	struct qmt_device	*qeid_qmt;	/* quota master target */
};
91 static int qmt_entry_iter_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
92 struct hlist_node *hnode, void *d)
94 struct qmt_entry_iter_data *iter = (struct qmt_entry_iter_data *)d;
95 struct lquota_entry *lqe;
97 lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
98 LASSERT(atomic_read(&lqe->lqe_ref) > 0);
100 if (lqe->lqe_id.qid_uid == 0 || !lqe->lqe_is_default)
103 return qmt_set_with_lqe(iter->qeid_env, iter->qeid_qmt, lqe, 0, 0, 0, 0,
/* Propagate an updated quota setting for \a lqe to the slaves via per-ID
 * lock glimpses sent through the global lquota entry.
 * NOTE(review): this excerpt elides some lines (opening brace, the "int rc;"
 * declaration, early-exit paths after the lookups and the lqes cleanup) —
 * confirm against the complete source. */
static void qmt_set_id_notify(const struct lu_env *env, struct qmt_device *qmt,
			      struct lquota_entry *lqe)
	struct lquota_entry *lqe_gl;

	/* use \a lqe itself as the notification target when it is global */
	lqe_gl = lqe->lqe_is_global ? lqe : NULL;
	rc = qmt_pool_lqes_lookup_spec(env, qmt, lqe_rtype(lqe),
				       lqe_qtype(lqe), &lqe->lqe_id);
	/* no matching entries found — nothing to notify */
	if (!qti_lqes_cnt(env))

	/* otherwise fall back to the global entry found by the lookup */
	if (!lqe_gl && qti_lqes_glbl(env)->lqe_is_global)
		lqe_gl = qti_lqes_glbl(env);

	/* refresh the per-slave view of the global settings before the
	 * glimpse is sent */
	if (lqe_gl->lqe_glbl_data)
		qmt_seed_glbe(env, lqe_gl->lqe_glbl_data);
	/* Even if slaves haven't enqueued quota lock yet,
	 * it is needed to set lqe_revoke_time in qmt_id_lock_glimpse
	 * in case of reaching qpi_least_qunit */
	qmt_id_lock_notify(qmt, lqe_gl);
136 * Update quota settings for a given lqe.
138 * \param env - is the environment passed by the caller
139 * \param qmt - is the quota master target
140 * \param lqe - is the lquota_entry for which we want to modify quota
142 * \param hard - is the new hard limit
143 * \param soft - is the new soft limit
144 * \param time - is the new grace time
145 * \param valid - is the list of settings to change
146 * \param is_default - true for default quota setting
147 * \param is_updated - true if the lqe is updated and no need to write back
/* NOTE(review): this excerpt elides many original lines (opening brace,
 * declarations of rc/now/ver, IS_ERR checks, lqe_write_lock, GOTO labels
 * out_nolock/out and several closing braces) — comments below describe
 * only the visible code. */
int qmt_set_with_lqe(const struct lu_env *env, struct qmt_device *qmt,
		     struct lquota_entry *lqe, __u64 hard, __u64 soft,
		     __u64 time, __u32 valid, bool is_default, bool is_updated)
	struct thandle *th = NULL;
	bool dirtied = false;
	bool need_id_notify = false;

	/* need to write back to global quota file? */
	/* By default we should have here only 1 lqe,
	 * so no allocations should be done. */
	if (qti_lqes_restore_init(env))
		GOTO(out_nolock, rc = -ENOMEM);
	/* allocate & start transaction with enough credits to update
	 * quota settings in the global index file */
	th = qmt_trans_start(env, lqe);
		GOTO(out_nolock, rc = PTR_ERR(th));

	/* record current time for grace-time bookkeeping below */
	now = ktime_get_real_seconds();

	    "changing quota settings valid:%x hard:%llu soft:%llu time:%llu",
	    valid, hard, soft, time);

	/* a non-zero qid switching to defaults inherits its limits from the
	 * parent (global) entry */
	if (is_default && lqe->lqe_id.qid_uid != 0) {
		LQUOTA_DEBUG(lqe, "set qid %llu to use default quota setting",
			     lqe->lqe_id.qid_uid);

		qmt_lqe_set_default(env, lqe->lqe_site->lqs_parent, lqe, false);

	if ((valid & QIF_TIMES) != 0 && lqe->lqe_gracetime != time) {
		/* change time settings */
		lqe->lqe_gracetime = time;

	if ((valid & QIF_LIMITS) != 0 &&
	    (lqe->lqe_hardlimit != hard || lqe->lqe_softlimit != soft)) {
		rc = qmt_validate_limits(lqe, hard, soft);

		/* change quota limits */
		lqe->lqe_hardlimit = hard;
		lqe->lqe_softlimit = soft;

		/* recompute qunit in case it was never initialized */
		if (qmt_revalidate(env, lqe))
			need_id_notify = true;

		/* clear grace time */
		if (lqe->lqe_softlimit == 0 ||
		    lqe->lqe_granted <= lqe->lqe_softlimit)
			/* no soft limit or below soft limit, let's clear grace
			 * time */
			lqe->lqe_gracetime = 0;
		else if ((valid & QIF_TIMES) == 0)
			/* set grace only if user hasn't provided his own */
			lqe->lqe_gracetime = now + qmt_lqe_grace(lqe);

		/* change enforced status based on new parameters */
		if (lqe->lqe_id.qid_uid == 0 || (lqe->lqe_hardlimit == 0 &&
		    lqe->lqe_softlimit == 0)) {
			if (lqe->lqe_enforced) {
				lqe->lqe_enforced = false;
				/* Clear qunit and edquot as lqe_adjust_edquot
				 * does not handle not enforced lqes */
				need_id_notify = true;
			lqe->lqe_enforced = true;

	/* setting explicit limits overrides a previously inherited default */
	if (!is_default && lqe->lqe_is_default) {
		LQUOTA_DEBUG(lqe, "the qid %llu has been set quota"
			     " explicitly, clear the default flag",
			     lqe->lqe_id.qid_uid);

		qmt_lqe_clear_default(lqe);

		/* write new quota settings to disk */
		rc = qmt_glb_write(env, th, lqe, LQUOTA_BUMP_VER, &ver);
			/* restore initial quota settings */
			qmt_restore(lqe, &qti_lqes_rstr(env)[0]);

		ver = dt_version_get(env, LQE_GLB_OBJ(lqe));

		/* compute new qunit value now that we have modified the quota
		 * settings or clear/set edquot flag if needed */
		need_id_notify |= qmt_adjust_qunit(env, lqe);
		need_id_notify |= qmt_adjust_edquot(lqe, now);

	lqe_write_unlock(lqe);

	if (th != NULL && !IS_ERR(th))
		dt_trans_stop(env, qmt->qmt_child, th);

	qti_lqes_restore_fini(env);

	/* on success, broadcast the new settings to the slaves */
	if (rc == 0 && dirtied) {
		qmt_glb_lock_notify(env, lqe, ver);
		if (lqe->lqe_id.qid_uid == 0) {
			struct qmt_entry_iter_data iter_data;

			LQUOTA_DEBUG(lqe, "notify all lqe with default quota");
			iter_data.qeid_env = env;
			iter_data.qeid_qmt = qmt;
			/* re-apply the new default to every entry still
			 * flagged as using default settings */
			cfs_hash_for_each(lqe->lqe_site->lqs_hash,
					  qmt_entry_iter_cb, &iter_data);
			/* Always notify slaves with default values. Don't
			 * care about overhead as will be sent only not changed
			 * values(see qmt_id_lock_cb for details).*/
			need_id_notify = true;

		/* qti_lqes_inited > 0 means we came here from another
		 * qmt_pool_lqes_lookup(qmt_dqacq, intent_policy ...). Thus
		 * we can't init and add new lqes to don't overwrite already
		 * initialized ones. */
	if (!qti_lqes_inited(env) && need_id_notify)
		qmt_set_id_notify(env, qmt, lqe);
305 * Update quota settings for a given identifier.
307 * \param env - is the environment passed by the caller
308 * \param qmt - is the quota master target
309 * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or
310 * inode (i.e. LQUOTA_RES_MD)
311 * \param qtype - is the quota type
312 * \param id - is the quota identifier for which we want to modify
314 * \param hard - is the new hard limit
315 * \param soft - is the new soft limit
316 * \param time - is the new grace time
317 * \param valid - is the list of settings to change
318 * \param is_default - true for default quota setting
319 * \param is_updated - true if the lqe is updated and no need to write back
/* NOTE(review): this excerpt elides lines (opening brace, "int rc;",
 * IS_ERR check, lqe release and the final RETURN) — comments describe
 * only the visible code. */
static int qmt_set(const struct lu_env *env, struct qmt_device *qmt,
		   __u8 restype, __u8 qtype, union lquota_id *id,
		   __u64 hard, __u64 soft, __u64 time, __u32 valid,
		   bool is_default, bool is_updated, char *pool_name)
	struct lquota_entry *lqe;

	/* reject an empty pool name */
	if (pool_name && !strnlen(pool_name, LOV_MAXPOOLNAME))

	/* look-up quota entry associated with this ID */
	lqe = qmt_pool_lqe_lookup(env, qmt, restype, qtype, id, pool_name);
		RETURN(PTR_ERR(lqe));

	/* the ID is being (re)configured, so make sure it is not left
	 * flagged as deleted */
	lqe->lqe_is_deleted = 0;
	rc = qmt_set_with_lqe(env, qmt, lqe, hard, soft, time, valid,
			      is_default, is_updated);
	/* NOTE(review): lqe_is_deleted is cleared a second time here; the
	 * excerpt elides the intervening line — presumably a condition on
	 * rc guards this statement. Confirm against the complete source. */
	lqe->lqe_is_deleted = 0;
349 * Delete the quota setting of the specified quota ID
351 * \param env - is the environment passed by the caller
352 * \param qmt - is the quota master target
353 * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or
354 * inode (i.e. LQUOTA_RES_MD)
355 * \param qtype - is the quota type
356 * \param qid - is the quota identifier for which we want to delete its
/* NOTE(review): this excerpt elides lines (opening brace, rc/ver
 * declarations, IS_ERR checks, lqe_write_lock, parts of the rc==0 and
 * rc==-ENOENT branches, lqe release and the final RETURN) — comments
 * describe only the visible code. */
static int qmt_delete_qid(const struct lu_env *env, struct qmt_device *qmt,
			  __u8 restype, __u8 qtype, __u64 qid)
	struct qmt_thread_info *qti = qmt_info(env);
	union lquota_id *quota_id = &qti->qti_id;
	struct thandle *th = NULL;
	struct qmt_pool_info *qpi = NULL;
	struct lquota_entry *lqe = NULL;

	/* look-up the in-core entry for this ID in the global namespace */
	quota_id->qid_uid = qid;
	lqe = qmt_pool_lqe_lookup(env, qmt, restype, qtype, quota_id, NULL);
		RETURN(PTR_ERR(lqe));

	/* the global pool holds the on-disk index object for this type */
	qpi = qmt_pool_lookup_glb(env, qmt, restype);
		GOTO(out, rc = -ENOMEM);

	/* transaction with enough credits to update the global index */
	th = qmt_trans_start(env, lqe);
		GOTO(out, rc = PTR_ERR(th));

	/* remove the record from the on-disk global index file */
	rc = lquota_disk_delete(env, th,
				qpi->qpi_glb_obj[qtype], qid, &ver);
	dt_trans_stop(env, qmt->qmt_child, th);

	/* on success: mark the in-core entry deleted and notify slaves */
	lqe_set_deleted(lqe);
	qmt_glb_lock_notify(env, lqe, ver);
	} else if (rc == -ENOENT) {

	if (!IS_ERR_OR_NULL(qpi))
		qpi_putref(env, qpi);
	lqe_write_unlock(lqe);
410 * Handle quotactl request.
412 * \param env - is the environment passed by the caller
413 * \param ld - is the lu device associated with the qmt
414 * \param oqctl - is the quotactl request
/* NOTE(review): this excerpt elides many lines (opening brace, rc and
 * poolname declarations, RETURN statements, "break"s between cases, the
 * is_default assignments in the *DEFAULT cases and trailing pool_name
 * arguments) — comments describe only the visible code. */
static int qmt_quotactl(const struct lu_env *env, struct lu_device *ld,
			struct obd_quotactl *oqctl)
	struct qmt_thread_info *qti = qmt_info(env);
	union lquota_id *id = &qti->qti_id;
	struct qmt_device *qmt = lu2qmt_dev(ld);
	struct obd_dqblk *dqb = &oqctl->qc_dqblk;
	bool is_default = false;

	LASSERT(qmt != NULL);

	if (oqctl->qc_type >= LL_MAXQUOTAS)
		/* invalid quota type */

	/* pool-specific commands carry the pool name in the request */
	poolname = LUSTRE_Q_CMD_IS_POOL(oqctl->qc_cmd) ?
		oqctl->qc_poolname : NULL;

	switch (oqctl->qc_cmd) {

	case Q_GETINFO: /* read grace times */
	case LUSTRE_Q_GETINFOPOOL:
		/* Global grace time is stored in quota settings of ID 0. */

		/* read inode grace time */
		rc = qmt_get(env, qmt, LQUOTA_RES_MD, oqctl->qc_type, id, NULL,
			     NULL, &oqctl->qc_dqinfo.dqi_igrace,
		/* There could be no MD pool, so try to find DT pool */
		if (rc && rc != -ENOENT)

		/* read block grace time */
		rc = qmt_get(env, qmt, LQUOTA_RES_DT, oqctl->qc_type, id, NULL,
			     NULL, &oqctl->qc_dqinfo.dqi_bgrace,

	case Q_SETINFO: /* modify grace times */
	case LUSTRE_Q_SETINFOPOOL:
		/* setinfo should be using dqi->dqi_valid, but lfs incorrectly
		 * sets the valid flags in dqb->dqb_valid instead, try to live
		/* Global grace time is stored in quota settings of ID 0. */

		if ((dqb->dqb_valid & QIF_ITIME) != 0) {
			/* set inode grace time */
			rc = qmt_set(env, qmt, LQUOTA_RES_MD, oqctl->qc_type,
				     id, 0, 0, oqctl->qc_dqinfo.dqi_igrace,
				     QIF_TIMES, false, false,

		if ((dqb->dqb_valid & QIF_BTIME) != 0)
			/* set block grace time */
			rc = qmt_set(env, qmt, LQUOTA_RES_DT, oqctl->qc_type,
				     id, 0, 0, oqctl->qc_dqinfo.dqi_bgrace,
				     QIF_TIMES, false, false,

	case LUSTRE_Q_GETDEFAULT:
	case LUSTRE_Q_GETDEFAULT_POOL:
		/* NOTE(review): elided lines here presumably set
		 * is_default = true before falling through — confirm */

	case Q_GETQUOTA: /* consult quota limit */
	case LUSTRE_Q_GETQUOTAPOOL:
		/* extract quota ID from quotactl request */
		id->qid_uid = oqctl->qc_id;

		/* look-up inode quota settings */
		rc = qmt_get(env, qmt, LQUOTA_RES_MD, oqctl->qc_type, id,
			     &dqb->dqb_ihardlimit, &dqb->dqb_isoftlimit,
			     &dqb->dqb_itime, is_default, poolname);
		/* There could be no MD pool, so try to find DT pool */
		if (rc && rc != -ENOENT)

		dqb->dqb_valid |= QIF_ILIMITS | QIF_ITIME;
		/* master isn't aware of actual inode usage */
		dqb->dqb_curinodes = 0;

		/* look-up block quota settings */
		rc = qmt_get(env, qmt, LQUOTA_RES_DT, oqctl->qc_type, id,
			     &dqb->dqb_bhardlimit, &dqb->dqb_bsoftlimit,
			     &dqb->dqb_btime, is_default, poolname);

		dqb->dqb_valid |= QIF_BLIMITS | QIF_BTIME;
		/* master doesn't know the actual block usage */
		dqb->dqb_curspace = 0;

	case LUSTRE_Q_SETDEFAULT:
	case LUSTRE_Q_SETDEFAULT_POOL:
		/* NOTE(review): elided lines here presumably set
		 * is_default = true before falling through — confirm */

	case Q_SETQUOTA: /* change quota limits */
	case LUSTRE_Q_SETQUOTAPOOL:
		/* extract quota ID from quotactl request */
		id->qid_uid = oqctl->qc_id;

		if ((dqb->dqb_valid & QIF_IFLAGS) != 0) {
			/* update inode quota settings */
			rc = qmt_set(env, qmt, LQUOTA_RES_MD, oqctl->qc_type,
				     id, dqb->dqb_ihardlimit,
				     dqb->dqb_isoftlimit, dqb->dqb_itime,
				     dqb->dqb_valid & QIF_IFLAGS, is_default,

		if ((dqb->dqb_valid & QIF_BFLAGS) != 0)
			/* update block quota settings */
			rc = qmt_set(env, qmt, LQUOTA_RES_DT, oqctl->qc_type,
				     id, dqb->dqb_bhardlimit,
				     dqb->dqb_bsoftlimit, dqb->dqb_btime,
				     dqb->dqb_valid & QIF_BFLAGS, is_default,

	case LUSTRE_Q_DELETEQID:
		/* wipe both inode and block settings of the given ID */
		rc = qmt_delete_qid(env, qmt, LQUOTA_RES_MD, oqctl->qc_type,

		rc = qmt_delete_qid(env, qmt, LQUOTA_RES_DT, oqctl->qc_type,

		CERROR("%s: unsupported quotactl command: %d\n",
		       qmt->qmt_svname, oqctl->qc_cmd);
570 void qmt_grant_lqes(const struct lu_env *env, __u64 *slv, __u64 cnt)
574 for (i = 0; i < qti_lqes_cnt(env); i++)
575 qti_lqe_granted(env, i) += cnt;
580 static inline bool qmt_lqes_can_rel(const struct lu_env *env, __u64 cnt)
582 bool can_release = true;
585 for (i = 0; i < qti_lqes_cnt(env); i++) {
586 if (cnt > qti_lqe_granted(env, i)) {
587 LQUOTA_ERROR(qti_lqes(env)[i],
588 "Can't release %llu that is larger than lqe_granted.\n",
596 static inline void qmt_rel_lqes(const struct lu_env *env, __u64 *slv, __u64 cnt)
600 for (i = 0; i < qti_lqes_cnt(env); i++)
601 qti_lqe_granted(env, i) -= cnt;
606 static inline bool qmt_lqes_cannot_grant(const struct lu_env *env, __u64 cnt)
608 bool cannot_grant = false;
611 for (i = 0; i < qti_lqes_cnt(env); i++) {
612 if (qti_lqe_hard(env, i) != 0 &&
613 qti_lqe_granted(env, i) + cnt > qti_lqe_hard(env, i)) {
621 static inline __u64 qmt_lqes_grant_some_quota(const struct lu_env *env)
623 __u64 min_count, tmp;
627 for (i = 0, min_count = 0; i < qti_lqes_cnt(env); i++) {
628 if (!qti_lqes(env)[i]->lqe_enforced &&
629 !qti_lqes(env)[i]->lqe_is_global)
632 tmp = qti_lqe_hard(env, i) - qti_lqe_granted(env, i);
634 min_count = tmp < min_count ? tmp : min_count;
643 static inline __u64 qmt_lqes_alloc_expand(const struct lu_env *env,
644 __u64 slv_granted, __u64 spare)
646 __u64 min_count, tmp;
650 for (i = 0, min_count = 0; i < qti_lqes_cnt(env); i++) {
651 /* Don't take into account not enforced lqes that belong
652 * to non global pool. These lqes present in array to
653 * support actual lqe_granted even for lqes without limits. */
654 if (!qti_lqes(env)[i]->lqe_enforced &&
655 !qti_lqes(env)[i]->lqe_is_global)
658 tmp = qmt_alloc_expand(qti_lqes(env)[i], slv_granted, spare);
660 min_count = tmp < min_count ? tmp : min_count;
669 static inline void qmt_lqes_tune_grace(const struct lu_env *env, __u64 now)
673 for (i = 0; i < qti_lqes_cnt(env); i++) {
674 struct lquota_entry *lqe;
676 lqe = qti_lqes(env)[i];
677 if (lqe->lqe_softlimit != 0) {
678 if (lqe->lqe_granted > lqe->lqe_softlimit &&
679 lqe->lqe_gracetime == 0) {
680 /* First time over soft limit, let's start grace
682 lqe->lqe_gracetime = now + qmt_lqe_grace(lqe);
683 } else if (lqe->lqe_granted <= lqe->lqe_softlimit &&
684 lqe->lqe_gracetime != 0) {
685 /* Clear grace timer */
686 lqe->lqe_gracetime = 0;
693 * Helper function to handle quota request from slave.
695 * \param env - is the environment passed by the caller
696 * \param qmt - is the master device
697 * \param uuid - is the uuid associated with the slave
698 * \param qb_flags - are the quota request flags as packed in the quota_body
699 * \param qb_count - is the amount of quota space the slave wants to
701 * \param qb_usage - is the current space usage on the slave
702 * \param repbody - is the quota_body of reply
704 * \retval 0 : success
705 * \retval -EDQUOT : out of quota
706 * -EINPROGRESS : inform client to retry write/create
707 * -ve : other appropriate errors
/* NOTE(review): this excerpt elides many original lines (opening brace,
 * declarations of rc/ret/count/now, IS_ERR checks, the out_write /
 * out_locked / out labels and several closing braces) — comments below
 * describe only the visible code. */
int qmt_dqacq0(const struct lu_env *env, struct qmt_device *qmt,
	       struct obd_uuid *uuid, __u32 qb_flags, __u64 qb_count,
	       __u64 qb_usage, struct quota_body *repbody)
	struct dt_object *slv_obj = NULL;
	__u64 slv_granted, slv_granted_bck;
	struct thandle *th = NULL;
	struct lquota_entry *lqe = qti_lqes_glbl(env);

	LASSERT(uuid != NULL);

	/* initialize reply */
	memset(repbody, 0, sizeof(*repbody));
	memcpy(&repbody->qb_id, &lqe->lqe_id, sizeof(repbody->qb_id));

	/* fault-injection hooks used by recovery/EDQUOT tests */
	if (OBD_FAIL_CHECK(OBD_FAIL_QUOTA_RECOVERABLE_ERR))
		RETURN(-cfs_fail_val);

	if (OBD_FAIL_CHECK(OBD_FAIL_QUOTA_PREACQ) &&
	    (req_is_preacq(qb_flags) || req_is_rel(qb_flags)))

	if (qti_lqes_restore_init(env))

	/* look-up index file associated with acquiring slave */
	slv_obj = lquota_disk_slv_find(env, qmt->qmt_child, LQE_ROOT(lqe),
				       lu_object_fid(&LQE_GLB_OBJ(lqe)->do_lu),
		GOTO(out, rc = PTR_ERR(slv_obj));

	/* pack slave fid in reply just for sanity check */
	memcpy(&repbody->qb_slv_fid, lu_object_fid(&slv_obj->do_lu),
	       sizeof(struct lu_fid));

	/* allocate & start transaction with enough credits to update
	 * global & slave indexes */
	th = qmt_trans_start_with_slv(env, NULL, slv_obj, false);
		GOTO(out, rc = PTR_ERR(th));

	qti_lqes_write_lock(env);

	LQUOTA_DEBUG_LQES(env, "dqacq starts uuid:%s flags:0x%x wanted:%llu"
			  " usage:%llu", obd_uuid2str(uuid), qb_flags, qb_count,

	/* Legal race, limits have been removed on master, but slave didn't
	 * receive the change yet. Just return EINPROGRESS until the slave gets
	if (!lqe->lqe_enforced && !req_is_rel(qb_flags))
		GOTO(out_locked, rc = -ESRCH);

	/* recompute qunit in case it was never initialized */
	qmt_revalidate_lqes(env, qmt, qb_flags);

	/* slave just wants to acquire per-ID lock */
	if (req_is_acq(qb_flags) && qb_count == 0)
		GOTO(out_locked, rc = 0);

	/* fetch how much quota space is already granted to this slave */
	rc = qmt_slv_read(env, &lqe->lqe_id, slv_obj, &slv_granted);
		LQUOTA_ERROR(lqe, "Failed to get granted for slave %s, rc=%d",
			     obd_uuid2str(uuid), rc);
		GOTO(out_locked, rc);

	/* recall how much space this slave currently owns in order to restore
	 * it in case of failure */
	slv_granted_bck = slv_granted;

	/* record current time for soft limit & grace time management */
	now = ktime_get_real_seconds();

	if (req_is_rel(qb_flags)) {
		/* Slave would like to release quota space */
		if (slv_granted < qb_count ||
		    !qmt_lqes_can_rel(env, qb_count)) {
			/* can't release more than granted */
			LQUOTA_ERROR_LQES(env,
					  "Release too much! uuid:%s release: %llu granted:%llu, total:%llu",
					  obd_uuid2str(uuid), qb_count,
					  slv_granted, lqe->lqe_granted);
			GOTO(out_locked, rc = -EINVAL);

		repbody->qb_count = qb_count;
		/* put released space back to global pool */
		qmt_rel_lqes(env, &slv_granted, qb_count);
		GOTO(out_write, rc = 0);

	if (req_has_rep(qb_flags) && slv_granted < qb_usage) {
		/* Slave is reporting space usage in quota request and it turns
		 * out to be using more quota space than owned, so we adjust
		 * granted space regardless of the current state of affairs */
		repbody->qb_count = qb_usage - slv_granted;
		qmt_grant_lqes(env, &slv_granted, repbody->qb_count);

	/* usage report only — nothing further to grant */
	if (!req_is_acq(qb_flags) && !req_is_preacq(qb_flags))
		GOTO(out_write, rc = 0);

	qmt_adjust_edquot_notify(env, qmt, now, qb_flags);
	if (qti_lqes_edquot(env))
		/* no hope to claim further space back */
		GOTO(out_write, rc = -EDQUOT);

	if (qmt_space_exhausted_lqes(env, now)) {
		/* might have some free space once rebalancing is completed */
		rc = req_is_acq(qb_flags) ? -EINPROGRESS : -EDQUOT;

	if (req_is_preacq(qb_flags)) {
		/* slave would like to pre-acquire quota space. To do so, it
		 * reports in qb_count how much spare quota space it owns and we
		 * can grant back quota space which is consistent with qunit
		if (qb_count >= qti_lqes_min_qunit(env))
			/* slave already own the maximum it should */
			GOTO(out_write, rc = 0);

		count = qmt_lqes_alloc_expand(env, slv_granted, qb_count);
			GOTO(out_write, rc = -EDQUOT);

		repbody->qb_count += count;
		qmt_grant_lqes(env, &slv_granted, count);
		GOTO(out_write, rc = 0);

	/* processing acquire request with clients waiting */
	if (qmt_lqes_cannot_grant(env, qb_count)) {
		/* cannot grant as much as asked, but can still afford to grant
		 * some quota space back */
		count = qmt_lqes_grant_some_quota(env);
		repbody->qb_count += count;
		qmt_grant_lqes(env, &slv_granted, count);
		GOTO(out_write, rc = 0);

	/* Whouhou! we can satisfy the slave request! */
	repbody->qb_count += qb_count;
	qmt_grant_lqes(env, &slv_granted, qb_count);

	/* Try to expand the acquired count for DQACQ */
	count = qmt_lqes_alloc_expand(env, slv_granted, 0);
		/* can even grant more than asked, it is like xmas ... */
		repbody->qb_count += count;
		qmt_grant_lqes(env, &slv_granted, count);
		GOTO(out_write, rc = 0);

	GOTO(out_write, rc = 0);

	/* (out_write path — label elided in this excerpt) */
	if (repbody->qb_count == 0)
		GOTO(out_locked, rc);

	/* start/stop grace timer if required */
	qmt_lqes_tune_grace(env, now);

	/* Update slave index first since it is easier to roll back */
	ret = qmt_slv_write(env, th, lqe, slv_obj, LQUOTA_BUMP_VER,
			    &repbody->qb_slv_ver, slv_granted);
		/* restore initial quota settings */
		qmt_restore_lqes(env);

		repbody->qb_count = 0;
		GOTO(out_locked, rc = ret);

	/* Update global index, no version bump needed */
	ret = qmt_glb_write_lqes(env, th, 0, NULL);
		/* restore initial quota settings */
		qmt_restore_lqes(env);

		repbody->qb_count = 0;

		/* restore previous granted value */
		ret = qmt_slv_write(env, th, lqe, slv_obj, 0, NULL,
			LQUOTA_ERROR(lqe, "failed to restore initial slave "
				     "value rc:%d ret%d", rc, ret);

		qmt_adjust_edquot_notify(env, qmt, now, qb_flags);
		GOTO(out_locked, rc);

	/* Total granted has been changed, let's try to adjust the qunit
	 * size according to the total granted & limits. */

	/* clear/set edquot flag and notify slaves via glimpse if needed */
	qmt_adjust_and_notify(env, qmt, now, qb_flags);

	LQUOTA_DEBUG_LQES(env, "dqacq ends count:%llu ver:%llu rc:%d",
			  repbody->qb_count, repbody->qb_slv_ver, rc);

	qti_lqes_write_unlock(env);

	qti_lqes_restore_fini(env);

	if (th != NULL && !IS_ERR(th))
		dt_trans_stop(env, qmt->qmt_child, th);

	if (slv_obj != NULL && !IS_ERR(slv_obj))
		dt_object_put(env, slv_obj);

	if ((req_is_acq(qb_flags) || req_is_preacq(qb_flags)) &&
	    OBD_FAIL_CHECK(OBD_FAIL_QUOTA_EDQUOT)) {
		/* introduce inconsistency between granted value in slave index
		 * and slave index copy of slave */
		repbody->qb_count = 0;
938 * Extract index from uuid or quota index file name.
940 * \param[in] uuid uuid or quota index name(0x1020000-OST0001_UUID)
941 * \param[out] idx pointer to save index
943 * \retval slave type(QMT_STYPE_MDT or QMT_STYPE_OST)
944 * \retval -EINVAL wrong uuid
946 int qmt_uuid2idx(struct obd_uuid *uuid, int *idx)
948 char *uuid_str, *name, *dash;
951 uuid_str = (char *)uuid->uuid;
953 if (strnlen(uuid_str, UUID_MAX) >= UUID_MAX) {
954 CERROR("quota: UUID '%.*s' missing trailing NUL: rc = %d\n",
955 UUID_MAX, uuid_str, rc);
959 dash = strrchr(uuid_str, '-');
961 /* Going to get index from MDTXXXX/OSTXXXX. Thus uuid should
962 * have at least 8 bytes after '-': 3 for MDT/OST, 4 for index
963 * and 1 byte for null character. */
964 if (*dash != '-' || ((uuid_str + UUID_MAX - name) < 8)) {
965 CERROR("quota: wrong UUID format '%s': rc = %d\n",
970 rc = target_name2index(name, idx, NULL);
972 case LDD_F_SV_TYPE_MDT:
975 case LDD_F_SV_TYPE_OST:
979 CERROR("quota: wrong UUID type '%s': rc = %d\n", uuid_str, rc);
987 * Handle quota request from slave.
989 * \param env - is the environment passed by the caller
990 * \param ld - is the lu device associated with the qmt
991 * \param req - is the quota acquire request
/* NOTE(review): this excerpt elides original lines (opening brace,
 * declarations of rc/stype/idx/timeout/rtype/qtype, several RETURN /
 * GOTO statements and closing braces) — comments describe only the
 * visible code. */
static int qmt_dqacq(const struct lu_env *env, struct lu_device *ld,
		     struct ptlrpc_request *req)
	struct qmt_device *qmt = lu2qmt_dev(ld);
	struct quota_body *qbody, *repbody;
	struct obd_uuid *uuid;
	struct ldlm_lock *lock;

	/* unpack the request body; EPROTO when the client sent garbage */
	qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
		RETURN(err_serious(-EPROTO));

	repbody = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_BODY);
	if (repbody == NULL)
		RETURN(err_serious(-EFAULT));

	/* verify if global lock is stale */
	if (!lustre_handle_is_used(&qbody->qb_glb_lockh))

	lock = ldlm_handle2lock(&qbody->qb_glb_lockh);
	LDLM_LOCK_PUT(lock);

	/* identify the sending slave from its export uuid */
	uuid = &req->rq_export->exp_client_uuid;
	stype = qmt_uuid2idx(uuid, &idx);

	/* release, acquire and pre-acquire are mutually exclusive */
	if (req_is_rel(qbody->qb_flags) + req_is_acq(qbody->qb_flags) +
	    req_is_preacq(qbody->qb_flags) > 1) {
		CERROR("%s: malformed quota request with conflicting flags set "
		       "(%x) from slave %s\n", qmt->qmt_svname,
		       qbody->qb_flags, obd_uuid2str(uuid));

	if (req_is_acq(qbody->qb_flags) || req_is_preacq(qbody->qb_flags)) {
		/* acquire and pre-acquire should use a valid ID lock */

		if (!lustre_handle_is_used(&qbody->qb_lockh))

		lock = ldlm_handle2lock(&qbody->qb_lockh);
			/* no lock associated with this handle */

		LDLM_DEBUG(lock, "%sacquire request",
			   req_is_preacq(qbody->qb_flags) ? "pre" : "");

		/* the lock must belong to the requesting client */
		if (!obd_uuid_equals(&lock->l_export->exp_client_uuid, uuid)) {
			/* sorry, no way to cheat ... */
			LDLM_LOCK_PUT(lock);

		if (ldlm_is_ast_sent(lock)) {
			struct ptlrpc_service_part *svc;

			svc = req->rq_rqbd->rqbd_svcpt;
			timeout = at_est2timeout(at_get(&svc->scp_at_estimate));
			timeout += (ldlm_bl_timeout(lock) >> 1);

			/* lock is being cancelled, prolong timeout */
			ldlm_refresh_waiting_lock(lock, timeout);

		LDLM_LOCK_PUT(lock);

	/* extract quota information from global index FID packed in the
	rc = lquota_extract_fid(&qbody->qb_fid, &rtype, &qtype);

	/* Find the quota entry associated with the quota id */
	rc = qmt_pool_lqes_lookup(env, qmt, rtype, stype, qtype,
				  &qbody->qb_id, NULL, idx);

	/* delegate the actual grant/release logic */
	rc = qmt_dqacq0(env, qmt, uuid, qbody->qb_flags,
			qbody->qb_count, qbody->qb_usage, repbody);

	if (lustre_handle_is_used(&qbody->qb_lockh))
		/* return current qunit value only to slaves owning an per-ID
		 * quota lock. For enqueue, the qunit value will be returned in
		repbody->qb_qunit = qti_lqes_min_qunit(env);

	CDEBUG(D_QUOTA, "qmt_dqacq return qb_qunit %llu qb_count %llu\n",
	       repbody->qb_qunit, repbody->qb_count);
1094 /* Vector of quota request handlers. This vector is used by the MDT to forward
1095 * requests to the quota master. */
1096 struct qmt_handlers qmt_hdls = {
1097 /* quota request handlers */
1098 .qmth_quotactl = qmt_quotactl,
1099 .qmth_dqacq = qmt_dqacq,
1102 .qmth_intent_policy = qmt_intent_policy,
1103 .qmth_lvbo_init = qmt_lvbo_init,
1104 .qmth_lvbo_update = qmt_lvbo_update,
1105 .qmth_lvbo_size = qmt_lvbo_size,
1106 .qmth_lvbo_fill = qmt_lvbo_fill,
1107 .qmth_lvbo_free = qmt_lvbo_free,
1109 EXPORT_SYMBOL(qmt_hdls);