4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA
24 * Copyright (c) 2012, 2017, Intel Corporation.
25 * Use is subject to license terms.
27 * Author: Johann Lombardi <johann.lombardi@intel.com>
28 * Author: Niu Yawei <yawei.niu@intel.com>
31 #define DEBUG_SUBSYSTEM S_LQUOTA
33 #include <obd_class.h>
34 #include "qmt_internal.h"
37 * Retrieve quota settings for a given identifier.
39 * \param env - is the environment passed by the caller
40 * \param qmt - is the quota master target
41 * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or inode
42 * (i.e. LQUOTA_RES_MD)
43 * \param qtype - is the quota type
44 * \param id - is the quota identifier for which we want to access quota
46 * \param hard - is the output variable where to copy the hard limit
47 * \param soft - is the output variable where to copy the soft limit
48 * \param time - is the output variable where to copy the grace time
/* NOTE(review): this file appears to have lines dropped during extraction
 * (the embedded original line numbers are non-contiguous), so the bodies
 * below are incomplete as shown; only comments are added here. */
50 static int qmt_get(const struct lu_env *env, struct qmt_device *qmt,
51 __u8 restype, __u8 qtype, union lquota_id *id,
52 __u64 *hard, __u64 *soft, __u64 *time, bool is_default)
54 struct lquota_entry *lqe;
/* default settings are only attached to the special ID 0 entry */
57 LASSERT(!is_default || id->qid_uid == 0);
59 /* look-up lqe structure containing quota settings */
60 lqe = qmt_pool_lqe_lookup(env, qmt, restype, qtype, id);
/* NOTE(review): the IS_ERR() check on lqe is not visible here — lines
 * appear to be missing between the lookup and the copy below */
64 /* copy quota settings */
66 LQUOTA_DEBUG(lqe, "fetch settings");
/* callers pass NULL for unwanted outputs (see qmt_quotactl Q_GETINFO);
 * presumably NULL guards exist on the missing lines — TODO confirm */
68 *hard = lqe->lqe_hardlimit;
70 *soft = lqe->lqe_softlimit;
72 *time = lqe->lqe_gracetime;
/* tag the returned grace time when this entry uses the default setting */
73 if (lqe->lqe_is_default)
74 *time |= (__u64)LQUOTA_FLAG_DEFAULT <<
/* NOTE(review): shift amount, lqe release and RETURN not visible — truncated */
/* Context handed to qmt_entry_iter_cb() while iterating over all lquota
 * entries of a site (see cfs_hash_for_each_safe() in qmt_set_with_lqe()). */
83 struct qmt_entry_iter_data {
84 const struct lu_env *qeid_env;
85 struct qmt_device *qeid_qmt;
/* NOTE(review): closing brace of the struct is not visible — truncated */
/* cfs_hash iteration callback: re-applies the (possibly changed) default
 * quota settings to every entry currently inheriting the default. */
88 static int qmt_entry_iter_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
89 struct hlist_node *hnode, void *d)
91 struct qmt_entry_iter_data *iter = (struct qmt_entry_iter_data *)d;
92 struct lquota_entry *lqe;
94 lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
95 LASSERT(atomic_read(&lqe->lqe_ref) > 0);
/* skip ID 0 (which holds the default itself) and entries that carry
 * explicit, non-default settings */
97 if (lqe->lqe_id.qid_uid == 0 || !lqe->lqe_is_default)
/* NOTE(review): the skip return statement is not visible — truncated */
100 return qmt_set_with_lqe(iter->qeid_env, iter->qeid_qmt, lqe, 0, 0, 0, 0,
/* NOTE(review): trailing arguments and closing brace not visible */
105 * Update quota settings for a given lqe.
107 * \param env - is the environment passed by the caller
108 * \param qmt - is the quota master target
109 * \param lqe - is the lquota_entry for which we want to modify quota
111 * \param hard - is the new hard limit
112 * \param soft - is the new soft limit
113 * \param time - is the new grace time
114 * \param valid - is the list of settings to change
115 * \param is_default - true for default quota setting
116 * \param is_updated - true if the lqe is updated and no need to write back
119 int qmt_set_with_lqe(const struct lu_env *env, struct qmt_device *qmt,
120 struct lquota_entry *lqe, __u64 hard, __u64 soft,
121 __u64 time, __u32 valid, bool is_default, bool is_updated)
123 struct qmt_thread_info *qti = qmt_info(env);
124 struct thandle *th = NULL;
127 bool dirtied = false;
131 /* need to write back to global quota file? */
/* NOTE(review): the 'if (!is_updated)' guard implied by the comment above
 * is not visible — lines appear to be missing throughout this function */
133 /* allocate & start transaction with enough credits to update
134 * quota settings in the global index file */
135 th = qmt_trans_start(env, lqe, &qti->qti_restore);
137 GOTO(out_nolock, rc = PTR_ERR(th));
140 now = ktime_get_real_seconds();
143 LQUOTA_DEBUG(lqe, "changing quota settings valid:%x hard:%llu soft:"
144 "%llu time:%llu", valid, hard, soft, time);
/* switch a non-root ID back to inheriting the default quota settings */
146 if (is_default && lqe->lqe_id.qid_uid != 0) {
147 LQUOTA_DEBUG(lqe, "set qid %llu to use default quota setting",
148 lqe->lqe_id.qid_uid);
150 qmt_lqe_set_default(env, lqe->lqe_site->lqs_parent, lqe, false);
154 if ((valid & QIF_TIMES) != 0 && lqe->lqe_gracetime != time) {
155 /* change time settings */
156 lqe->lqe_gracetime = time;
160 if ((valid & QIF_LIMITS) != 0 &&
161 (lqe->lqe_hardlimit != hard || lqe->lqe_softlimit != soft)) {
162 rc = qmt_validate_limits(lqe, hard, soft);
166 /* change quota limits */
167 lqe->lqe_hardlimit = hard;
168 lqe->lqe_softlimit = soft;
171 /* recompute qunit in case it was never initialized */
172 qmt_revalidate(env, lqe);
174 /* clear grace time */
175 if (lqe->lqe_softlimit == 0 ||
176 lqe->lqe_granted <= lqe->lqe_softlimit)
177 /* no soft limit or below soft limit, let's clear grace time */
179 lqe->lqe_gracetime = 0;
180 else if ((valid & QIF_TIMES) == 0)
181 /* set grace only if user hasn't provided his own */
182 lqe->lqe_gracetime = now + qmt_lqe_grace(lqe);
184 /* change enforced status based on new parameters */
185 if (lqe->lqe_id.qid_uid == 0 || (lqe->lqe_hardlimit == 0 &&
186 lqe->lqe_softlimit == 0))
187 lqe->lqe_enforced = false;
189 lqe->lqe_enforced = true;
/* an explicit setting overrides any previously-inherited default */
194 if (!is_default && lqe->lqe_is_default) {
195 LQUOTA_DEBUG(lqe, "the qid %llu has been set quota"
196 " explicitly, clear the default flag",
197 lqe->lqe_id.qid_uid);
199 qmt_lqe_clear_default(lqe);
205 /* write new quota settings to disk */
206 rc = qmt_glb_write(env, th, lqe, LQUOTA_BUMP_VER, &ver);
/* on write failure, roll the lqe back to the state saved at trans start */
208 /* restore initial quota settings */
209 qmt_restore(lqe, &qti->qti_restore);
213 ver = dt_version_get(env, LQE_GLB_OBJ(lqe));
216 /* compute new qunit value now that we have modified the quota settings */
218 qmt_adjust_qunit(env, lqe);
220 /* clear/set edquot flag as needed */
221 qmt_adjust_edquot(lqe, now);
225 lqe_write_unlock(lqe);
/* transaction is stopped after dropping the lqe write lock */
228 if (th != NULL && !IS_ERR(th))
229 dt_trans_stop(env, qmt->qmt_child, th);
/* on success with dirty settings, notify via the global lock; for ID 0
 * also walk the site hash so inheriting entries pick up the new default */
231 if (rc == 0 && dirtied) {
232 qmt_glb_lock_notify(env, lqe, ver);
233 if (lqe->lqe_id.qid_uid == 0) {
234 struct qmt_entry_iter_data iter_data;
236 LQUOTA_DEBUG(lqe, "notify all lqe with default quota");
237 iter_data.qeid_env = env;
238 iter_data.qeid_qmt = qmt;
239 cfs_hash_for_each_safe(lqe->lqe_site->lqs_hash,
240 qmt_entry_iter_cb, &iter_data);
/* NOTE(review): out_nolock label, RETURN and closing brace not visible */
248 * Update quota settings for a given identifier.
250 * \param env - is the environment passed by the caller
251 * \param qmt - is the quota master target
252 * \param restype - is the pool type, either block (i.e. LQUOTA_RES_DT) or
253 * inode (i.e. LQUOTA_RES_MD)
254 * \param qtype - is the quota type
255 * \param id - is the quota identifier for which we want to modify
257 * \param hard - is the new hard limit
258 * \param soft - is the new soft limit
259 * \param time - is the new grace time
260 * \param valid - is the list of settings to change
261 * \param is_default - true for default quota setting
262 * \param is_updated - true if the lqe is updated and no need to write back
/* Thin wrapper: resolves the lquota_entry for (restype, qtype, id) and
 * delegates the actual update to qmt_set_with_lqe(). */
264 static int qmt_set(const struct lu_env *env, struct qmt_device *qmt,
265 __u8 restype, __u8 qtype, union lquota_id *id,
266 __u64 hard, __u64 soft, __u64 time, __u32 valid,
267 bool is_default, bool is_updated)
269 struct lquota_entry *lqe;
273 /* look-up quota entry associated with this ID */
274 lqe = qmt_pool_lqe_lookup(env, qmt, restype, qtype, id);
276 RETURN(PTR_ERR(lqe));
278 rc = qmt_set_with_lqe(env, qmt, lqe, hard, soft, time, valid,
279 is_default, is_updated);
/* NOTE(review): lqe release and RETURN(rc) not visible — truncated */
286 * Handle quotactl request.
288 * \param env - is the environment passed by the caller
289 * \param ld - is the lu device associated with the qmt
290 * \param oqctl - is the quotactl request
292 static int qmt_quotactl(const struct lu_env *env, struct lu_device *ld,
293 struct obd_quotactl *oqctl)
295 struct qmt_thread_info *qti = qmt_info(env);
296 union lquota_id *id = &qti->qti_id;
297 struct qmt_device *qmt = lu2qmt_dev(ld);
298 struct obd_dqblk *dqb = &oqctl->qc_dqblk;
300 bool is_default = false;
303 LASSERT(qmt != NULL);
305 if (oqctl->qc_type >= LL_MAXQUOTAS)
306 /* invalid quota type */
/* NOTE(review): the error RETURN for an invalid type is not visible */
309 switch (oqctl->qc_cmd) {
311 case Q_GETINFO: /* read grace times */
312 /* Global grace time is stored in quota settings of ID 0. */
/* NOTE(review): the id->qid_uid = 0 assignment implied above is missing */
315 /* read inode grace time */
316 rc = qmt_get(env, qmt, LQUOTA_RES_MD, oqctl->qc_type, id,
317 NULL, NULL, &oqctl->qc_dqinfo.dqi_igrace, false);
321 /* read block grace time */
322 rc = qmt_get(env, qmt, LQUOTA_RES_DT, oqctl->qc_type, id,
323 NULL, NULL, &oqctl->qc_dqinfo.dqi_bgrace, false);
326 case Q_SETINFO: /* modify grace times */
327 /* setinfo should be using dqi->dqi_valid, but lfs incorrectly
328 * sets the valid flags in dqb->dqb_valid instead, try to live
 * with it */
331 /* Global grace time is stored in quota settings of ID 0. */
334 if ((dqb->dqb_valid & QIF_ITIME) != 0) {
335 /* set inode grace time */
336 rc = qmt_set(env, qmt, LQUOTA_RES_MD, oqctl->qc_type,
337 id, 0, 0, oqctl->qc_dqinfo.dqi_igrace,
338 QIF_TIMES, false, false);
343 if ((dqb->dqb_valid & QIF_BTIME) != 0)
344 /* set block grace time */
345 rc = qmt_set(env, qmt, LQUOTA_RES_DT, oqctl->qc_type,
346 id, 0, 0, oqctl->qc_dqinfo.dqi_bgrace,
347 QIF_TIMES, false, false);
350 case LUSTRE_Q_GETDEFAULT:
/* NOTE(review): an is_default = true assignment and a fallthrough into
 * Q_GETQUOTA presumably belong here but are not visible — TODO confirm */
354 case Q_GETQUOTA: /* consult quota limit */
355 /* extract quota ID from quotactl request */
356 id->qid_uid = oqctl->qc_id;
358 /* look-up inode quota settings */
359 rc = qmt_get(env, qmt, LQUOTA_RES_MD, oqctl->qc_type, id,
360 &dqb->dqb_ihardlimit, &dqb->dqb_isoftlimit,
361 &dqb->dqb_itime, is_default);
365 dqb->dqb_valid |= QIF_ILIMITS | QIF_ITIME;
366 /* master isn't aware of actual inode usage */
367 dqb->dqb_curinodes = 0;
369 /* look-up block quota settings */
370 rc = qmt_get(env, qmt, LQUOTA_RES_DT, oqctl->qc_type, id,
371 &dqb->dqb_bhardlimit, &dqb->dqb_bsoftlimit,
372 &dqb->dqb_btime, is_default);
376 dqb->dqb_valid |= QIF_BLIMITS | QIF_BTIME;
377 /* master doesn't know the actual block usage */
378 dqb->dqb_curspace = 0;
381 case LUSTRE_Q_SETDEFAULT:
/* NOTE(review): an is_default = true assignment and a fallthrough into
 * Q_SETQUOTA presumably belong here but are not visible — TODO confirm */
385 case Q_SETQUOTA: /* change quota limits */
386 /* extract quota ID from quotactl request */
387 id->qid_uid = oqctl->qc_id;
389 if ((dqb->dqb_valid & QIF_IFLAGS) != 0) {
390 /* update inode quota settings */
391 rc = qmt_set(env, qmt, LQUOTA_RES_MD, oqctl->qc_type,
392 id, dqb->dqb_ihardlimit,
393 dqb->dqb_isoftlimit, dqb->dqb_itime,
394 dqb->dqb_valid & QIF_IFLAGS, is_default,
400 if ((dqb->dqb_valid & QIF_BFLAGS) != 0)
401 /* update block quota settings */
402 rc = qmt_set(env, qmt, LQUOTA_RES_DT, oqctl->qc_type,
403 id, dqb->dqb_bhardlimit,
404 dqb->dqb_bsoftlimit, dqb->dqb_btime,
405 dqb->dqb_valid & QIF_BFLAGS, is_default,
410 CERROR("%s: unsupported quotactl command: %d\n",
411 qmt->qmt_svname, oqctl->qc_cmd);
/* NOTE(review): default-case return and function epilogue not visible */
419 * Helper function to handle quota request from slave.
421 * \param env - is the environment passed by the caller
422 * \param lqe - is the lquota_entry subject to the quota request
423 * \param qmt - is the master device
424 * \param uuid - is the uuid associated with the slave
425 * \param qb_flags - are the quota request flags as packed in the quota_body
426 * \param qb_count - is the amount of quota space the slave wants to
428 * \param qb_usage - is the current space usage on the slave
429 * \param repbody - is the quota_body of reply
431 * \retval 0 : success
432 * \retval -EDQUOT : out of quota
433 * -EINPROGRESS : inform client to retry write/create
434 * -ve : other appropriate errors
436 int qmt_dqacq0(const struct lu_env *env, struct lquota_entry *lqe,
437 struct qmt_device *qmt, struct obd_uuid *uuid, __u32 qb_flags,
438 __u64 qb_count, __u64 qb_usage, struct quota_body *repbody)
440 struct qmt_thread_info *qti = qmt_info(env);
442 struct dt_object *slv_obj = NULL;
443 __u64 slv_granted, slv_granted_bck;
444 struct thandle *th = NULL;
448 LASSERT(uuid != NULL);
450 /* initialize reply */
451 memset(repbody, 0, sizeof(*repbody));
452 memcpy(&repbody->qb_id, &lqe->lqe_id, sizeof(repbody->qb_id));
/* fault-injection hook for testing recoverable-error handling */
454 if (OBD_FAIL_CHECK(OBD_FAIL_QUOTA_RECOVERABLE_ERR))
455 RETURN(-cfs_fail_val);
457 /* look-up index file associated with acquiring slave */
458 slv_obj = lquota_disk_slv_find(env, qmt->qmt_child, LQE_ROOT(lqe),
459 lu_object_fid(&LQE_GLB_OBJ(lqe)->do_lu),
/* NOTE(review): trailing arguments and IS_ERR check not visible */
462 GOTO(out, rc = PTR_ERR(slv_obj));
464 /* pack slave fid in reply just for sanity check */
465 memcpy(&repbody->qb_slv_fid, lu_object_fid(&slv_obj->do_lu),
466 sizeof(struct lu_fid));
468 /* allocate & start transaction with enough credits to update
469 * global & slave indexes */
470 th = qmt_trans_start_with_slv(env, lqe, slv_obj, &qti->qti_restore);
472 GOTO(out, rc = PTR_ERR(th));
475 LQUOTA_DEBUG(lqe, "dqacq starts uuid:%s flags:0x%x wanted:%llu"
476 " usage:%llu", obd_uuid2str(uuid), qb_flags, qb_count,
479 /* Legal race, limits have been removed on master, but slave didn't
480 * receive the change yet. Just return EINPROGRESS until the slave gets
 * notified */
482 if (!lqe->lqe_enforced && !req_is_rel(qb_flags))
483 GOTO(out_locked, rc = -ESRCH);
485 /* recompute qunit in case it was never initialized */
486 qmt_revalidate(env, lqe);
488 /* slave just wants to acquire per-ID lock */
489 if (req_is_acq(qb_flags) && qb_count == 0)
490 GOTO(out_locked, rc = 0);
492 /* fetch how much quota space is already granted to this slave */
493 rc = qmt_slv_read(env, lqe, slv_obj, &slv_granted);
495 LQUOTA_ERROR(lqe, "Failed to get granted for slave %s, rc=%d",
496 obd_uuid2str(uuid), rc);
497 GOTO(out_locked, rc);
499 /* recall how much space this slave currently owns in order to restore
500 * it in case of failure */
501 slv_granted_bck = slv_granted;
503 /* record current time for soft limit & grace time management */
504 now = ktime_get_real_seconds();
506 if (req_is_rel(qb_flags)) {
507 /* Slave would like to release quota space */
508 if (slv_granted < qb_count ||
509 lqe->lqe_granted < qb_count) {
510 /* can't release more than granted */
511 LQUOTA_ERROR(lqe, "Release too much! uuid:%s release:"
512 "%llu granted:%llu, total:%llu",
513 obd_uuid2str(uuid), qb_count,
514 slv_granted, lqe->lqe_granted);
515 GOTO(out_locked, rc = -EINVAL);
518 repbody->qb_count = qb_count;
519 /* put released space back to global pool */
520 QMT_REL(lqe, slv_granted, qb_count);
521 GOTO(out_write, rc = 0);
524 if (req_has_rep(qb_flags) && slv_granted < qb_usage) {
525 /* Slave is reporting space usage in quota request and it turns
526 * out to be using more quota space than owned, so we adjust
527 * granted space regardless of the current state of affairs */
528 repbody->qb_count = qb_usage - slv_granted;
529 QMT_GRANT(lqe, slv_granted, repbody->qb_count);
/* report-only requests stop here; only acquire/pre-acquire continue on
 * to the granting logic below */
532 if (!req_is_acq(qb_flags) && !req_is_preacq(qb_flags))
533 GOTO(out_write, rc = 0);
535 qmt_adjust_edquot(lqe, now);
537 /* no hope to claim further space back */
538 GOTO(out_write, rc = -EDQUOT);
540 if (qmt_space_exhausted(lqe, now)) {
541 /* might have some free space once rebalancing is completed */
542 rc = req_is_acq(qb_flags) ? -EINPROGRESS : -EDQUOT;
546 if (req_is_preacq(qb_flags)) {
547 /* slave would like to pre-acquire quota space. To do so, it
548 * reports in qb_count how much spare quota space it owns and we
549 * can grant back quota space which is consistent with qunit
 */
552 if (qb_count >= lqe->lqe_qunit)
553 /* slave already own the maximum it should */
554 GOTO(out_write, rc = 0);
556 count = qmt_alloc_expand(lqe, slv_granted, qb_count);
/* NOTE(review): the 'count == 0' test implied before this GOTO is missing */
558 GOTO(out_write, rc = -EDQUOT);
560 repbody->qb_count += count;
561 QMT_GRANT(lqe, slv_granted, count);
562 GOTO(out_write, rc = 0);
565 /* processing acquire request with clients waiting */
566 if (lqe->lqe_hardlimit != 0 &&
567 lqe->lqe_granted + qb_count > lqe->lqe_hardlimit) {
568 /* cannot grant as much as asked, but can still afford to grant
569 * some quota space back */
570 count = lqe->lqe_hardlimit - lqe->lqe_granted;
571 repbody->qb_count += count;
572 QMT_GRANT(lqe, slv_granted, count);
573 GOTO(out_write, rc = 0);
576 /* Whouhou! we can satisfy the slave request! */
577 repbody->qb_count += qb_count;
578 QMT_GRANT(lqe, slv_granted, qb_count);
580 /* Try to expand the acquired count for DQACQ */
581 count = qmt_alloc_expand(lqe, slv_granted, 0);
583 /* can even grant more than asked, it is like xmas ... */
584 repbody->qb_count += count;
585 QMT_GRANT(lqe, slv_granted, count);
586 GOTO(out_write, rc = 0);
589 GOTO(out_write, rc = 0);
/* out_write path: nothing was granted/released, skip the index updates */
591 if (repbody->qb_count == 0)
592 GOTO(out_locked, rc);
594 /* start/stop grace timer if required */
595 if (lqe->lqe_softlimit != 0) {
596 if (lqe->lqe_granted > lqe->lqe_softlimit &&
597 lqe->lqe_gracetime == 0)
598 /* first time over soft limit, let's start grace
 * timer */
600 lqe->lqe_gracetime = now + qmt_lqe_grace(lqe);
601 else if (lqe->lqe_granted <= lqe->lqe_softlimit &&
602 lqe->lqe_gracetime != 0)
603 /* Clear grace timer */
604 lqe->lqe_gracetime = 0;
607 /* Update slave index first since it is easier to roll back */
608 ret = qmt_slv_write(env, th, lqe, slv_obj, LQUOTA_BUMP_VER,
609 &repbody->qb_slv_ver, slv_granted);
611 /* restore initial quota settings */
612 qmt_restore(lqe, &qti->qti_restore);
614 repbody->qb_count = 0;
615 GOTO(out_locked, rc = ret);
618 /* Update global index, no version bump needed */
619 ret = qmt_glb_write(env, th, lqe, 0, NULL);
622 /* restore initial quota settings */
623 qmt_restore(lqe, &qti->qti_restore);
625 repbody->qb_count = 0;
627 /* restore previous granted value */
628 ret = qmt_slv_write(env, th, lqe, slv_obj, 0, NULL,
/* NOTE(review): slv_granted_bck argument on the missing continuation line
 * presumably closes this call — TODO confirm */
631 LQUOTA_ERROR(lqe, "failed to restore initial slave "
632 "value rc:%d ret%d", rc, ret);
635 qmt_adjust_edquot(lqe, now);
636 GOTO(out_locked, rc);
639 /* Total granted has been changed, let's try to adjust the qunit
640 * size according to the total granted & limits. */
641 qmt_adjust_qunit(env, lqe);
643 /* clear/set edquot flag and notify slaves via glimpse if needed */
644 qmt_adjust_edquot(lqe, now);
646 LQUOTA_DEBUG(lqe, "dqacq ends count:%llu ver:%llu rc:%d",
647 repbody->qb_count, repbody->qb_slv_ver, rc);
648 lqe_write_unlock(lqe);
650 if (th != NULL && !IS_ERR(th))
651 dt_trans_stop(env, qmt->qmt_child, th);
653 if (slv_obj != NULL && !IS_ERR(slv_obj))
654 dt_object_put(env, slv_obj);
/* fault-injection hook: deliberately desynchronize master and slave
 * granted values for recovery testing */
656 if ((req_is_acq(qb_flags) || req_is_preacq(qb_flags)) &&
657 OBD_FAIL_CHECK(OBD_FAIL_QUOTA_EDQUOT)) {
658 /* introduce inconsistency between granted value in slave index
659 * and slave index copy of slave */
660 repbody->qb_count = 0;
/* NOTE(review): RETURN(rc) and closing brace not visible — truncated */
668 * Handle quota request from slave.
670 * \param env - is the environment passed by the caller
671 * \param ld - is the lu device associated with the qmt
672 * \param req - is the quota acquire request
674 static int qmt_dqacq(const struct lu_env *env, struct lu_device *ld,
675 struct ptlrpc_request *req)
677 struct qmt_device *qmt = lu2qmt_dev(ld);
678 struct quota_body *qbody, *repbody;
679 struct obd_uuid *uuid;
680 struct ldlm_lock *lock;
681 struct lquota_entry *lqe;
682 int pool_type, qtype;
/* unpack request body; a missing body is a protocol error */
686 qbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
688 RETURN(err_serious(-EPROTO));
690 repbody = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_BODY);
692 RETURN(err_serious(-EFAULT));
694 /* verify if global lock is stale */
695 if (!lustre_handle_is_used(&qbody->qb_glb_lockh))
/* NOTE(review): the error RETURN for an unused handle is not visible */
698 lock = ldlm_handle2lock(&qbody->qb_glb_lockh);
703 uuid = &req->rq_export->exp_client_uuid;
/* release/acquire/pre-acquire are mutually exclusive request flags */
705 if (req_is_rel(qbody->qb_flags) + req_is_acq(qbody->qb_flags) +
706 req_is_preacq(qbody->qb_flags) > 1) {
707 CERROR("%s: malformed quota request with conflicting flags set "
708 "(%x) from slave %s\n", qmt->qmt_svname,
709 qbody->qb_flags, obd_uuid2str(uuid));
713 if (req_is_acq(qbody->qb_flags) || req_is_preacq(qbody->qb_flags)) {
714 /* acquire and pre-acquire should use a valid ID lock */
716 if (!lustre_handle_is_used(&qbody->qb_lockh))
719 lock = ldlm_handle2lock(&qbody->qb_lockh);
721 /* no lock associated with this handle */
724 LDLM_DEBUG(lock, "%sacquire request",
725 req_is_preacq(qbody->qb_flags) ? "pre" : "");
/* the per-ID lock must belong to the requesting client */
727 if (!obd_uuid_equals(&lock->l_export->exp_client_uuid, uuid)) {
728 /* sorry, no way to cheat ... */
733 if (ldlm_is_ast_sent(lock)) {
734 struct ptlrpc_service_part *svc;
737 svc = req->rq_rqbd->rqbd_svcpt;
738 timeout = at_est2timeout(at_get(&svc->scp_at_estimate));
739 timeout += (ldlm_bl_timeout(lock) >> 1);
741 /* lock is being cancelled, prolong timeout */
742 ldlm_refresh_waiting_lock(lock, timeout);
747 /* extract quota information from global index FID packed in the
 * request */
749 rc = lquota_extract_fid(&qbody->qb_fid, &pool_type, &qtype);
753 /* Find the quota entry associated with the quota id */
754 lqe = qmt_pool_lqe_lookup(env, qmt, pool_type, qtype,
/* NOTE(review): id argument and IS_ERR check not visible — truncated */
757 RETURN(PTR_ERR(lqe));
759 /* process quota request */
760 rc = qmt_dqacq0(env, lqe, qmt, uuid, qbody->qb_flags, qbody->qb_count,
761 qbody->qb_usage, repbody);
763 if (lustre_handle_is_used(&qbody->qb_lockh))
764 /* return current qunit value only to slaves owning an per-ID
765 * quota lock. For enqueue, the qunit value will be returned in
 */
767 repbody->qb_qunit = lqe->lqe_qunit;
/* NOTE(review): lqe release and RETURN(rc) not visible — truncated */
772 /* Vector of quota request handlers. This vector is used by the MDT to forward
773 * requests to the quota master. */
774 struct qmt_handlers qmt_hdls = {
775 /* quota request handlers */
776 .qmth_quotactl = qmt_quotactl,
777 .qmth_dqacq = qmt_dqacq,
/* LDLM-related handlers: intent policy and lock value block (lvbo) ops */
780 .qmth_intent_policy = qmt_intent_policy,
781 .qmth_lvbo_init = qmt_lvbo_init,
782 .qmth_lvbo_update = qmt_lvbo_update,
783 .qmth_lvbo_size = qmt_lvbo_size,
784 .qmth_lvbo_fill = qmt_lvbo_fill,
785 .qmth_lvbo_free = qmt_lvbo_free,
/* NOTE(review): closing brace of the initializer is not visible */
787 EXPORT_SYMBOL(qmt_hdls);