/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2012, 2013, Intel Corporation.
 * Use is subject to license terms.
 *
 * Author: Johann Lombardi <johann.lombardi@intel.com>
 * Author: Niu Yawei <yawei.niu@intel.com>
 */
#define DEBUG_SUBSYSTEM S_LQUOTA

#include "qmt_internal.h"
/*
 * Initialize qmt-specific fields of quota entry.
 *
 * \param lqe - is the quota entry to initialize
 * \param arg - is the pointer to the qmt_pool_info structure
 */
static void qmt_lqe_init(struct lquota_entry *lqe, void *arg)
{
        LASSERT(lqe_is_master(lqe));

        lqe->lqe_revoke_time = 0;
        init_rwsem(&lqe->lqe_sem);
}
/*
 * Update a lquota entry. This is done by reading quota settings from the
 * global index. The lquota entry must be write locked.
 *
 * \param env - the environment passed by the caller
 * \param lqe - is the quota entry to refresh
 * \param arg - is the pointer to the qmt_pool_info structure
 */
static int qmt_lqe_read(const struct lu_env *env, struct lquota_entry *lqe,
                        void *arg)
{
        struct qmt_thread_info  *qti = qmt_info(env);
        struct qmt_pool_info    *pool = (struct qmt_pool_info *)arg;
        int                      rc;
        ENTRY;

        LASSERT(lqe_is_master(lqe));

        /* read record from disk */
        rc = lquota_disk_read(env, pool->qpi_glb_obj[lqe->lqe_site->lqs_qtype],
                              &lqe->lqe_id, (struct dt_rec *)&qti->qti_glb_rec);

        switch (rc) {
        case -ENOENT:
                /* no such entry, assume quota isn't enforced for this user */
                lqe->lqe_enforced = false;
                break;
        case 0:
                /* copy quota settings from on-disk record */
                lqe->lqe_granted   = qti->qti_glb_rec.qbr_granted;
                lqe->lqe_hardlimit = qti->qti_glb_rec.qbr_hardlimit;
                lqe->lqe_softlimit = qti->qti_glb_rec.qbr_softlimit;
                lqe->lqe_gracetime = qti->qti_glb_rec.qbr_time;

                if (lqe->lqe_hardlimit == 0 && lqe->lqe_softlimit == 0)
                        /* {hard,soft}limit=0 means no quota enforced */
                        lqe->lqe_enforced = false;
                else
                        lqe->lqe_enforced = true;
                break;
        default:
                LQUOTA_ERROR(lqe, "failed to read quota entry from disk, rc:%d",
                             rc);
                RETURN(rc);
        }

        LQUOTA_DEBUG(lqe, "read");
        RETURN(0);
}
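/*
 * Note on the enforcement rule above: a global record with both
 * qbr_hardlimit and qbr_softlimit set to 0, or no record at all, leaves
 * lqe_enforced false, so no enforcement is applied to that identifier;
 * setting either limit to a non-zero value flips lqe_enforced to true on
 * the next refresh from disk.
 */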
/*
 * Print lqe information for debugging.
 *
 * \param lqe - is the quota entry to debug
 * \param arg - is the pointer to the qmt_pool_info structure
 * \param msgdata - debug message
 * \param fmt - format of debug message
 */
static void qmt_lqe_debug(struct lquota_entry *lqe, void *arg,
                          struct libcfs_debug_msg_data *msgdata,
                          const char *fmt, va_list args)
{
        struct qmt_pool_info    *pool = (struct qmt_pool_info *)arg;

        libcfs_debug_vmsg2(msgdata, fmt, args,
                           "qmt:%s pool:%d-%s id:"LPU64" enforced:%d hard:"LPU64
                           " soft:"LPU64" granted:"LPU64" time:"LPU64" qunit:"
                           LPU64" edquot:%d may_rel:"LPU64" revoke:"LPU64"\n",
                           pool->qpi_qmt->qmt_svname,
                           pool->qpi_key & 0x0000ffff,
                           RES_NAME(pool->qpi_key >> 16),
                           lqe->lqe_id.qid_uid, lqe->lqe_enforced,
                           lqe->lqe_hardlimit, lqe->lqe_softlimit,
                           lqe->lqe_granted, lqe->lqe_gracetime,
                           lqe->lqe_qunit, lqe->lqe_edquot, lqe->lqe_may_rel,
                           lqe->lqe_revoke_time);
}
/*
 * Vector of quota entry operations supported on the master
 */
struct lquota_entry_operations qmt_lqe_ops = {
        .lqe_init       = qmt_lqe_init,
        .lqe_read       = qmt_lqe_read,
        .lqe_debug      = qmt_lqe_debug,
};
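/*
 * Usage sketch (the call site below lives outside this file and its exact
 * form is assumed here): the master registers this vector when it allocates
 * a per-quota-type lquota site, along the lines of
 *
 *      site = lquota_site_alloc(env, pool, true, qtype, &qmt_lqe_ops);
 *
 * so that qmt_lqe_init(), qmt_lqe_read() and qmt_lqe_debug() above are called
 * back on entry allocation, on cache miss and for debug logging respectively.
 */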
/*
 * Reserve enough credits to update records in both the global index and
 * the slave index identified by \slv_obj
 *
 * \param env     - is the environment passed by the caller
 * \param lqe     - is the quota entry associated with the identifier
 *                  subject to the change
 * \param slv_obj - is the dt_object associated with the index file
 * \param restore - is a temporary storage for current quota settings which will
 *                  be restored if something goes wrong at index update time.
 */
struct thandle *qmt_trans_start_with_slv(const struct lu_env *env,
                                         struct lquota_entry *lqe,
                                         struct dt_object *slv_obj,
                                         struct qmt_lqe_restore *restore)
{
        struct qmt_device       *qmt;
        struct thandle          *th;
        int                      rc;
        ENTRY;

        LASSERT(lqe != NULL);
        LASSERT(lqe_is_master(lqe));

        qmt = lqe2qpi(lqe)->qpi_qmt;

        if (slv_obj != NULL)
                LQUOTA_DEBUG(lqe, "declare write for slv "DFID,
                             PFID(lu_object_fid(&slv_obj->do_lu)));

        /* start transaction */
        th = dt_trans_create(env, qmt->qmt_child);
        if (IS_ERR(th))
                RETURN(th);

        if (slv_obj == NULL)
                /* quota settings on master are updated synchronously for the
                 * time being */
                th->th_sync = 1;

        /* reserve credits for global index update */
        rc = lquota_disk_declare_write(env, th, LQE_GLB_OBJ(lqe), &lqe->lqe_id);
        if (rc)
                GOTO(out, rc);

        if (slv_obj != NULL) {
                /* reserve credits for slave index update */
                rc = lquota_disk_declare_write(env, th, slv_obj, &lqe->lqe_id);
                if (rc)
                        GOTO(out, rc);
        }

        /* start transaction */
        rc = dt_trans_start_local(env, qmt->qmt_child, th);
        if (rc)
                GOTO(out, rc);
        EXIT;
out:
        if (rc) {
                dt_trans_stop(env, qmt->qmt_child, th);
                th = ERR_PTR(rc);
                LQUOTA_ERROR(lqe, "failed to slv declare write for "DFID
                             ", rc:%d", PFID(lu_object_fid(&slv_obj->do_lu)),
                             rc);
        } else {
                restore->qlr_hardlimit = lqe->lqe_hardlimit;
                restore->qlr_softlimit = lqe->lqe_softlimit;
                restore->qlr_gracetime = lqe->lqe_gracetime;
                restore->qlr_granted   = lqe->lqe_granted;
                restore->qlr_qunit     = lqe->lqe_qunit;
        }
        return th;
}
/*
 * Reserve enough credits to update a record in the global index
 *
 * \param env     - is the environment passed by the caller
 * \param lqe     - is the quota entry to be modified in the global index
 * \param restore - is a temporary storage for current quota settings which will
 *                  be restored if something goes wrong at index update time.
 */
struct thandle *qmt_trans_start(const struct lu_env *env,
                                struct lquota_entry *lqe,
                                struct qmt_lqe_restore *restore)
{
        LQUOTA_DEBUG(lqe, "declare write");
        return qmt_trans_start_with_slv(env, lqe, NULL, restore);
}
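/*
 * Typical caller pattern, sketched for illustration (the locking helpers and
 * error handling below are assumed; only qmt_trans_start() and
 * qmt_glb_write() are defined in this file):
 *
 *      th = qmt_trans_start(env, lqe, &restore);
 *      if (IS_ERR(th))
 *              return PTR_ERR(th);
 *      lqe_write_lock(lqe);
 *      ... apply the new settings to lqe ...
 *      rc = qmt_glb_write(env, th, lqe, LQUOTA_BUMP_VER, &ver);
 *      lqe_write_unlock(lqe);
 *      dt_trans_stop(env, qmt->qmt_child, th);
 *      if (rc)
 *              ... roll lqe back from the qmt_lqe_restore copy ...
 */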
/*
 * Update the record associated with a quota entry in the global index.
 * If LQUOTA_BUMP_VER is set, then the global index version must also be
 * bumped.
 * The entry must be at least read locked, dirty and up-to-date.
 *
 * \param env   - the environment passed by the caller
 * \param th    - is the transaction handle to be used for the disk writes
 * \param lqe   - is the quota entry to update
 * \param flags - can be LQUOTA_BUMP_VER or LQUOTA_SET_VER.
 * \param ver   - is used to return the new version of the index.
 *
 * \retval      - 0 on success and lqe dirty flag cleared,
 *                appropriate error on failure and uptodate flag cleared.
 */
int qmt_glb_write(const struct lu_env *env, struct thandle *th,
                  struct lquota_entry *lqe, __u32 flags, __u64 *ver)
{
        struct qmt_thread_info  *qti = qmt_info(env);
        struct lquota_glb_rec   *rec;
        int                      rc;
        ENTRY;

        LASSERT(lqe != NULL);
        LASSERT(lqe_is_master(lqe));
        LASSERT(lqe_is_locked(lqe));
        LASSERT(lqe->lqe_uptodate);
        LASSERT((flags & ~(LQUOTA_BUMP_VER | LQUOTA_SET_VER)) == 0);

        LQUOTA_DEBUG(lqe, "write glb");

        /* never delete the entry, even when the id isn't enforced and no
         * quota is granted; otherwise the entry won't be synced to the
         * slaves during reintegration. */
        rec = &qti->qti_glb_rec;

        /* fill global index with updated quota settings */
        rec->qbr_granted   = lqe->lqe_granted;
        rec->qbr_hardlimit = lqe->lqe_hardlimit;
        rec->qbr_softlimit = lqe->lqe_softlimit;
        rec->qbr_time      = lqe->lqe_gracetime;

        /* write new quota settings */
        rc = lquota_disk_write(env, th, LQE_GLB_OBJ(lqe), &lqe->lqe_id,
                               (struct dt_rec *)rec, flags, ver);
        if (rc)
                /* we failed to write the new quota settings to disk, report
                 * error to the caller who will restore the initial value */
                LQUOTA_ERROR(lqe, "failed to update global index, rc:%d", rc);

        RETURN(rc);
}
/*
 * Read from disk how much quota space is allocated to a slave.
 * This is done by reading records from the dedicated slave index file.
 * Return in \granted how much quota space is currently allocated to the
 * slave.
 * The entry must be at least read locked.
 *
 * \param env     - the environment passed by the caller
 * \param lqe     - is the quota entry associated with the identifier to
 *                  look-up in the slave index
 * \param slv_obj - is the dt_object associated with the slave index
 * \param granted - is the output parameter where to return how much space
 *                  is granted to the slave.
 *
 * \retval      - 0 on success, appropriate error on failure
 */
int qmt_slv_read(const struct lu_env *env, struct lquota_entry *lqe,
                 struct dt_object *slv_obj, __u64 *granted)
{
        struct qmt_thread_info  *qti = qmt_info(env);
        struct lquota_slv_rec   *slv_rec = &qti->qti_slv_rec;
        int                      rc;
        ENTRY;

        LASSERT(lqe != NULL);
        LASSERT(lqe_is_master(lqe));
        LASSERT(lqe_is_locked(lqe));

        LQUOTA_DEBUG(lqe, "read slv "DFID,
                     PFID(lu_object_fid(&slv_obj->do_lu)));

        /* read slave record from disk */
        rc = lquota_disk_read(env, slv_obj, &lqe->lqe_id,
                              (struct dt_rec *)slv_rec);
        switch (rc) {
        case -ENOENT:
                *granted = 0;
                break;
        case 0:
                /* extract granted from on-disk record */
                *granted = slv_rec->qsr_granted;
                break;
        default:
                LQUOTA_ERROR(lqe, "failed to read slave record "DFID,
                             PFID(lu_object_fid(&slv_obj->do_lu)));
                RETURN(rc);
        }

        LQUOTA_DEBUG(lqe, "successful slv read "LPU64, *granted);

        RETURN(0);
}
/*
 * Update a record in a slave index file.
 * The entry must be at least read locked.
 *
 * \param env     - the environment passed by the caller
 * \param th      - is the transaction handle to be used for the disk writes
 * \param lqe     - is the dirty quota entry which will be updated at the same
 *                  time as the slave index
 * \param slv_obj - is the dt_object associated with the slave index
 * \param flags   - can be LQUOTA_BUMP_VER or LQUOTA_SET_VER.
 * \param ver     - is used to return the new version of the index.
 * \param granted - is the new amount of quota space owned by the slave
 *
 * \retval      - 0 on success, appropriate error on failure
 */
int qmt_slv_write(const struct lu_env *env, struct thandle *th,
                  struct lquota_entry *lqe, struct dt_object *slv_obj,
                  __u32 flags, __u64 *ver, __u64 granted)
{
        struct qmt_thread_info  *qti = qmt_info(env);
        struct lquota_slv_rec   *rec;
        int                      rc;
        ENTRY;

        LASSERT(lqe != NULL);
        LASSERT(lqe_is_master(lqe));
        LASSERT(lqe_is_locked(lqe));

        LQUOTA_DEBUG(lqe, "write slv "DFID" granted:"LPU64,
                     PFID(lu_object_fid(&slv_obj->do_lu)), granted);

        /* never delete the entry, otherwise it won't be transferred to the
         * slave during reintegration. */
        rec = &qti->qti_slv_rec;

        /* update the space granted to this slave */
        rec->qsr_granted = granted;

        /* write new granted space */
        rc = lquota_disk_write(env, th, slv_obj, &lqe->lqe_id,
                               (struct dt_rec *)rec, flags, ver);
        if (rc)
                LQUOTA_ERROR(lqe, "failed to update slave index "DFID" granted:"
                             LPU64, PFID(lu_object_fid(&slv_obj->do_lu)),
                             granted);

        RETURN(rc);
}
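/*
 * Note on the two record types used above: the global index record
 * (lquota_glb_rec) written by qmt_glb_write() carries the limits, grace time
 * and the total amount granted for an id, while the per-slave record
 * (lquota_slv_rec) written here only carries qsr_granted, i.e. the share of
 * that total currently owned by one particular slave.
 */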
/*
 * Check whether new limits are valid for this pool
 *
 * \param lqe  - is the quota entry subject to the setquota
 * \param hard - is the new hard limit
 * \param soft - is the new soft limit
 */
int qmt_validate_limits(struct lquota_entry *lqe, __u64 hard, __u64 soft)
{
        ENTRY;

        if (hard != 0 && soft > hard)
                /* soft limit must be less than hard limit */
                RETURN(-EINVAL);
        RETURN(0);
}
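/*
 * Worked example: hard = 100 and soft = 200 (in the resource's native units)
 * is rejected by the check above since soft > hard, whereas hard = 0 with
 * soft = 200 is accepted because an unset hard limit places no upper bound
 * on the soft limit.
 */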
/*
 * Set/clear edquot flag after quota space allocation/release or settings
 * change. Slaves will be notified of changes via glimpse on per-ID lock
 *
 * \param lqe - is the quota entry to check
 * \param now - is the current time in seconds used for grace time management
 */
void qmt_adjust_edquot(struct lquota_entry *lqe, __u64 now)
{
        struct qmt_pool_info    *pool = lqe2qpi(lqe);
        ENTRY;

        if (!lqe->lqe_enforced || lqe->lqe_id.qid_uid == 0)
                RETURN_EXIT;

        if (!lqe->lqe_edquot) {
                /* space exhausted flag not set, let's check whether it is time
                 * to set the flag */

                if (!qmt_space_exhausted(lqe, now))
                        /* the qmt still has available space */
                        RETURN_EXIT;

                /* See comment in qmt_adjust_qunit(). LU-4139 */
                if (qmt_hard_exhausted(lqe) ||
                    pool->qpi_key >> 16 == LQUOTA_RES_MD) {
                        /* we haven't reached the minimal qunit yet so there is
                         * still hope that the rebalancing process might free
                         * up some quota space */
                        if (lqe->lqe_qunit != pool->qpi_least_qunit)
                                RETURN_EXIT;

                        /* least qunit value not sent to all slaves yet */
                        if (lqe->lqe_revoke_time == 0)
                                RETURN_EXIT;

                        /* Let's give more time to slave to release space */
                        if (lqe->lqe_may_rel != 0 &&
                            cfs_time_before_64(cfs_time_shift_64(
                                                        -QMT_REBA_TIMEOUT),
                                               lqe->lqe_revoke_time))
                                RETURN_EXIT;
                } else {
                        /* When exceeding softlimit, block qunit will be shrunk
                         * to (4 * least_qunit) finally. */
                        if (lqe->lqe_qunit > (pool->qpi_least_qunit << 2))
                                RETURN_EXIT;
                }

                /* set edquot flag */
                lqe->lqe_edquot = true;
        } else {
                /* space exhausted flag set, let's check whether it is time to
                 * clear it */

                if (qmt_space_exhausted(lqe, now))
                        /* the qmt still has no space available */
                        RETURN_EXIT;

                if (lqe->lqe_hardlimit != 0 &&
                    lqe->lqe_granted + pool->qpi_least_qunit >
                                                        lqe->lqe_hardlimit)
                        /* we clear the flag only once at least one least qunit
                         * is available */
                        RETURN_EXIT;

                /* clear edquot flag */
                lqe->lqe_edquot = false;
        }

        LQUOTA_DEBUG(lqe, "changing edquot flag");

        /* let's notify the slaves by issuing glimpse on per-ID lock.
         * the rebalance thread will take care of this */
        qmt_id_lock_notify(pool->qpi_qmt, lqe);
        EXIT;
}
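/*
 * Worked example for the clearing branch above: with a hard limit of 1000
 * units, 998 units granted and least_qunit = 4, edquot stays set because
 * 998 + 4 > 1000; the flag is only dropped once at least one least_qunit
 * worth of space can still be handed out below the hard limit.
 */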
/* Using least_qunit when over block softlimit will seriously impact the
 * write performance, we need to do some special tweaking on that. */
static __u64 qmt_calc_softlimit(struct lquota_entry *lqe, bool *oversoft)
{
        struct qmt_pool_info *pool = lqe2qpi(lqe);

        LASSERT(lqe->lqe_softlimit != 0);
        *oversoft = false;
        /* No need to do special tweaking for inode limit */
        if (pool->qpi_key >> 16 == LQUOTA_RES_MD)
                return lqe->lqe_softlimit;

        /* Added (least_qunit * 4) as margin */
        if (lqe->lqe_granted <= lqe->lqe_softlimit +
                                (pool->qpi_least_qunit << 2)) {
                return lqe->lqe_softlimit;
        } else if (lqe->lqe_hardlimit != 0) {
                *oversoft = true;
                return lqe->lqe_hardlimit;
        } else {
                *oversoft = true;
                return 0;
        }
}
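/*
 * Worked example (values are illustrative): for a block pool with
 * softlimit = 1000, least_qunit = 4 and hardlimit = 2000, the margin is
 * 4 * 4 = 16, so up to 1016 granted units the function still reports the
 * soft limit; beyond that it reports the hard limit (or 0 when no hard
 * limit is set) and flags *oversoft so that callers cap qunit at
 * 4 * least_qunit rather than dropping straight to the minimum.
 */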
/*
 * Try to grant more quota space back to slave.
 *
 * \param lqe     - is the quota entry for which we would like to allocate more
 *                  space
 * \param granted - is how much was already granted as part of the request
 * \param spare   - is how much unused quota space the slave already owns
 *
 * \retval how much additional space can be granted to the slave
 */
__u64 qmt_alloc_expand(struct lquota_entry *lqe, __u64 granted, __u64 spare)
{
        struct qmt_pool_info    *pool = lqe2qpi(lqe);
        __u64                    remaining, qunit;
        int                      slv_cnt;
        ENTRY;

        LASSERT(lqe->lqe_enforced && lqe->lqe_qunit != 0);

        slv_cnt = lqe2qpi(lqe)->qpi_slv_nr[lqe->lqe_site->lqs_qtype];
        qunit = lqe->lqe_qunit;

        /* See comment in qmt_adjust_qunit(). LU-4139. */
        if (lqe->lqe_softlimit != 0) {
                bool oversoft = false;
                remaining = qmt_calc_softlimit(lqe, &oversoft);
                if (remaining == 0)
                        remaining = lqe->lqe_granted +
                                    (pool->qpi_least_qunit << 2);
        } else {
                remaining = lqe->lqe_hardlimit;
        }

        if (lqe->lqe_granted >= remaining)
                RETURN(0);

        remaining -= lqe->lqe_granted;

        do {
                if (spare >= qunit)
                        break;

                granted &= (qunit - 1);

                if (remaining > (slv_cnt * qunit) >> 1) {
                        /* enough room to grant more space w/o additional
                         * shrinking ... at least for now */
                        remaining -= (slv_cnt * qunit) >> 1;
                } else if (qunit != pool->qpi_least_qunit) {
                        /* try again with a smaller qunit */
                        qunit >>= 2;
                        continue;
                } else {
                        /* grant whatever is left with the smallest qunit */
                        granted &= (qunit - 1);

                        RETURN(min_t(__u64, qunit - spare, remaining));
                }

                RETURN(min_t(__u64, qunit - granted, remaining));
        } while (qunit >= pool->qpi_least_qunit);

        RETURN(0);
}
/*
 * Adjust qunit size according to quota limits and total granted count.
 * The caller must have locked the lqe.
 *
 * \param env - the environment passed by the caller
 * \param lqe - is the qid entry to be adjusted
 */
void qmt_adjust_qunit(const struct lu_env *env, struct lquota_entry *lqe)
{
        struct qmt_pool_info    *pool = lqe2qpi(lqe);
        int                      slv_cnt;
        __u64                    qunit, limit, qunit2 = 0;
        ENTRY;

        LASSERT(lqe_is_locked(lqe));

        if (!lqe->lqe_enforced || lqe->lqe_id.qid_uid == 0)
                /* no quota limits */
                RETURN_EXIT;

        /* record how many slaves have already registered */
        slv_cnt = pool->qpi_slv_nr[lqe->lqe_site->lqs_qtype];
        if (slv_cnt == 0)
                /* wait for at least one slave to join */
                RETURN_EXIT;

        /* Qunit calculation is based on soft limit, if any, hard limit
         * otherwise. This means that qunit is shrunk to the minimum when
         * beyond the soft limit. This will impact performance, but that's the
         * price of an accurate grace time management. */
        if (lqe->lqe_softlimit != 0) {
                bool oversoft = false;
                /* As a compromise between write performance and grace time
                 * accuracy, the block qunit size will be shrunk to
                 * (4 * least_qunit) when over softlimit. LU-4139. */
                limit = qmt_calc_softlimit(lqe, &oversoft);
                if (oversoft)
                        qunit2 = pool->qpi_least_qunit << 2;
                if (limit == 0)
                        GOTO(done, qunit = qunit2);
        } else if (lqe->lqe_hardlimit != 0) {
                limit = lqe->lqe_hardlimit;
        } else {
                LQUOTA_ERROR(lqe, "enforced bit set, but neither hard nor soft "
                             "limit is set");
                RETURN_EXIT;
        }

        qunit = lqe->lqe_qunit == 0 ? pool->qpi_least_qunit : lqe->lqe_qunit;

        /* The qunit value is computed as follows: limit / (2 * slv_cnt).
         * Then 75% of the quota space can be granted with current qunit value.
         * The remaining 25% are then used with reduced qunit size (by a factor
         * of 4) which is then divided in a similar manner.
         *
         * |---------------------limit---------------------|
         * |-------limit / 2-------|-limit / 4-|-limit / 4-|
         * |qunit|qunit|qunit|qunit|           |           |
         * |----slv_cnt * qunit----|           |           |
         * |-grow limit-|          |           |           |
         * |--------------shrink limit---------|           |
         * |---space granted in qunit chunks---|-remaining-|
         *
         *           qunit >>= 2;  |qunit*slv_cnt|qunit*slv_cnt|
         *                         |---space in qunit---|remain|
         */
        if (qunit == pool->qpi_least_qunit ||
            limit >= lqe->lqe_granted + ((slv_cnt * qunit) >> 1)) {
                /* current qunit value still fits, let's see if we can afford
                 * to increase qunit now ...
                 * To increase qunit again, we have to be under 25% */
                while (limit >= lqe->lqe_granted + 6 * qunit * slv_cnt)
                        qunit <<= 2;
        } else {
                /* shrink qunit until we find a suitable value */
                while (qunit > pool->qpi_least_qunit &&
                       limit < lqe->lqe_granted + ((slv_cnt * qunit) >> 1))
                        qunit >>= 2;
        }

        if (qunit2 && qunit > qunit2)
                qunit = qunit2;
done:
        if (lqe->lqe_qunit == qunit)
                /* keep current qunit */
                RETURN_EXIT;

        LQUOTA_DEBUG(lqe, "%s qunit to "LPU64,
                     lqe->lqe_qunit < qunit ? "increasing" : "decreasing",
                     qunit);

        /* store new qunit value */
        swap(lqe->lqe_qunit, qunit);

        /* reset revoke time */
        lqe->lqe_revoke_time = 0;

        if (lqe->lqe_qunit < qunit)
                /* let's notify slave of qunit shrinking */
                qmt_id_lock_notify(pool->qpi_qmt, lqe);
        else if (lqe->lqe_qunit == pool->qpi_least_qunit)
                /* initial qunit value is the smallest one */
                lqe->lqe_revoke_time = cfs_time_current_64();
        EXIT;
}
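/*
 * Worked example of the sizing logic above (illustrative values): with
 * limit = 1000, least_qunit = 1, slv_cnt = 4 and nothing granted yet, qunit
 * grows 1 -> 4 -> 16 -> 64 and stops there since 6 * 64 * 4 = 1536 exceeds
 * the limit; space is then handed out to slaves in 64-unit chunks until
 * granted + (slv_cnt * qunit) / 2 exceeds the limit, at which point qunit
 * is divided by 4 again.
 */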
/*
 * Adjust qunit & edquot flag in case it wasn't initialized already (e.g.
 * limit set while no slaves were connected yet)
 */
void qmt_revalidate(const struct lu_env *env, struct lquota_entry *lqe)
{
        if (lqe->lqe_qunit == 0) {
                /* lqe was read from disk, but neither qunit, nor edquot flag
                 * were initialized */
                qmt_adjust_qunit(env, lqe);
                if (lqe->lqe_qunit != 0)
                        qmt_adjust_edquot(lqe, cfs_time_current_sec());
        }
}