 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 * Copyright (c) 2012, 2013, Intel Corporation.
 * Use is subject to license terms.
 * Author: Johann Lombardi <johann.lombardi@intel.com>
 * Author: Niu Yawei <yawei.niu@intel.com>

#define DEBUG_SUBSYSTEM S_LQUOTA

#include "qmt_internal.h"
 * Initialize qmt-specific fields of quota entry.
 * \param lqe - is the quota entry to initialize
 * \param arg - is the pointer to the qmt_pool_info structure
static void qmt_lqe_init(struct lquota_entry *lqe, void *arg)
	LASSERT(lqe_is_master(lqe));
	lqe->lqe_revoke_time = 0;
	init_rwsem(&lqe->lqe_sem);
 * Update a lquota entry. This is done by reading quota settings from the global
 * index. The lquota entry must be write locked.
 * \param env - the environment passed by the caller
 * \param lqe - is the quota entry to refresh
 * \param arg - is the pointer to the qmt_pool_info structure
static int qmt_lqe_read(const struct lu_env *env, struct lquota_entry *lqe,
	struct qmt_thread_info *qti = qmt_info(env);
	struct qmt_pool_info *pool = (struct qmt_pool_info *)arg;
	LASSERT(lqe_is_master(lqe));
	/* read record from disk */
	rc = lquota_disk_read(env, pool->qpi_glb_obj[lqe->lqe_site->lqs_qtype],
			      &lqe->lqe_id, (struct dt_rec *)&qti->qti_glb_rec);
		/* no such entry, assume quota isn't enforced for this user */
		lqe->lqe_enforced = false;
		/* copy quota settings from on-disk record */
		lqe->lqe_granted   = qti->qti_glb_rec.qbr_granted;
		lqe->lqe_hardlimit = qti->qti_glb_rec.qbr_hardlimit;
		lqe->lqe_softlimit = qti->qti_glb_rec.qbr_softlimit;
		lqe->lqe_gracetime = qti->qti_glb_rec.qbr_time;
		if (lqe->lqe_hardlimit == 0 && lqe->lqe_softlimit == 0)
			/* {hard,soft}limit=0 means no quota enforced */
			lqe->lqe_enforced = false;
			lqe->lqe_enforced = true;
		LQUOTA_ERROR(lqe, "failed to read quota entry from disk, rc:%d",
	LQUOTA_DEBUG(lqe, "read");
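	/*
	 * Illustration (added comment, not from the original code): with the
	 * logic above, an ID whose global record carries qbr_hardlimit == 0
	 * and qbr_softlimit == 0 ends up with lqe_enforced = false even if
	 * qbr_granted is non-zero; a missing record is handled the same way.
	 */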
 * Print lqe information for debugging.
 * \param lqe - is the quota entry to debug
 * \param arg - is the pointer to the qmt_pool_info structure
 * \param msgdata - debug message
 * \param fmt - format of debug message
static void qmt_lqe_debug(struct lquota_entry *lqe, void *arg,
			  struct libcfs_debug_msg_data *msgdata,
			  const char *fmt, va_list args)
	struct qmt_pool_info *pool = (struct qmt_pool_info *)arg;
	libcfs_debug_vmsg2(msgdata, fmt, args,
			   "qmt:%s pool:%d-%s id:%llu enforced:%d hard:%llu"
			   " soft:%llu granted:%llu time:%llu qunit:"
			   "%llu edquot:%d may_rel:%llu revoke:%llu\n",
			   pool->qpi_qmt->qmt_svname,
			   pool->qpi_key & 0x0000ffff,
			   RES_NAME(pool->qpi_key >> 16),
			   lqe->lqe_id.qid_uid, lqe->lqe_enforced,
			   lqe->lqe_hardlimit, lqe->lqe_softlimit,
			   lqe->lqe_granted, lqe->lqe_gracetime,
			   lqe->lqe_qunit, lqe->lqe_edquot, lqe->lqe_may_rel,
			   lqe->lqe_revoke_time);
 * Vector of quota entry operations supported on the master
struct lquota_entry_operations qmt_lqe_ops = {
	.lqe_init	= qmt_lqe_init,
	.lqe_read	= qmt_lqe_read,
	.lqe_debug	= qmt_lqe_debug,
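/*
 * Usage sketch (added for illustration, not part of the original file): the
 * master side registers this vector when it allocates the per-quota-type
 * lquota site, along the lines of:
 *
 *	site = lquota_site_alloc(env, pool, true, qtype, &qmt_lqe_ops);
 *
 * The exact helper name and signature are assumptions and may vary between
 * Lustre versions; the point is that every lquota_entry created on the QMT
 * goes through the init/read/debug callbacks above.
 */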
 * Reserve enough credits to update records in both the global index and
 * the slave index identified by \slv_obj
 * \param env - is the environment passed by the caller
 * \param lqe - is the quota entry associated with the identifier
 *              subject to the change
 * \param slv_obj - is the dt_object associated with the index file
 * \param restore - is a temporary storage for current quota settings which will
 *                  be restored if something goes wrong at index update time.
struct thandle *qmt_trans_start_with_slv(const struct lu_env *env,
					 struct lquota_entry *lqe,
					 struct dt_object *slv_obj,
					 struct qmt_lqe_restore *restore)
	struct qmt_device *qmt;
	LASSERT(lqe != NULL);
	LASSERT(lqe_is_master(lqe));
	qmt = lqe2qpi(lqe)->qpi_qmt;
	LQUOTA_DEBUG(lqe, "declare write for slv "DFID,
		     PFID(lu_object_fid(&slv_obj->do_lu)));
	/* start transaction */
	th = dt_trans_create(env, qmt->qmt_child);
	/* quota settings on master are updated synchronously for the
	/* reserve credits for global index update */
	rc = lquota_disk_declare_write(env, th, LQE_GLB_OBJ(lqe), &lqe->lqe_id);
	if (slv_obj != NULL) {
		/* reserve credits for slave index update */
		rc = lquota_disk_declare_write(env, th, slv_obj, &lqe->lqe_id);
	/* start transaction */
	rc = dt_trans_start_local(env, qmt->qmt_child, th);
	dt_trans_stop(env, qmt->qmt_child, th);
	LQUOTA_ERROR(lqe, "failed to slv declare write for "DFID
		     ", rc:%d", PFID(lu_object_fid(&slv_obj->do_lu)),
	restore->qlr_hardlimit = lqe->lqe_hardlimit;
	restore->qlr_softlimit = lqe->lqe_softlimit;
	restore->qlr_gracetime = lqe->lqe_gracetime;
	restore->qlr_granted   = lqe->lqe_granted;
	restore->qlr_qunit     = lqe->lqe_qunit;
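	/*
	 * Roll-back sketch (added comment, assumptions only): if the index
	 * update later fails, the caller is expected to put the snapshot
	 * saved above back into the entry, roughly:
	 *
	 *	lqe->lqe_hardlimit = restore->qlr_hardlimit;
	 *	lqe->lqe_softlimit = restore->qlr_softlimit;
	 *	lqe->lqe_gracetime = restore->qlr_gracetime;
	 *	lqe->lqe_granted   = restore->qlr_granted;
	 *	lqe->lqe_qunit     = restore->qlr_qunit;
	 *
	 * whether this is open-coded or wrapped in a helper is up to the
	 * caller.
	 */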
 * Reserve enough credits to update a record in the global index
 * \param env - is the environment passed by the caller
 * \param lqe - is the quota entry to be modified in the global index
 * \param restore - is a temporary storage for current quota settings which will
 *                  be restored if something goes wrong at index update time.
struct thandle *qmt_trans_start(const struct lu_env *env,
				struct lquota_entry *lqe,
				struct qmt_lqe_restore *restore)
	LQUOTA_DEBUG(lqe, "declare write");
	return qmt_trans_start_with_slv(env, lqe, NULL, restore);
 * Update record associated with a quota entry in the global index.
 * If LQUOTA_BUMP_VER is set, then the global index version must also be
 * updated.
 * The entry must be at least read locked, dirty and up-to-date.
 * \param env - the environment passed by the caller
 * \param th - is the transaction handle to be used for the disk writes
 * \param lqe - is the quota entry to update
 * \param obj - is the dt_object associated with the index file
 * \param flags - can be LQUOTA_BUMP_VER or LQUOTA_SET_VER.
 * \param ver - is used to return the new version of the index.
 * \retval - 0 on success and lqe dirty flag cleared,
 *           appropriate error on failure and uptodate flag cleared.
int qmt_glb_write(const struct lu_env *env, struct thandle *th,
		  struct lquota_entry *lqe, __u32 flags, __u64 *ver)
	struct qmt_thread_info *qti = qmt_info(env);
	struct lquota_glb_rec *rec;
	LASSERT(lqe != NULL);
	LASSERT(lqe_is_master(lqe));
	LASSERT(lqe_is_locked(lqe));
	LASSERT(lqe->lqe_uptodate);
	LASSERT((flags & ~(LQUOTA_BUMP_VER | LQUOTA_SET_VER)) == 0);
	LQUOTA_DEBUG(lqe, "write glb");
	/* never delete the entry even when the id isn't enforced and
	 * no quota is granted; otherwise this entry will not be
	 * synced to the slave during reintegration. */
	rec = &qti->qti_glb_rec;
	/* fill global index with updated quota settings */
	rec->qbr_granted   = lqe->lqe_granted;
	rec->qbr_hardlimit = lqe->lqe_hardlimit;
	rec->qbr_softlimit = lqe->lqe_softlimit;
	rec->qbr_time      = lqe->lqe_gracetime;
	/* write new quota settings */
	rc = lquota_disk_write(env, th, LQE_GLB_OBJ(lqe), &lqe->lqe_id,
			       (struct dt_rec *)rec, flags, ver);
		/* we failed to write the new quota settings to disk, report
		 * error to caller who will restore the initial value */
		LQUOTA_ERROR(lqe, "failed to update global index, rc:%d", rc);
 * Read from disk how much quota space is allocated to a slave.
 * This is done by reading records from the dedicated slave index file.
 * Return in \granted how much quota space is currently allocated to the
 * slave.
 * The entry must be at least read locked.
 * \param env - the environment passed by the caller
 * \param lqe - is the quota entry associated with the identifier to look-up
 * \param slv_obj - is the dt_object associated with the slave index
 * \param granted - is the output parameter where to return how much space
 *                  is granted to the slave.
 * \retval - 0 on success, appropriate error on failure
int qmt_slv_read(const struct lu_env *env, struct lquota_entry *lqe,
		 struct dt_object *slv_obj, __u64 *granted)
	struct qmt_thread_info *qti = qmt_info(env);
	struct lquota_slv_rec *slv_rec = &qti->qti_slv_rec;
	LASSERT(lqe != NULL);
	LASSERT(lqe_is_master(lqe));
	LASSERT(lqe_is_locked(lqe));
	LQUOTA_DEBUG(lqe, "read slv "DFID,
		     PFID(lu_object_fid(&slv_obj->do_lu)));
	/* read slave record from disk */
	rc = lquota_disk_read(env, slv_obj, &lqe->lqe_id,
			      (struct dt_rec *)slv_rec);
		/* extract granted from on-disk record */
		*granted = slv_rec->qsr_granted;
		LQUOTA_ERROR(lqe, "failed to read slave record "DFID,
			     PFID(lu_object_fid(&slv_obj->do_lu)));
	LQUOTA_DEBUG(lqe, "successful slv read %llu", *granted);
 * Update record in slave index file.
 * The entry must be at least read locked.
 * \param env - the environment passed by the caller
 * \param th - is the transaction handle to be used for the disk writes
 * \param lqe - is the dirty quota entry which will be updated at the same time
 * \param slv_obj - is the dt_object associated with the slave index
 * \param flags - can be LQUOTA_BUMP_VER or LQUOTA_SET_VER.
 * \param ver - is used to return the new version of the index.
 * \param granted - is the new amount of quota space owned by the slave
 * \retval - 0 on success, appropriate error on failure
int qmt_slv_write(const struct lu_env *env, struct thandle *th,
		  struct lquota_entry *lqe, struct dt_object *slv_obj,
		  __u32 flags, __u64 *ver, __u64 granted)
	struct qmt_thread_info *qti = qmt_info(env);
	struct lquota_slv_rec *rec;
	LASSERT(lqe != NULL);
	LASSERT(lqe_is_master(lqe));
	LASSERT(lqe_is_locked(lqe));
	LQUOTA_DEBUG(lqe, "write slv "DFID" granted:%llu",
		     PFID(lu_object_fid(&slv_obj->do_lu)), granted);
	/* never delete the entry, otherwise it will not be transferred
	 * to the slave during reintegration. */
	rec = &qti->qti_slv_rec;
	/* updated space granted to this slave */
	rec->qsr_granted = granted;
	/* write new granted space */
	rc = lquota_disk_write(env, th, slv_obj, &lqe->lqe_id,
			       (struct dt_rec *)rec, flags, ver);
		LQUOTA_ERROR(lqe, "failed to update slave index "DFID" granted:"
			     "%llu", PFID(lu_object_fid(&slv_obj->do_lu)),
 * Check whether new limits are valid for this pool
 * \param lqe - is the quota entry subject to the setquota
 * \param hard - is the new hard limit
 * \param soft - is the new soft limit
int qmt_validate_limits(struct lquota_entry *lqe, __u64 hard, __u64 soft)
	if (hard != 0 && soft > hard)
		/* soft limit must be less than hard limit */
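	/*
	 * Worked example (added, illustrative values only): hard = 100 and
	 * soft = 200 fails the check above and the new limits are refused,
	 * while hard = 0 means "no hard limit" so any soft limit value is
	 * accepted.
	 */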
 * Set/clear edquot flag after quota space allocation/release or settings
 * change. Slaves will be notified of changes via glimpse on per-ID lock
 * \param lqe - is the quota entry to check
 * \param now - is the current time in seconds used for grace time management
void qmt_adjust_edquot(struct lquota_entry *lqe, __u64 now)
	struct qmt_pool_info *pool = lqe2qpi(lqe);
	if (!lqe->lqe_enforced || lqe->lqe_id.qid_uid == 0)
	if (!lqe->lqe_edquot) {
		/* space exhausted flag not set, let's check whether it is time
		if (!qmt_space_exhausted(lqe, now))
			/* the qmt still has available space */
		/* See comment in qmt_adjust_qunit(). LU-4139 */
		if (qmt_hard_exhausted(lqe) ||
		    pool->qpi_key >> 16 != LQUOTA_RES_DT) {
			/* we haven't reached the minimal qunit yet so there is
			 * still hope that the rebalancing process might free
			 * up some quota space */
			if (lqe->lqe_qunit != pool->qpi_least_qunit)
			/* least qunit value not sent to all slaves yet */
			if (lqe->lqe_revoke_time == 0)
			/* Let's give more time to slave to release space */
			if (lqe->lqe_may_rel != 0 &&
			    cfs_time_before_64(cfs_time_shift_64(
						lqe->lqe_revoke_time))
		if (lqe->lqe_qunit > pool->qpi_soft_least_qunit)
		/* set edquot flag */
		lqe->lqe_edquot = true;
		/* space exhausted flag set, let's check whether it is time to
		if (qmt_space_exhausted(lqe, now))
			/* the qmt still has no space */
		if (lqe->lqe_hardlimit != 0 &&
		    lqe->lqe_granted + pool->qpi_least_qunit >
			/* we clear the flag only once at least one least qunit
		/* clear edquot flag */
		lqe->lqe_edquot = false;
	LQUOTA_DEBUG(lqe, "changing edquot flag");
	/* let's notify slave by issuing glimpse on per-ID lock.
	 * the rebalance thread will take care of this */
	qmt_id_lock_notify(pool->qpi_qmt, lqe);
/* Using least_qunit when over the block softlimit will seriously impact
 * write performance, so we need to do some special tweaking on that. */
static __u64 qmt_calc_softlimit(struct lquota_entry *lqe, bool *oversoft)
	struct qmt_pool_info *pool = lqe2qpi(lqe);
	LASSERT(lqe->lqe_softlimit != 0);
	/* No need to do special tweaking for inode limit */
	if (pool->qpi_key >> 16 != LQUOTA_RES_DT)
		return lqe->lqe_softlimit;
	if (lqe->lqe_granted <= lqe->lqe_softlimit +
				pool->qpi_soft_least_qunit) {
		return lqe->lqe_softlimit;
	} else if (lqe->lqe_hardlimit != 0) {
		return lqe->lqe_hardlimit;
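	/*
	 * Worked example (added, illustrative values): with a block soft
	 * limit worth 1GB and qpi_soft_least_qunit worth 4MB, the soft limit
	 * keeps driving the qunit calculation until more than 1GB + 4MB has
	 * been granted; past that point the hard limit (when set) is used
	 * instead and, per the comment in qmt_adjust_qunit() below, the block
	 * qunit is only shrunk to qpi_soft_least_qunit instead of collapsing
	 * to qpi_least_qunit (see LU-4139).
	 */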
 * Try to grant more quota space back to slave.
 * \param lqe - is the quota entry for which we would like to allocate more
 * \param granted - is how much was already granted as part of the request
 * \param spare - is how much unused quota space the slave already owns
 * \retval - how much additional space can be granted to the slave
__u64 qmt_alloc_expand(struct lquota_entry *lqe, __u64 granted, __u64 spare)
	struct qmt_pool_info *pool = lqe2qpi(lqe);
	__u64 remaining, qunit;
	LASSERT(lqe->lqe_enforced && lqe->lqe_qunit != 0);
	slv_cnt = lqe2qpi(lqe)->qpi_slv_nr[lqe->lqe_site->lqs_qtype];
	qunit = lqe->lqe_qunit;
	/* See comment in qmt_adjust_qunit(). LU-4139. */
	if (lqe->lqe_softlimit != 0) {
		remaining = qmt_calc_softlimit(lqe, &oversoft);
			remaining = lqe->lqe_granted +
				    pool->qpi_soft_least_qunit;
		remaining = lqe->lqe_hardlimit;
	if (lqe->lqe_granted >= remaining)
	remaining -= lqe->lqe_granted;
		granted &= (qunit - 1);
		if (remaining > (slv_cnt * qunit) >> 1) {
			/* enough room to grant more space w/o additional
			 * shrinking ... at least for now */
			remaining -= (slv_cnt * qunit) >> 1;
		} else if (qunit != pool->qpi_least_qunit) {
		granted &= (qunit - 1);
			RETURN(min_t(__u64, qunit - spare, remaining));
		RETURN(min_t(__u64, qunit - granted, remaining));
	} while (qunit >= pool->qpi_least_qunit);
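	/*
	 * Rough illustration (added comment; some branches are elided above):
	 * with qunit = 1024 blocks and 300 blocks already counted against the
	 * current qunit chunk (granted & (qunit - 1) == 300), the extra grant
	 * is capped at min(qunit - granted, remaining) = min(724, remaining);
	 * when the slave already owns unused space, the cap becomes
	 * qunit - spare instead.
	 */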
 * Adjust qunit size according to quota limits and total granted count.
 * The caller must have locked the lqe.
 * \param env - the environment passed by the caller
 * \param lqe - is the qid entry to be adjusted
void qmt_adjust_qunit(const struct lu_env *env, struct lquota_entry *lqe)
	struct qmt_pool_info *pool = lqe2qpi(lqe);
	__u64 qunit, limit, qunit2 = 0;
	LASSERT(lqe_is_locked(lqe));
	if (!lqe->lqe_enforced || lqe->lqe_id.qid_uid == 0)
		/* no quota limits */
	/* record how many slaves have already registered */
	slv_cnt = pool->qpi_slv_nr[lqe->lqe_site->lqs_qtype];
	/* wait for at least one slave to join */
	/* Qunit calculation is based on soft limit, if any, hard limit
	 * otherwise. This means that qunit is shrunk to the minimum when
	 * beyond the soft limit. This will impact performance, but that's the
	 * price of an accurate grace time management. */
	if (lqe->lqe_softlimit != 0) {
		/* As a compromise between write performance and grace time
		 * accuracy, the block qunit size will be shrunk to
		 * qpi_soft_least_qunit when over the softlimit. LU-4139. */
		limit = qmt_calc_softlimit(lqe, &oversoft);
			qunit2 = pool->qpi_soft_least_qunit;
			GOTO(done, qunit = qunit2);
	} else if (lqe->lqe_hardlimit != 0) {
		limit = lqe->lqe_hardlimit;
		LQUOTA_ERROR(lqe, "enforced bit set, but neither hard nor soft "
	qunit = lqe->lqe_qunit == 0 ? pool->qpi_least_qunit : lqe->lqe_qunit;
	/* The qunit value is computed as follows: limit / (2 * slv_cnt).
	 * Then 75% of the quota space can be granted with current qunit value.
	 * The remaining 25% are then used with reduced qunit size (by a factor
	 * of 4) which is then divided in a similar manner.
	 *
	 * |---------------------limit---------------------|
	 * |-------limit / 2-------|-limit / 4-|-limit / 4-|
	 * |qunit|qunit|qunit|qunit|           |           |
	 * |----slv_cnt * qunit----|           |           |
	 * |-grow limit-|          |           |           |
	 * |--------------shrink limit---------|           |
	 * |---space granted in qunit chunks---|-remaining-|
	 *                       qunit >>= 2;  |qunit*slv_cnt|qunit*slv_cnt|
	 *                                     |---space in qunit---|remain|
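	/*
	 * Worked example (added, illustrative numbers): with limit equivalent
	 * to 8GB of blocks and slv_cnt = 4, the initial qunit is about
	 * limit / (2 * slv_cnt) = 1GB, so up to 75% of the limit can be
	 * granted in 1GB chunks; once granted space passes ~6GB the shrink
	 * condition below (limit < granted + (slv_cnt * qunit) / 2) triggers
	 * and qunit drops by a factor of 4 to 256MB, and so on down to
	 * qpi_least_qunit.
	 */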
	if (qunit == pool->qpi_least_qunit ||
	    limit >= lqe->lqe_granted + ((slv_cnt * qunit) >> 1)) {
		/* current qunit value still fits, let's see if we can afford to
		 * increase qunit now ...
		 * To increase qunit again, we have to be under 25% */
		while (qunit && limit >= lqe->lqe_granted + 6 * qunit * slv_cnt)
		do_div(qunit, 2 * slv_cnt);
		/* shrink qunit until we find a suitable value */
		while (qunit > pool->qpi_least_qunit &&
		       limit < lqe->lqe_granted + ((slv_cnt * qunit) >> 1))
	if (qunit2 && qunit > qunit2)
	if (lqe->lqe_qunit == qunit)
		/* keep current qunit */
	LQUOTA_DEBUG(lqe, "%s qunit to %llu",
		     lqe->lqe_qunit < qunit ? "increasing" : "decreasing",
	/* store new qunit value */
	swap(lqe->lqe_qunit, qunit);
	/* reset revoke time */
	lqe->lqe_revoke_time = 0;
	if (lqe->lqe_qunit < qunit)
		/* let's notify slave of qunit shrinking */
		qmt_id_lock_notify(pool->qpi_qmt, lqe);
	else if (lqe->lqe_qunit == pool->qpi_least_qunit)
		/* initial qunit value is the smallest one */
		lqe->lqe_revoke_time = cfs_time_current_64();
 * Adjust qunit & edquot flag in case it wasn't initialized already (e.g.
 * limit set while no slaves were connected yet)
void qmt_revalidate(const struct lu_env *env, struct lquota_entry *lqe)
	if (lqe->lqe_qunit == 0) {
		/* lqe was read from disk, but neither qunit, nor edquot flag
		 * were initialized */
		qmt_adjust_qunit(env, lqe);
		if (lqe->lqe_qunit != 0)
			qmt_adjust_edquot(lqe, cfs_time_current_sec());