1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011 Whamcloud, Inc.
36 * This file is part of Lustre, http://www.lustre.org/
37 * Lustre is a trademark of Sun Microsystems, Inc.
41 # define EXPORT_SYMTAB
43 #define DEBUG_SUBSYSTEM S_LQUOTA
46 # include <linux/version.h>
47 # include <linux/module.h>
48 # include <linux/init.h>
49 # include <linux/fs.h>
50 # include <linux/jbd.h>
51 # include <linux/smp_lock.h>
52 # include <linux/buffer_head.h>
53 # include <linux/workqueue.h>
54 # include <linux/mount.h>
55 #else /* __KERNEL__ */
56 # include <liblustre.h>
59 #include <obd_class.h>
60 #include <lustre_mds.h>
61 #include <lustre_dlm.h>
62 #include <lustre_cfg.h>
64 #include <lustre_fsfilt.h>
65 #include <lustre_quota.h>
66 #include <lprocfs_status.h>
67 #include "quota_internal.h"
71 #ifdef HAVE_QUOTA_SUPPORT
/* Rate-limiting state for the "still haven't managed to acquire quota"
 * warning emitted from quota_chk_acq_common(): last_print records when the
 * warning was last printed; last_print_lock serializes updates to it. */
73 static cfs_time_t last_print = 0;
74 static cfs_spinlock_t last_print_lock = CFS_SPIN_LOCK_UNLOCKED;
/*
 * Set up quota on an OST (filter) device: initialize the target's rwsem and
 * quota-checking semaphore, select the v2 quota file format, and create the
 * slave quota context (qctxt_init with a NULL master handler — slaves have
 * no dqacq handler of their own).
 * NOTE(review): interior lines (the `rc` declaration, ENTRY/RETURN and some
 * branches) are elided in this extraction.
 */
76 static int filter_quota_setup(struct obd_device *obd)
79         struct obd_device_target *obt = &obd->u.obt;
82         cfs_init_rwsem(&obt->obt_rwsem);
83         obt->obt_qfmt = LUSTRE_QUOTA_V2;
84         cfs_sema_init(&obt->obt_quotachecking, 1);
85         rc = qctxt_init(obd, NULL);
87                 CERROR("initialize quota context failed! (rc:%d)\n", rc);
/* Tear down the slave quota context created by filter_quota_setup(). */
92 static int filter_quota_cleanup(struct obd_device *obd)
95         qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/*
 * Bind the reverse import of the quota master's export to the slave quota
 * context (lqc_import), so the slave can send dqacq/dqrel requests, then
 * wake any threads waiting for the master and kick off slave recovery.
 * @data is the obd_export of the (re)connected master.
 */
99 static int filter_quota_setinfo(struct obd_device *obd, void *data)
101         struct obd_export *exp = data;
102         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
103         struct obd_import *imp = exp->exp_imp_reverse;
106         LASSERT(imp != NULL);
108         /* setup the quota context import */
109         cfs_spin_lock(&qctxt->lqc_lock);
110         if (qctxt->lqc_import != NULL) {
111                 cfs_spin_unlock(&qctxt->lqc_lock);
112                 if (qctxt->lqc_import == imp)
113                         CDEBUG(D_WARNING, "%s: lqc_import(%p) of obd(%p) was "
114                                "activated already.\n", obd->obd_name, imp, obd);
116                         CERROR("%s: lqc_import(%p:%p) of obd(%p) was "
117                                "activated by others.\n", obd->obd_name,
118                                qctxt->lqc_import, imp, obd);
120                 qctxt->lqc_import = imp;
                /* Mirror the quota-relevant connect flags from the export
                 * onto the reverse import so later code can test the import
                 * alone instead of scanning the export list. */
121                 /* make imp's connect flags equal relative exp's connect flags
122                  * adding it to avoid the scan export list */
123                 imp->imp_connect_data.ocd_connect_flags |=
124                         (exp->exp_connect_flags &
125                          (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
126                 cfs_spin_unlock(&qctxt->lqc_lock);
127                 CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated "
128                        "now.\n", obd->obd_name, imp, obd);
                /* Master is reachable again: wake waiters and release any
                 * over-acquired limits held during the outage. */
130                 cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
131                 /* start quota slave recovery thread. (release high limits) */
132                 qslave_start_recovery(obd, qctxt);
/*
 * Invalidate the slave quota context's master import when the export it was
 * derived from is being destroyed: clear lqc_import under lqc_lock, clean up
 * the reverse import, and interrupt any in-flight dqacq waiters.
 */
137 static int filter_quota_clearinfo(struct obd_export *exp, struct obd_device *obd)
139         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
140         struct obd_import *imp = exp->exp_imp_reverse;
143         /* lquota may be not set up before destroying export, b=14896 */
144         if (!obd->obd_set_up)
147         if (unlikely(imp == NULL))
150         /* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
151          * should be invalid b=12374 */
152         cfs_spin_lock(&qctxt->lqc_lock);
153         if (qctxt->lqc_import == imp) {
154                 qctxt->lqc_import = NULL;
155                 cfs_spin_unlock(&qctxt->lqc_lock);
156                 CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is invalid now.\n",
157                        obd->obd_name, imp, obd);
158                 ptlrpc_cleanup_imp(imp);
                /* Wake dqacq waiters so they notice the master is gone. */
159                 dqacq_interrupt(qctxt);
161                 cfs_spin_unlock(&qctxt->lqc_lock);
/*
 * Toggle quota enforcement for the current thread by raising or lowering
 * CFS_CAP_SYS_RESOURCE: with the capability raised, subsequent block writes
 * bypass quota checks (@ignore selects which branch — elided lines hide the
 * exact conditional structure here).
 */
166 static int filter_quota_enforce(struct obd_device *obd, unsigned int ignore)
170         if (!ll_sb_any_quota_active(obd->u.obt.obt_sb))
174         CDEBUG(D_QUOTA, "blocks will be written with ignoring quota.\n");
175         cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
177         cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
/* Select the uid or gid from an obdo depending on the quota type.
 * NOTE(review): function-like macro evaluates `flag`/`oa` more than once —
 * callers here pass only simple expressions, so this is safe as used. */
183 #define GET_OA_ID(flag, oa) (flag == USRQUOTA ? oa->o_uid : oa->o_gid)
/*
 * Compute the per-uid/per-gid over-quota flags for an obdo: for each quota
 * type, consult the lqs cache (small bunit forces a sync flag) and fall back
 * to an fsfilt Q_GETQUOTA to compare current usage against the hard block
 * limit, setting OBD_FL_NO_USRQUOTA / OBD_FL_NO_GRPQUOTA as appropriate.
 */
184 static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
186         struct obd_device_target *obt = &obd->u.obt;
187         struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
188         int err, cnt, rc = 0;
189         struct obd_quotactl *oqctl;
192         if (!ll_sb_any_quota_active(obt->obt_sb))
195         OBD_ALLOC_PTR(oqctl);
199         /* set over quota flags for a uid/gid */
200         oa->o_valid |= OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA;
201         oa->o_flags &= ~(OBD_FL_NO_USRQUOTA | OBD_FL_NO_GRPQUOTA);
203         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
204                 struct lustre_qunit_size *lqs = NULL;
206                 lqs = quota_search_lqs(LQS_KEY(cnt, GET_OA_ID(cnt, oa)),
210                         CDEBUG(D_QUOTA, "search lqs for %s %d failed, "
212                                cnt == USRQUOTA ? "user" : "group",
213                                GET_OA_ID(cnt, oa), rc);
215                 } else if (lqs == NULL) {
216                         /* continue to check group quota if the file's owner
217                          * doesn't have quota limit. LU-530 */
                        /* Small block unit => we are close to the limit:
                         * force synchronous quota handling for this id. */
220                         cfs_spin_lock(&lqs->lqs_lock);
221                         if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
222                                 oa->o_flags |= (cnt == USRQUOTA) ?
223                                         OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
224                                 cfs_spin_unlock(&lqs->lqs_lock);
225                                 CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
226                                        "sync_blk(%d)\n", lqs->lqs_bunit_sz,
227                                        qctxt->lqc_sync_blk);
228                                 /* this is for quota_search_lqs */
232                         cfs_spin_unlock(&lqs->lqs_lock);
233                         /* this is for quota_search_lqs */
237                 memset(oqctl, 0, sizeof(*oqctl));
239                 oqctl->qc_cmd = Q_GETQUOTA;
240                 oqctl->qc_type = cnt;
241                 oqctl->qc_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
242                 err = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
                        /* getquota failed: drop the corresponding valid bit
                         * so the client does not trust a stale flag. */
246                         oa->o_valid &= ~((cnt == USRQUOTA) ? OBD_MD_FLUSRQUOTA :
248                         CDEBUG(D_QUOTA, "fsfilt getquota for %s %d failed, "
250                                cnt == USRQUOTA ? "user" : "group",
251                                cnt == USRQUOTA ? oa->o_uid : oa->o_gid, err);
                /* Usage (converted to quota blocks) at or over the hard
                 * limit => mark this id as out of quota. */
255                 if (oqctl->qc_dqblk.dqb_bhardlimit &&
256                     (toqb(oqctl->qc_dqblk.dqb_curspace) >=
257                      oqctl->qc_dqblk.dqb_bhardlimit)) {
258                         oa->o_flags |= (cnt == USRQUOTA) ?
259                                 OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
260                         CDEBUG(D_QUOTA, "out of quota for %s %d\n",
261                                cnt == USRQUOTA ? "user" : "group",
262                                cnt == USRQUOTA ? oa->o_uid : oa->o_gid);
270  * check whether the remaining quota of a given uid and gid can satisfy a
271  * block_write or inode_create rpc; return QUOTA_RET_ACQUOTA when more
 * quota must be acquired from the master first.
273 static int quota_check_common(struct obd_device *obd, const unsigned int id[],
274 int pending[], int count, int cycle, int isblk,
275 struct inode *inode, int frags)
277 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
279 struct qunit_data qdata[MAXQUOTAS];
281 int rc = 0, rc2[2] = { 0, 0 };
/* Bail out early if the quota context has been invalidated. */
284 cfs_spin_lock(&qctxt->lqc_lock);
285 if (!qctxt->lqc_valid){
286 cfs_spin_unlock(&qctxt->lqc_lock);
289 cfs_spin_unlock(&qctxt->lqc_lock);
291 for (i = 0; i < MAXQUOTAS; i++) {
292 struct lustre_qunit_size *lqs = NULL;
294 qdata[i].qd_id = id[i];
295 qdata[i].qd_flags = i;
297 QDATA_SET_BLK(&qdata[i]);
298 qdata[i].qd_count = 0;
300 /* ignore root user */
301 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
304 lqs = quota_search_lqs(LQS_KEY(i, id[i]), qctxt, 0);
305 if (lqs == NULL || IS_ERR(lqs))
309 CERROR("can not find lqs for check_common: "
310 "[id %u] [%c] [isblk %d] [count %d] [rc %ld]\n",
311 id[i], i % 2 ? 'g': 'u', isblk, count,
313 RETURN(PTR_ERR(lqs));
316 rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
317 cfs_spin_lock(&lqs->lqs_lock);
/* For block writes, the pending charge is the page count plus the
 * extra metadata blocks the filesystem will need. */
320 pending[i] = count * CFS_PAGE_SIZE;
321 /* in order to complete this write, we need extra
322 * meta blocks. This function can get it through
323 * data needed to be written b=16542 */
326 rc = fsfilt_get_mblk(obd, qctxt->lqc_sb,
330 CERROR("%s: can't get extra "
336 lqs->lqs_bwrite_pending += pending[i];
339 lqs->lqs_iwrite_pending += pending[i];
343 /* if xx_rec < 0, quota is being released and it may be
344 * returned before we get to use it. If we see this
345 * situation, we assume it has already been
346 * returned b=18491 */
347 if (isblk && lqs->lqs_blk_rec < 0) {
348 if (qdata[i].qd_count < -lqs->lqs_blk_rec)
349 qdata[i].qd_count = 0;
351 qdata[i].qd_count += lqs->lqs_blk_rec;
353 if (!isblk && lqs->lqs_ino_rec < 0) {
354 if (qdata[i].qd_count < -lqs->lqs_ino_rec)
355 qdata[i].qd_count = 0;
357 qdata[i].qd_count += lqs->lqs_ino_rec;
360 CDEBUG(D_QUOTA, "[id %u] [%c] [isblk %d] [count %d]"
361 " [lqs pending: %lu] [qd_count: "LPU64"] [metablocks: %d]"
362 " [pending: %d]\n", id[i], i % 2 ? 'g': 'u', isblk, count,
363 isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
364 qdata[i].qd_count, mb, pending[i]);
/* Not enough remaining quota to cover everything already pending:
 * request an acquisition from the master. */
365 if (rc2[i] == QUOTA_RET_OK) {
366 if (isblk && qdata[i].qd_count < lqs->lqs_bwrite_pending)
367 rc2[i] = QUOTA_RET_ACQUOTA;
368 if (!isblk && qdata[i].qd_count <
369 lqs->lqs_iwrite_pending)
370 rc2[i] = QUOTA_RET_ACQUOTA;
373 cfs_spin_unlock(&lqs->lqs_lock);
375 if (lqs->lqs_blk_rec < 0 &&
377 lqs->lqs_bwrite_pending - lqs->lqs_blk_rec - mb)
378 OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);
380 /* When cycle is zero, lqs_*_pending will be changed. We will
381 * get reference of the lqs here and put reference of lqs in
382 * quota_pending_commit b=14784 */
386 /* this is for quota_search_lqs */
390 if (rc2[0] == QUOTA_RET_ACQUOTA || rc2[1] == QUOTA_RET_ACQUOTA)
391 RETURN(QUOTA_RET_ACQUOTA);
/*
 * Return whether a quota limit flag (@flag, e.g. QB_SET/QI_SET) is set in
 * the lqs cache for either the uid or the gid in @id[] — i.e. whether quota
 * checking is worth doing at all for this operation.
 */
396 int quota_is_set(struct obd_device *obd, const unsigned int id[], int flag)
398         struct lustre_qunit_size *lqs;
401         if (!ll_sb_any_quota_active(obd->u.obt.obt_qctxt.lqc_sb))
404         for (i = 0; i < MAXQUOTAS; i++) {
405                 lqs = quota_search_lqs(LQS_KEY(i, id[i]),
406                                        &obd->u.obt.obt_qctxt, 0);
407                 if (lqs && !IS_ERR(lqs)) {
408                         if (lqs->lqs_flags & flag)
/*
 * Check-and-acquire loop shared by block-write and inode-create paths:
 * repeatedly run quota_check_common() and, while it reports
 * QUOTA_RET_ACQUOTA, call @acquire to get more quota from the master,
 * sleeping/retrying on transient errors and giving up on -EDQUOT/-ESRCH
 * or when the client export dies. Elapsed time is accounted to the
 * LQUOTA_WAIT_FOR_CHK_* lprocfs counters.
 */
417 static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
418 const unsigned int id[], int pending[],
419 int count, quota_acquire acquire,
420 struct obd_trans_info *oti, int isblk,
421 struct inode *inode, int frags)
423 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
424 struct timeval work_start;
425 struct timeval work_end;
427 struct l_wait_info lwi = { 0 };
428 int rc = 0, cycle = 0, count_err = 1;
/* No quota limit recorded for either id: nothing to enforce. */
431 if (!quota_is_set(obd, id, isblk ? QB_SET : QI_SET))
434 if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
435 /* If the client has been evicted or if it
436 * timed out and tried to reconnect already,
437 * abort the request immediately */
440 CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
441 pending[USRQUOTA] = pending[GRPQUOTA] = 0;
442 /* Unfortunately, if quota master is too busy to handle the
443 * pre-dqacq in time and quota hash on ost is used up, we
444 * have to wait for the completion of in flight dqacq/dqrel,
445 * in order to get enough quota for write b=12588 */
446 cfs_gettimeofday(&work_start);
447 while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
/* Quota master is unreachable: block until it comes back,
 * parking the service thread's watchdog meanwhile. */
451 cfs_spin_lock(&qctxt->lqc_lock);
452 if (!qctxt->lqc_import && oti) {
453 cfs_spin_unlock(&qctxt->lqc_lock);
454 LASSERT(oti->oti_thread);
455 /* The recovery thread doesn't have watchdog
456 * attached. LU-369 */
457 if (oti->oti_thread->t_watchdog)
458 lc_watchdog_disable(oti->oti_thread->\
460 CDEBUG(D_QUOTA, "sleep for quota master\n");
461 l_wait_event(qctxt->lqc_wait_for_qmaster, check_qm(qctxt),
463 CDEBUG(D_QUOTA, "wake up when quota master is back\n");
464 if (oti->oti_thread->t_watchdog)
465 lc_watchdog_touch(oti->oti_thread->t_watchdog,
466 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
468 cfs_spin_unlock(&qctxt->lqc_lock);
473 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
474 /* after acquire(), we should run quota_check_common again
475 * so that we confirm there are enough quota to finish write */
476 rc = acquire(obd, id, oti, isblk);
478 /* please reference to dqacq_completion for the below */
479 /* a new request is finished, try again */
480 if (rc == QUOTA_REQ_RETURNED) {
481 CDEBUG(D_QUOTA, "finish a quota req, try again\n");
485 /* it is out of quota already */
487 CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
491 /* Related quota has been disabled by master, but enabled by
492 * slave, do not try again. */
493 if (unlikely(rc == -ESRCH)) {
494 CERROR("mismatched quota configuration, stop try.\n");
498 if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
499 /* The client has been evicted or tried to
500 * reconnect already, abort the request */
503 /* -EBUSY and others, wait a second and try again */
506 struct l_wait_info lwi;
508 if (oti && oti->oti_thread && oti->oti_thread->t_watchdog)
509 lc_watchdog_touch(oti->oti_thread->t_watchdog,
510 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
511 CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
/* Back off: sleep up to 10s, growing with the retry cycle. */
514 cfs_waitq_init(&waitq);
515 lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
517 l_wait_event(waitq, 0, &lwi);
/* Warn at most once per 30s about prolonged acquisition failure
 * (last_print/last_print_lock are the file-scope throttle state). */
520 if (rc < 0 || cycle % 10 == 0) {
521 cfs_spin_lock(&last_print_lock);
522 if (last_print == 0 ||
523 cfs_time_before((last_print + cfs_time_seconds(30)),
524 cfs_time_current())) {
525 last_print = cfs_time_current();
526 cfs_spin_unlock(&last_print_lock);
527 CWARN("still haven't managed to acquire quota "
528 "space from the quota master after %d "
529 "retries (err=%d, rc=%d)\n",
530 cycle, count_err - 1, rc);
532 cfs_spin_unlock(&last_print_lock);
536 CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
539 cfs_gettimeofday(&work_end);
540 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
541 lprocfs_counter_add(qctxt->lqc_stats,
542 isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
543 LQUOTA_WAIT_FOR_CHK_INO,
552  * when a block_write or inode_create rpc is finished, adjust the record for
553  * pending blocks and inodes
555 static int quota_pending_commit(struct obd_device *obd, const unsigned int id[],
556                                 int pending[], int isblk)
558         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
559         struct timeval work_start;
560         struct timeval work_end;
563         struct qunit_data qdata[MAXQUOTAS];
566         CDEBUG(D_QUOTA, "commit pending quota for  %s\n", obd->obd_name);
567         CLASSERT(MAXQUOTAS < 4);
568         if (!ll_sb_any_quota_active(qctxt->lqc_sb))
571         cfs_gettimeofday(&work_start);
572         for (i = 0; i < MAXQUOTAS; i++) {
573                 struct lustre_qunit_size *lqs = NULL;
575                 LASSERT(pending[i] >= 0);
579                 qdata[i].qd_id = id[i];
580                 qdata[i].qd_flags = i;
582                 QDATA_SET_BLK(&qdata[i]);
583                 qdata[i].qd_count = 0;
                /* root is never charged, matching quota_check_common() */
585                 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
588                 lqs = quota_search_lqs(LQS_KEY(i, qdata[i].qd_id), qctxt, 0);
589                 if (lqs == NULL || IS_ERR(lqs)) {
590                         CERROR("can not find lqs for pending_commit: "
591                                "[id %u] [%c] [pending %u] [isblk %d] (rc %ld), "
592                                "maybe cause unexpected lqs refcount error!\n",
593                                id[i], i ? 'g': 'u', pending[i], isblk,
594                                lqs ? PTR_ERR(lqs) : -1);
                /* Subtract the now-committed charge recorded in
                 * quota_check_common(); underflow means accounting is
                 * corrupt, hence the LASSERTFs. */
598                 cfs_spin_lock(&lqs->lqs_lock);
600                         LASSERTF(lqs->lqs_bwrite_pending >= pending[i],
601                                  "there are too many blocks! [id %u] [%c] "
602                                  "[bwrite_pending %lu] [pending %u]\n",
603                                  id[i], i % 2 ? 'g' : 'u',
604                                  lqs->lqs_bwrite_pending, pending[i]);
606                         lqs->lqs_bwrite_pending -= pending[i];
608                         LASSERTF(lqs->lqs_iwrite_pending >= pending[i],
609                                  "there are too many files! [id %u] [%c] "
610                                  "[iwrite_pending %lu] [pending %u]\n",
611                                  id[i], i % 2 ? 'g' : 'u',
612                                  lqs->lqs_iwrite_pending, pending[i]);
614                         lqs->lqs_iwrite_pending -= pending[i];
616                 CDEBUG(D_QUOTA, "%s: lqs_pending=%lu pending[%d]=%d isblk=%d\n",
618                        isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
619                        i, pending[i], isblk);
620                 cfs_spin_unlock(&lqs->lqs_lock);
                /* Drop both references: ours and the one taken by
                 * quota_check_common() (b=14784). */
622                 /* for quota_search_lqs in pending_commit */
624                 /* for quota_search_lqs in quota_check */
627         cfs_gettimeofday(&work_end);
628         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
629         lprocfs_counter_add(qctxt->lqc_stats,
630                             isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
631                                     LQUOTA_WAIT_FOR_COMMIT_INO,
/* Module-wide init hook for the MDS quota interface: set up the dquot
 * cache/machinery. */
637 static int mds_quota_init(void)
639         return lustre_dquot_init();
/* Counterpart teardown hook (body elided in this extraction). */
642 static int mds_quota_exit(void)
/*
 * Set up quota on an MDS device: initialize target locks, select the v2
 * quota format for both slave and master contexts, and create the quota
 * context with dqacq_handler so this node can serve dqacq/dqrel requests
 * as quota master. Re-initialization is rejected with a warning.
 */
648 static int mds_quota_setup(struct obd_device *obd)
650         struct obd_device_target *obt = &obd->u.obt;
651         struct mds_obd *mds = &obd->u.mds;
655         if (unlikely(mds->mds_quota)) {
656                 CWARN("try to reinitialize quota context!\n");
660         cfs_init_rwsem(&obt->obt_rwsem);
661         obt->obt_qfmt = LUSTRE_QUOTA_V2;
662         mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
663         cfs_sema_init(&obt->obt_quotachecking, 1);
664         /* initialize quota master and quota context */
665         cfs_init_rwsem(&mds->mds_qonoff_sem);
666         rc = qctxt_init(obd, dqacq_handler);
668                 CERROR("%s: initialize quota context failed! (rc:%d)\n",
/* Tear down the MDS quota context; no-op if quota was never set up. */
676 static int mds_quota_cleanup(struct obd_device *obd)
679         if (unlikely(!obd->u.mds.mds_quota))
682         qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/*
 * Mark the quota master ready/unready in the MDS quota context (the
 * selecting condition is on elided lines; @data presumably carries the
 * ready/unready indication — confirm against callers).
 */
686 static int mds_quota_setinfo(struct obd_device *obd, void *data)
688         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
691         if (unlikely(!obd->u.mds.mds_quota))
695                 QUOTA_MASTER_READY(qctxt);
697                 QUOTA_MASTER_UNREADY(qctxt);
/*
 * Filesystem-level quota cleanup on the MDS: turn admin quota off for both
 * user and group (UGQUOTA) under mds_qonoff_sem; no-op when quota was never
 * set up.
 */
701 static int mds_quota_fs_cleanup(struct obd_device *obd)
703         struct mds_obd *mds = &obd->u.mds;
704         struct obd_quotactl oqctl;
707         if (unlikely(!mds->mds_quota))
711         memset(&oqctl, 0, sizeof(oqctl));
712         oqctl.qc_type = UGQUOTA;
714         cfs_down_write(&mds->mds_qonoff_sem);
715         mds_admin_quota_off(obd, &oqctl);
716         cfs_up_write(&mds->mds_qonoff_sem);
/*
 * Common `acquire` callback passed to quota_chk_acq_common(): request one
 * qunit adjustment (acquire direction) from the master for the given ids.
 */
720 static int quota_acquire_common(struct obd_device *obd, const unsigned int id[],
721                                 struct obd_trans_info *oti, int isblk)
723         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
727         rc = qctxt_adjust_qunit(obd, qctxt, id, isblk, 1, oti);
731 #endif /* HAVE_QUOTA_SUPPORT */
732 #endif /* __KERNEL__ */
/*
 * Client-side (OSC) record noting that a particular uid/gid on a particular
 * client_obd is currently out of quota. Entries live in the qinfo_hash
 * table below, protected by qinfo_list_lock, and are allocated from the
 * qinfo_cachep slab.
 */
734 struct osc_quota_info {
735         cfs_list_t              oqi_hash;       /* hash list */
736         struct client_obd      *oqi_cli;        /* osc obd */
737         unsigned int            oqi_id;         /* uid/gid of a file */
738         short                   oqi_type;       /* quota type */
/* Protects qinfo_hash and every osc_quota_info chained from it. */
741 cfs_spinlock_t qinfo_list_lock = CFS_SPIN_LOCK_UNLOCKED;
743 static cfs_list_t qinfo_hash[NR_DQHASH];
744 /* SLAB cache for client quota context */
745 cfs_mem_cache_t *qinfo_cachep = NULL;
/* Hash a (client_obd, id, quota type) triple into a qinfo_hash bucket
 * index in [0, NR_DQHASH). Declared __const__: result depends only on the
 * arguments, enabling CSE by the compiler. */
747 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
748                          __attribute__((__const__));
750 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
752         unsigned long tmp = ((unsigned long)cli>>6) ^ id;
753         tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
757 /* caller must hold qinfo_list_lock */
/* Link @oqi into its hash bucket (computed from cli/id/type). */
758 static inline void insert_qinfo_hash(struct osc_quota_info *oqi)
760         cfs_list_t *head = qinfo_hash +
761                 hashfn(oqi->oqi_cli, oqi->oqi_id, oqi->oqi_type);
763         LASSERT_SPIN_LOCKED(&qinfo_list_lock);
764         cfs_list_add(&oqi->oqi_hash, head);
767 /* caller must hold qinfo_list_lock */
/* Unlink @oqi from its hash bucket; the entry itself is not freed here. */
768 static inline void remove_qinfo_hash(struct osc_quota_info *oqi)
770         LASSERT_SPIN_LOCKED(&qinfo_list_lock);
771         cfs_list_del_init(&oqi->oqi_hash);
774 /* caller must hold qinfo_list_lock */
/* Look up the osc_quota_info matching (cli, id, type); returns the entry
 * or (per the elided tail) NULL when none is hashed. */
775 static inline struct osc_quota_info *find_qinfo(struct client_obd *cli,
776                                                 unsigned int id, int type)
778         unsigned int hashent = hashfn(cli, id, type);
779         struct osc_quota_info *oqi;
782         LASSERT_SPIN_LOCKED(&qinfo_list_lock);
783         cfs_list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
784                 if (oqi->oqi_cli == cli &&
785                     oqi->oqi_id == id && oqi->oqi_type == type)
/* Allocate and initialize an osc_quota_info from the slab cache (CFS_ALLOC_IO
 * since this runs on the I/O path); field assignments for cli/id are on
 * elided lines. */
791 static struct osc_quota_info *alloc_qinfo(struct client_obd *cli,
792                                           unsigned int id, int type)
794         struct osc_quota_info *oqi;
797         OBD_SLAB_ALLOC(oqi, qinfo_cachep, CFS_ALLOC_IO, sizeof(*oqi));
801         CFS_INIT_LIST_HEAD(&oqi->oqi_hash);
804         oqi->oqi_type = type;
/* Return an osc_quota_info to the slab cache; caller must have unhashed it. */
809 static void free_qinfo(struct osc_quota_info *oqi)
811         OBD_SLAB_FREE(oqi, qinfo_cachep, sizeof(*oqi));
/*
 * Client-side quota pre-check: return NO_QUOTA (instead of the default
 * QUOTA_OK) if either the uid or gid in @qid[] has a "no quota" record in
 * the qinfo hash, letting the OSC fail writes locally without an RPC.
 */
814 int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
817         int cnt, rc = QUOTA_OK;
820         cfs_spin_lock(&qinfo_list_lock);
821         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
822                 struct osc_quota_info *oqi = NULL;
824                 id = (cnt == USRQUOTA) ? qid[USRQUOTA] : qid[GRPQUOTA];
825                 oqi = find_qinfo(cli, id, cnt);
831         cfs_spin_unlock(&qinfo_list_lock);
834                 CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
835                        cnt == USRQUOTA ? "user" : "group", id);
/*
 * Update the client-side "out of quota" cache from reply flags: for each
 * quota type whose valid bit is set, insert a qinfo record when the server
 * reported OBD_FL_NO_*QUOTA, or remove the existing record when quota is
 * available again. The speculative allocation happens outside
 * qinfo_list_lock; insertion/removal is decided under it.
 */
839 int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
840                     obd_flag valid, obd_flag flags)
848         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
849                 struct osc_quota_info *oqi = NULL, *old;
                /* skip types the reply carries no quota information for */
851                 if (!(valid & ((cnt == USRQUOTA) ?
852                     OBD_MD_FLUSRQUOTA : OBD_MD_FLGRPQUOTA)))
855                 id = (cnt == USRQUOTA) ? qid[USRQUOTA] : qid[GRPQUOTA];
856                 noquota = (cnt == USRQUOTA) ?
857                     (flags & OBD_FL_NO_USRQUOTA) : (flags & OBD_FL_NO_GRPQUOTA);
860                         oqi = alloc_qinfo(cli, id, cnt);
863                                 CDEBUG(D_QUOTA, "setdq for %s %d failed, "
865                                        cnt == USRQUOTA ? "user" : "group",
871                 cfs_spin_lock(&qinfo_list_lock);
872                 old = find_qinfo(cli, id, cnt);
874                         remove_qinfo_hash(old);
875                 else if (!old && noquota)
876                         insert_qinfo_hash(oqi);
877                 cfs_spin_unlock(&qinfo_list_lock);
880                         CDEBUG(D_QUOTA, "setdq to remove for %s %d\n",
881                                cnt == USRQUOTA ? "user" : "group", id);
882                 else if (!old && noquota)
883                         CDEBUG(D_QUOTA, "setdq to insert for %s %d\n",
884                                cnt == USRQUOTA ? "user" : "group", id);
/*
 * Drop every qinfo record that belongs to this OSC's client_obd — called
 * on device cleanup so stale per-client entries do not linger in the
 * global hash (freeing of the removed entries is on elided lines).
 */
897 int osc_quota_cleanup(struct obd_device *obd)
899         struct client_obd *cli = &obd->u.cli;
900         struct osc_quota_info *oqi, *n;
904         cfs_spin_lock(&qinfo_list_lock);
905         for (i = 0; i < NR_DQHASH; i++) {
906                 cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
907                         if (oqi->oqi_cli != cli)
909                         remove_qinfo_hash(oqi);
913         cfs_spin_unlock(&qinfo_list_lock);
/*
 * Module-level init for the OSC quota cache: create the osc_quota_info slab
 * and initialize every hash bucket head. Must run before any chkdq/setdq.
 */
918 int osc_quota_init(void)
923         LASSERT(qinfo_cachep == NULL);
924         qinfo_cachep = cfs_mem_cache_create("osc_quota_info",
925                                             sizeof(struct osc_quota_info),
930         for (i = 0; i < NR_DQHASH; i++)
931                 CFS_INIT_LIST_HEAD(qinfo_hash + i);
/*
 * Module-level teardown for the OSC quota cache: unhash every remaining
 * record, then destroy the slab (which must be empty by then — the
 * LASSERTF fires otherwise).
 * NOTE(review): the LASSERTF message misspells "destroy" as "destory";
 * runtime string, so not touched in this comment-only pass.
 */
936 int osc_quota_exit(void)
938         struct osc_quota_info *oqi, *n;
942         cfs_spin_lock(&qinfo_list_lock);
943         for (i = 0; i < NR_DQHASH; i++) {
944                 cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
945                         remove_qinfo_hash(oqi);
949         cfs_spin_unlock(&qinfo_list_lock);
951         rc = cfs_mem_cache_destroy(qinfo_cachep);
952         LASSERTF(rc == 0, "couldn't destory qinfo_cachep slab\n");
959 #ifdef HAVE_QUOTA_SUPPORT
/* Quota operations vector exported by the MDS (quota master) side; wired
 * into the generic quota_interface_t dispatch used by obd code. */
960 quota_interface_t mds_quota_interface = {
961         .quota_init     = mds_quota_init,
962         .quota_exit     = mds_quota_exit,
963         .quota_setup    = mds_quota_setup,
964         .quota_cleanup  = mds_quota_cleanup,
965         .quota_check    = target_quota_check,
966         .quota_ctl      = mds_quota_ctl,
967         .quota_setinfo  = mds_quota_setinfo,
968         .quota_fs_cleanup = mds_quota_fs_cleanup,
969         .quota_recovery = mds_quota_recovery,
970         .quota_adjust   = mds_quota_adjust,
971         .quota_chkquota = quota_chk_acq_common,
972         .quota_acquire  = quota_acquire_common,
973         .quota_pending_commit = quota_pending_commit,
/* Quota operations vector for the OST/filter (quota slave) side; shares the
 * common check/acquire/commit helpers with the MDS vector above. */
976 quota_interface_t filter_quota_interface = {
977         .quota_setup            = filter_quota_setup,
978         .quota_cleanup          = filter_quota_cleanup,
979         .quota_check            = target_quota_check,
980         .quota_ctl              = filter_quota_ctl,
981         .quota_setinfo          = filter_quota_setinfo,
982         .quota_clearinfo        = filter_quota_clearinfo,
983         .quota_enforce          = filter_quota_enforce,
984         .quota_getflag          = filter_quota_getflag,
985         .quota_acquire          = quota_acquire_common,
986         .quota_adjust           = filter_quota_adjust,
987         .quota_chkquota         = quota_chk_acq_common,
988         .quota_adjust_qunit     = filter_quota_adjust_qunit,
989         .quota_pending_commit   = quota_pending_commit,
992 #endif /* __KERNEL__ */
/* Client-side quota vectors: MDC and OSC forward to the generic client
 * quota RPC helpers; LMV and LOV fan requests out across their targets.
 * Only the OSC keeps local state (the qinfo chkdq/setdq cache above). */
994 quota_interface_t mdc_quota_interface = {
995         .quota_ctl      = client_quota_ctl,
996         .quota_check    = client_quota_check,
997         .quota_poll_check = client_quota_poll_check,
1000 quota_interface_t lmv_quota_interface = {
1001         .quota_ctl      = lmv_quota_ctl,
1002         .quota_check    = lmv_quota_check,
1005 quota_interface_t osc_quota_interface = {
1006         .quota_ctl      = client_quota_ctl,
1007         .quota_check    = client_quota_check,
1008         .quota_poll_check = client_quota_poll_check,
1009         .quota_init     = osc_quota_init,
1010         .quota_exit     = osc_quota_exit,
1011         .quota_chkdq    = osc_quota_chkdq,
1012         .quota_setdq    = osc_quota_setdq,
1013         .quota_cleanup  = osc_quota_cleanup,
1014         .quota_adjust_qunit = client_quota_adjust_qunit,
1017 quota_interface_t lov_quota_interface = {
1018         .quota_ctl      = lov_quota_ctl,
1019         .quota_check    = lov_quota_check,
1020         .quota_adjust_qunit = lov_quota_adjust_qunit,
/* /proc directory under which per-device lquota stats are registered. */
1025 cfs_proc_dir_entry_t *lquota_type_proc_dir = NULL;
/*
 * Module entry point: register the lquota procfs directory, initialize the
 * qunit cache, and export the quota interface symbols so obd modules can
 * attach to them (error-unwind paths are on elided lines).
 */
1027 static int __init init_lustre_quota(void)
1029 #ifdef HAVE_QUOTA_SUPPORT
1032         lquota_type_proc_dir = lprocfs_register(OBD_LQUOTA_DEVICENAME,
1035         if (IS_ERR(lquota_type_proc_dir)) {
1036                 CERROR("LProcFS failed in lquota-init\n");
1037                 rc = PTR_ERR(lquota_type_proc_dir);
1041         rc = qunit_cache_init();
1045         PORTAL_SYMBOL_REGISTER(filter_quota_interface);
1046         PORTAL_SYMBOL_REGISTER(mds_quota_interface);
1048         PORTAL_SYMBOL_REGISTER(mdc_quota_interface);
1049         PORTAL_SYMBOL_REGISTER(lmv_quota_interface);
1050         PORTAL_SYMBOL_REGISTER(osc_quota_interface);
1051         PORTAL_SYMBOL_REGISTER(lov_quota_interface);
/*
 * Module exit: unregister all quota interface symbols (server-side ones
 * only under HAVE_QUOTA_SUPPORT), drop the qunit cache and remove the
 * lquota procfs directory — the reverse of init_lustre_quota().
 */
1055 static void /*__exit*/ exit_lustre_quota(void)
1057         PORTAL_SYMBOL_UNREGISTER(mdc_quota_interface);
1058         PORTAL_SYMBOL_UNREGISTER(lmv_quota_interface);
1059         PORTAL_SYMBOL_UNREGISTER(osc_quota_interface);
1060         PORTAL_SYMBOL_UNREGISTER(lov_quota_interface);
1061 #ifdef HAVE_QUOTA_SUPPORT
1062         PORTAL_SYMBOL_UNREGISTER(filter_quota_interface);
1063         PORTAL_SYMBOL_UNREGISTER(mds_quota_interface);
1065         qunit_cache_cleanup();
1067         if (lquota_type_proc_dir)
1068                 lprocfs_remove(&lquota_type_proc_dir);
/* Kernel-module metadata, registration, and symbol exports.
 * NOTE(review): an elided #endif before line 1082 most likely closes the
 * HAVE_QUOTA_SUPPORT block, so the final #endif closes an outer
 * __KERNEL__ guard as its comment says — do not "fix" it. */
1072 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
1073 MODULE_DESCRIPTION("Lustre Quota");
1074 MODULE_LICENSE("GPL");
1076 cfs_module(lquota, "1.0.0", init_lustre_quota, exit_lustre_quota);
1078 #ifdef HAVE_QUOTA_SUPPORT
1079 EXPORT_SYMBOL(mds_quota_interface);
1080 EXPORT_SYMBOL(filter_quota_interface);
1082 EXPORT_SYMBOL(mdc_quota_interface);
1083 EXPORT_SYMBOL(lmv_quota_interface);
1084 EXPORT_SYMBOL(osc_quota_interface);
1085 EXPORT_SYMBOL(lov_quota_interface);
1086 #endif /* __KERNEL */