4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 # define EXPORT_SYMTAB
40 #define DEBUG_SUBSYSTEM S_LQUOTA
43 # include <linux/version.h>
44 # include <linux/module.h>
45 # include <linux/init.h>
46 # include <linux/fs.h>
47 # include <linux/jbd.h>
48 # include <linux/smp_lock.h>
49 # include <linux/buffer_head.h>
50 # include <linux/workqueue.h>
51 # include <linux/mount.h>
52 #else /* __KERNEL__ */
53 # include <liblustre.h>
56 #include <obd_class.h>
57 #include <lustre_mds.h>
58 #include <lustre_dlm.h>
59 #include <lustre_cfg.h>
61 #include <lustre_fsfilt.h>
62 #include <lustre_quota.h>
63 #include <lprocfs_status.h>
64 #include "quota_internal.h"
68 #ifdef HAVE_QUOTA_SUPPORT
/* Rate-limit state for the "still haven't managed to acquire quota space"
 * warning emitted by quota_chk_acq_common(): last_print records the time of
 * the previous warning, last_print_lock serializes updates to it. */
70 static cfs_time_t last_print = 0;
71 static cfs_spinlock_t last_print_lock = CFS_SPIN_LOCK_UNLOCKED;
/* filter_quota_setup(): OST (filter) obd setup hook.
 * Initializes the per-target quota state: the obt rwsem, the quota file
 * format (v2), the quotachecking semaphore, and the slave quota context
 * via qctxt_init() (NULL handler => this target is a quota slave, not the
 * master).  NOTE(review): several lines (braces, return path) are elided
 * in this extraction; the visible error message implies rc is returned. */
73 static int filter_quota_setup(struct obd_device *obd)
76 struct obd_device_target *obt = &obd->u.obt;
79 cfs_init_rwsem(&obt->obt_rwsem);
80 obt->obt_qfmt = LUSTRE_QUOTA_V2;
81 cfs_sema_init(&obt->obt_quotachecking, 1);
82 rc = qctxt_init(obd, NULL);
84 CERROR("initialize quota context failed! (rc:%d)\n", rc);
/* filter_quota_cleanup(): OST obd cleanup hook — tear down the slave
 * quota context initialized by filter_quota_setup(). */
89 static int filter_quota_cleanup(struct obd_device *obd)
92 qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/* filter_quota_setinfo(): bind the reverse import of the quota-master
 * export (@data) to the slave quota context, under lqc_lock.  If an import
 * is already set, only log (reactivation vs. takeover by another import);
 * otherwise install it, copy the quota-related connect flags from the
 * export so the export list need not be scanned later, wake any threads
 * waiting for the quota master, and kick off slave recovery.
 * NOTE(review): some branch/brace lines are elided in this extraction, so
 * the exact if/else structure below cannot be fully confirmed here. */
96 static int filter_quota_setinfo(struct obd_device *obd, void *data)
98 struct obd_export *exp = data;
99 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
100 struct obd_import *imp = exp->exp_imp_reverse;
103 LASSERT(imp != NULL);
105 /* setup the quota context import */
106 cfs_spin_lock(&qctxt->lqc_lock);
107 if (qctxt->lqc_import != NULL) {
108 cfs_spin_unlock(&qctxt->lqc_lock);
109 if (qctxt->lqc_import == imp)
110 CDEBUG(D_WARNING, "%s: lqc_import(%p) of obd(%p) was "
111 "activated already.\n", obd->obd_name, imp, obd);
113 CERROR("%s: lqc_import(%p:%p) of obd(%p) was "
114 "activated by others.\n", obd->obd_name,
115 qctxt->lqc_import, imp, obd);
117 qctxt->lqc_import = imp;
118 /* make imp's connect flags equal relative exp's connect flags
119 * adding it to avoid the scan export list */
120 imp->imp_connect_data.ocd_connect_flags |=
121 (exp->exp_connect_flags &
122 (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
123 cfs_spin_unlock(&qctxt->lqc_lock);
124 CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated "
125 "now.\n", obd->obd_name, imp, obd);
/* wake threads blocked in quota_chk_acq_common() waiting for the master */
127 cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
128 /* start quota slave recovery thread. (release high limits) */
129 qslave_start_recovery(obd, qctxt);
/* filter_quota_clearinfo(): invalidate the quota context's lqc_import when
 * the export whose reverse import it points at is being destroyed.  Bails
 * out early if the obd was never set up (b=14896) or the export has no
 * reverse import.  On a match, lqc_import is cleared under lqc_lock, the
 * import's in-flight RPCs are cleaned up and waiting dqacq requests are
 * interrupted; otherwise the lock is simply dropped. */
134 static int filter_quota_clearinfo(struct obd_export *exp, struct obd_device *obd)
136 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
137 struct obd_import *imp = exp->exp_imp_reverse;
140 /* lquota may be not set up before destroying export, b=14896 */
141 if (!obd->obd_set_up)
144 if (unlikely(imp == NULL))
147 /* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
148 * should be invalid b=12374 */
149 cfs_spin_lock(&qctxt->lqc_lock);
150 if (qctxt->lqc_import == imp) {
151 qctxt->lqc_import = NULL;
152 cfs_spin_unlock(&qctxt->lqc_lock);
153 CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is invalid now.\n",
154 obd->obd_name, imp, obd);
155 ptlrpc_cleanup_imp(imp);
156 dqacq_interrupt(qctxt);
/* no match: just release the lock (else-branch line elided in this view) */
158 cfs_spin_unlock(&qctxt->lqc_lock);
/* filter_quota_enforce(): toggle quota enforcement for the current thread
 * by raising/lowering CFS_CAP_SYS_RESOURCE (root-like bypass of fs quota
 * checks).  No-op when no quota type is active on the backing sb.
 * NOTE(review): the branch selecting raise vs. lower on @ignore is elided
 * here — presumably @ignore != 0 raises the capability; confirm upstream. */
163 static int filter_quota_enforce(struct obd_device *obd, unsigned int ignore)
167 if (!ll_sb_any_quota_active(obd->u.obt.obt_sb))
171 CDEBUG(D_QUOTA, "blocks will be written with ignoring quota.\n");
172 cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
174 cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
/* GET_OA_ID(): pick the uid or gid out of an obdo depending on quota type. */
180 #define GET_OA_ID(flag, oa) (flag == USRQUOTA ? oa->o_uid : oa->o_gid)
/* filter_quota_getflag(): fill the over-quota flags in @oa for the object's
 * owner uid/gid so the client learns it should switch to sync writes.  For
 * each quota type: look up the lqs slab; if its block unit has shrunk to the
 * sync threshold, set the corresponding OBD_FL_NO_*QUOTA flag directly;
 * otherwise fall through to an fsfilt Q_GETQUOTA and set the flag when
 * current usage has reached the block hard limit.  A missing lqs (no limit
 * for the owner) just skips to the next type (LU-530).
 * NOTE(review): allocation-failure and error-path lines are elided in this
 * extraction; OBD_ALLOC_PTR(oqctl) presumably has a NULL check nearby. */
181 static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
183 struct obd_device_target *obt = &obd->u.obt;
184 struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
185 int err, cnt, rc = 0;
186 struct obd_quotactl *oqctl;
189 if (!ll_sb_any_quota_active(obt->obt_sb))
192 OBD_ALLOC_PTR(oqctl);
196 /* set over quota flags for a uid/gid */
197 oa->o_valid |= OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA;
198 oa->o_flags &= ~(OBD_FL_NO_USRQUOTA | OBD_FL_NO_GRPQUOTA);
200 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
201 struct lustre_qunit_size *lqs = NULL;
203 lqs = quota_search_lqs(LQS_KEY(cnt, GET_OA_ID(cnt, oa)),
207 CDEBUG(D_QUOTA, "search lqs for %s %d failed, "
209 cnt == USRQUOTA ? "user" : "group",
210 GET_OA_ID(cnt, oa), rc);
212 } else if (lqs == NULL) {
213 /* continue to check group quota if the file's owner
214 * doesn't have quota limit. LU-530 */
/* lqs found: decide from the cached block-unit size, under lqs_lock */
217 cfs_spin_lock(&lqs->lqs_lock);
218 if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
219 oa->o_flags |= (cnt == USRQUOTA) ?
220 OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
221 cfs_spin_unlock(&lqs->lqs_lock);
222 CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
223 "sync_blk(%d)\n", lqs->lqs_bunit_sz,
224 qctxt->lqc_sync_blk);
225 /* this is for quota_search_lqs */
229 cfs_spin_unlock(&lqs->lqs_lock);
230 /* this is for quota_search_lqs */
/* slow path: query the on-disk quota record through fsfilt */
234 memset(oqctl, 0, sizeof(*oqctl));
236 oqctl->qc_cmd = Q_GETQUOTA;
237 oqctl->qc_type = cnt;
238 oqctl->qc_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
239 err = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
/* on quotactl failure, drop the validity bit for this quota type */
243 oa->o_valid &= ~((cnt == USRQUOTA) ? OBD_MD_FLUSRQUOTA :
245 CDEBUG(D_QUOTA, "fsfilt getquota for %s %d failed, "
247 cnt == USRQUOTA ? "user" : "group",
248 cnt == USRQUOTA ? oa->o_uid : oa->o_gid, err);
/* usage at/over the block hard limit => mark this id over-quota */
252 if (oqctl->qc_dqblk.dqb_bhardlimit &&
253 (toqb(oqctl->qc_dqblk.dqb_curspace) >=
254 oqctl->qc_dqblk.dqb_bhardlimit)) {
255 oa->o_flags |= (cnt == USRQUOTA) ?
256 OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
257 CDEBUG(D_QUOTA, "out of quota for %s %d\n",
258 cnt == USRQUOTA ? "user" : "group",
259 cnt == USRQUOTA ? oa->o_uid : oa->o_gid);
/*
267 * check whether the left quota of certain uid and gid can satisfy a block_write
268 * or inode_create rpc. When need to acquire quota, return QUOTA_RET_ACQUOTA
 *
 * NOTE(review): several structural lines (braces, some else-branches,
 * RETURN paths, the lqs_getref call mentioned by the b=14784 comment) are
 * elided in this extraction; the per-id flow below is reconstructed from
 * what is visible.  For each of uid/gid: build a qunit_data, skip root
 * uid, look up the lqs, compute remaining quota, account the pending
 * write/create (blocks need extra metadata blocks, b=16542), fold in any
 * in-flight releases (b=18491), and flag QUOTA_RET_ACQUOTA when pending
 * usage exceeds what remains.
 */
270 static int quota_check_common(struct obd_device *obd, const unsigned int id[],
271 int pending[], int count, int cycle, int isblk,
272 struct inode *inode, int frags)
274 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
276 struct qunit_data qdata[MAXQUOTAS];
278 int rc = 0, rc2[2] = { 0, 0 };
/* bail out early if the quota context has been invalidated */
281 cfs_spin_lock(&qctxt->lqc_lock);
282 if (!qctxt->lqc_valid){
283 cfs_spin_unlock(&qctxt->lqc_lock);
286 cfs_spin_unlock(&qctxt->lqc_lock);
288 for (i = 0; i < MAXQUOTAS; i++) {
289 struct lustre_qunit_size *lqs = NULL;
291 qdata[i].qd_id = id[i];
292 qdata[i].qd_flags = i;
294 QDATA_SET_BLK(&qdata[i]);
295 qdata[i].qd_count = 0;
297 /* ignore root user */
298 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
301 lqs = quota_search_lqs(LQS_KEY(i, id[i]), qctxt, 0);
302 if (lqs == NULL || IS_ERR(lqs))
306 CERROR("can not find lqs for check_common: "
307 "[id %u] [%c] [isblk %d] [count %d] [rc %ld]\n",
308 id[i], i % 2 ? 'g': 'u', isblk, count,
310 RETURN(PTR_ERR(lqs));
313 rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
314 cfs_spin_lock(&lqs->lqs_lock);
/* block write: pending bytes = page count, plus extra meta blocks */
317 pending[i] = count * CFS_PAGE_SIZE;
318 /* in order to complete this write, we need extra
319 * meta blocks. This function can get it through
320 * data needed to be written b=16542 */
323 rc = fsfilt_get_mblk(obd, qctxt->lqc_sb,
327 CERROR("%s: can't get extra "
333 LASSERTF(pending[i] >= 0, "pending is not valid"
334 ", count=%d, mb=%d\n", count, mb);
335 lqs->lqs_bwrite_pending += pending[i];
/* inode create path accounts into iwrite_pending instead */
338 lqs->lqs_iwrite_pending += pending[i];
342 /* if xx_rec < 0, that means quota are releasing,
343 * and it may return before we use quota. So if
344 * we find this situation, we assuming it has
345 * returned b=18491 */
346 if (isblk && lqs->lqs_blk_rec < 0) {
347 if (qdata[i].qd_count < -lqs->lqs_blk_rec)
348 qdata[i].qd_count = 0;
350 qdata[i].qd_count += lqs->lqs_blk_rec;
352 if (!isblk && lqs->lqs_ino_rec < 0) {
353 if (qdata[i].qd_count < -lqs->lqs_ino_rec)
354 qdata[i].qd_count = 0;
356 qdata[i].qd_count += lqs->lqs_ino_rec;
359 CDEBUG(D_QUOTA, "[id %u] [%c] [isblk %d] [count %d]"
360 " [lqs pending: %lu] [qd_count: "LPU64"] [metablocks: %d]"
361 " [pending: %d]\n", id[i], i % 2 ? 'g': 'u', isblk, count,
362 isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
363 qdata[i].qd_count, mb, pending[i]);
/* remaining quota cannot cover pending usage => need to acquire more */
364 if (rc2[i] == QUOTA_RET_OK) {
365 if (isblk && qdata[i].qd_count < lqs->lqs_bwrite_pending)
366 rc2[i] = QUOTA_RET_ACQUOTA;
367 if (!isblk && qdata[i].qd_count <
368 lqs->lqs_iwrite_pending)
369 rc2[i] = QUOTA_RET_ACQUOTA;
372 cfs_spin_unlock(&lqs->lqs_lock);
/* fault-injection point to exercise delayed quota release */
374 if (lqs->lqs_blk_rec < 0 &&
376 lqs->lqs_bwrite_pending - lqs->lqs_blk_rec - mb)
377 OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);
379 /* When cycle is zero, lqs_*_pending will be changed. We will
380 * get reference of the lqs here and put reference of lqs in
381 * quota_pending_commit b=14784 */
385 /* this is for quota_search_lqs */
389 if (rc2[0] == QUOTA_RET_ACQUOTA || rc2[1] == QUOTA_RET_ACQUOTA)
390 RETURN(QUOTA_RET_ACQUOTA);
/* quota_is_set(): return whether a block (QB_SET) or inode (QI_SET) quota
 * limit is configured for either of the two ids (uid/gid).  Checks the
 * cached lqs entries only; returns early if no quota type is active on
 * the backing sb.  NOTE(review): the lqs_putref/return lines are elided
 * in this extraction. */
395 int quota_is_set(struct obd_device *obd, const unsigned int id[], int flag)
397 struct lustre_qunit_size *lqs;
400 if (!ll_sb_any_quota_active(obd->u.obt.obt_qctxt.lqc_sb))
403 for (i = 0; i < MAXQUOTAS; i++) {
404 lqs = quota_search_lqs(LQS_KEY(i, id[i]),
405 &obd->u.obt.obt_qctxt, 0);
406 if (lqs && !IS_ERR(lqs)) {
407 if (lqs->lqs_flags & flag)
/* quota_chk_acq_common(): check-and-acquire loop shared by MDS and OST,
 * wired into both quota_interface tables as .quota_chkquota.  Repeatedly
 * runs quota_check_common(); while it returns QUOTA_RET_ACQUOTA, calls
 * @acquire (a dqacq RPC to the quota master) and retries.  Handles: no
 * limit set (fast exit), evicted/reconnecting client, master temporarily
 * gone (sleep on lqc_wait_for_qmaster with the service watchdog disabled,
 * LU-369), -EDQUOT, mismatched master/slave config (-ESRCH), and -EBUSY
 * style transient errors with a growing backoff.  Elapsed time is charged
 * to the LQUOTA_WAIT_FOR_CHK_{BLK,INO} lprocfs counters.
 * NOTE(review): many loop/brace/return lines are elided in this
 * extraction; the retry structure is inferred from the visible lines. */
416 static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
417 const unsigned int id[], int pending[],
418 int count, quota_acquire acquire,
419 struct obd_trans_info *oti, int isblk,
420 struct inode *inode, int frags)
422 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
423 struct timeval work_start;
424 struct timeval work_end;
426 struct l_wait_info lwi = { 0 };
427 int rc = 0, cycle = 0, count_err = 1;
/* nothing to enforce for these ids => skip the whole dance */
430 if (!quota_is_set(obd, id, isblk ? QB_SET : QI_SET))
433 if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
434 /* If the client has been evicted or if it
435 * timed out and tried to reconnect already,
436 * abort the request immediately */
439 CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
440 pending[USRQUOTA] = pending[GRPQUOTA] = 0;
441 /* Unfortunately, if quota master is too busy to handle the
442 * pre-dqacq in time and quota hash on ost is used up, we
443 * have to wait for the completion of in flight dqacq/dqrel,
444 * in order to get enough quota for write b=12588 */
445 cfs_gettimeofday(&work_start);
446 while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
/* no master import yet: recovery-thread path sleeps until it comes back */
450 cfs_spin_lock(&qctxt->lqc_lock);
451 if (!qctxt->lqc_import && oti) {
452 cfs_spin_unlock(&qctxt->lqc_lock);
453 LASSERT(oti->oti_thread);
454 /* The recovery thread doesn't have watchdog
455 * attached. LU-369 */
456 if (oti->oti_thread->t_watchdog)
457 lc_watchdog_disable(oti->oti_thread->\
459 CDEBUG(D_QUOTA, "sleep for quota master\n");
460 l_wait_event(qctxt->lqc_wait_for_qmaster, check_qm(qctxt),
462 CDEBUG(D_QUOTA, "wake up when quota master is back\n");
463 if (oti->oti_thread->t_watchdog)
464 lc_watchdog_touch(oti->oti_thread->t_watchdog,
465 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
467 cfs_spin_unlock(&qctxt->lqc_lock);
472 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
473 /* after acquire(), we should run quota_check_common again
474 * so that we confirm there are enough quota to finish write */
475 rc = acquire(obd, id, oti, isblk);
477 /* please reference to dqacq_completion for the below */
478 /* a new request is finished, try again */
479 if (rc == QUOTA_REQ_RETURNED) {
480 CDEBUG(D_QUOTA, "finish a quota req, try again\n");
484 /* it is out of quota already */
486 CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
490 /* Related quota has been disabled by master, but enabled by
491 * slave, do not try again. */
492 if (unlikely(rc == -ESRCH)) {
493 CERROR("mismatched quota configuration, stop try.\n");
497 if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
498 /* The client has been evicted or tried to
499 * to reconnect already, abort the request */
502 /* -EBUSY and others, wait a second and try again */
505 struct l_wait_info lwi;
507 if (oti && oti->oti_thread && oti->oti_thread->t_watchdog)
508 lc_watchdog_touch(oti->oti_thread->t_watchdog,
509 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
510 CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
/* backoff: sleep up to min(cycle, 10) seconds before retrying */
513 cfs_waitq_init(&waitq);
514 lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
516 l_wait_event(waitq, 0, &lwi);
/* warn at most once per 30s that the master is not granting space */
519 if (rc < 0 || cycle % 10 == 0) {
520 cfs_spin_lock(&last_print_lock);
521 if (last_print == 0 ||
522 cfs_time_before((last_print + cfs_time_seconds(30)),
523 cfs_time_current())) {
524 last_print = cfs_time_current();
525 cfs_spin_unlock(&last_print_lock);
526 CWARN("still haven't managed to acquire quota "
527 "space from the quota master after %d "
528 "retries (err=%d, rc=%d)\n",
529 cycle, count_err - 1, rc);
531 cfs_spin_unlock(&last_print_lock);
535 CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
/* account the total wall-clock time spent in this check/acquire loop */
538 cfs_gettimeofday(&work_end);
539 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
540 lprocfs_counter_add(qctxt->lqc_stats,
541 isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
542 LQUOTA_WAIT_FOR_CHK_INO,
/*
551 * when a block_write or inode_create rpc is finished, adjust the record for
552 * pending blocks and inodes
 *
 * Counterpart of quota_check_common(): subtracts pending[i] from the
 * lqs_{b,i}write_pending counters charged there, and drops the lqs
 * references taken by both this lookup and the one in quota_check
 * (b=14784).  Elapsed time goes to LQUOTA_WAIT_FOR_COMMIT_{BLK,INO}.
 * NOTE(review): some brace/continue/putref lines are elided in this
 * extraction.
 */
554 static int quota_pending_commit(struct obd_device *obd, const unsigned int id[],
555 int pending[], int isblk)
557 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
558 struct timeval work_start;
559 struct timeval work_end;
562 struct qunit_data qdata[MAXQUOTAS];
565 CDEBUG(D_QUOTA, "commit pending quota for %s\n", obd->obd_name);
566 CLASSERT(MAXQUOTAS < 4);
567 if (!ll_sb_any_quota_active(qctxt->lqc_sb))
570 cfs_gettimeofday(&work_start);
571 for (i = 0; i < MAXQUOTAS; i++) {
572 struct lustre_qunit_size *lqs = NULL;
574 LASSERT(pending[i] >= 0);
578 qdata[i].qd_id = id[i];
579 qdata[i].qd_flags = i;
581 QDATA_SET_BLK(&qdata[i]);
582 qdata[i].qd_count = 0;
/* root uid was never charged in quota_check_common; skip it here too */
584 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
587 lqs = quota_search_lqs(LQS_KEY(i, qdata[i].qd_id), qctxt, 0);
588 if (lqs == NULL || IS_ERR(lqs)) {
589 CERROR("can not find lqs for pending_commit: "
590 "[id %u] [%c] [pending %u] [isblk %d] (rc %ld), "
591 "maybe cause unexpected lqs refcount error!\n",
592 id[i], i ? 'g': 'u', pending[i], isblk,
593 lqs ? PTR_ERR(lqs) : -1);
597 cfs_spin_lock(&lqs->lqs_lock);
/* pending must never exceed what quota_check_common charged */
599 LASSERTF(lqs->lqs_bwrite_pending >= pending[i],
600 "there are too many blocks! [id %u] [%c] "
601 "[bwrite_pending %lu] [pending %u]\n",
602 id[i], i % 2 ? 'g' : 'u',
603 lqs->lqs_bwrite_pending, pending[i]);
605 lqs->lqs_bwrite_pending -= pending[i];
607 LASSERTF(lqs->lqs_iwrite_pending >= pending[i],
608 "there are too many files! [id %u] [%c] "
609 "[iwrite_pending %lu] [pending %u]\n",
610 id[i], i % 2 ? 'g' : 'u',
611 lqs->lqs_iwrite_pending, pending[i]);
613 lqs->lqs_iwrite_pending -= pending[i];
615 CDEBUG(D_QUOTA, "%s: lqs_pending=%lu pending[%d]=%d isblk=%d\n",
617 isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
618 i, pending[i], isblk);
619 cfs_spin_unlock(&lqs->lqs_lock);
621 /* for quota_search_lqs in pending_commit */
623 /* for quota_search_lqs in quota_check */
626 cfs_gettimeofday(&work_end);
627 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
628 lprocfs_counter_add(qctxt->lqc_stats,
629 isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
630 LQUOTA_WAIT_FOR_COMMIT_INO,
/* mds_quota_init(): module-level init hook for the MDS quota interface —
 * sets up the lustre dquot cache.  mds_quota_exit() below is its
 * counterpart (its body is elided in this extraction; presumably it calls
 * lustre_dquot_exit() — confirm upstream). */
636 static int mds_quota_init(void)
638 return lustre_dquot_init();
641 static int mds_quota_exit(void)
/* mds_quota_setup(): MDS obd setup hook — like filter_quota_setup() but
 * for the quota master: also sets the quota-info version and the
 * qonoff rwsem, and registers dqacq_handler with qctxt_init() so this
 * target answers dqacq/dqrel requests.  Warns and bails (return path
 * elided here) if quota was already initialized. */
647 static int mds_quota_setup(struct obd_device *obd)
649 struct obd_device_target *obt = &obd->u.obt;
650 struct mds_obd *mds = &obd->u.mds;
654 if (unlikely(mds->mds_quota)) {
655 CWARN("try to reinitialize quota context!\n");
659 cfs_init_rwsem(&obt->obt_rwsem);
660 obt->obt_qfmt = LUSTRE_QUOTA_V2;
661 mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
662 cfs_sema_init(&obt->obt_quotachecking, 1);
663 /* initialize quota master and quota context */
664 cfs_init_rwsem(&mds->mds_qonoff_sem);
665 rc = qctxt_init(obd, dqacq_handler);
667 CERROR("%s: initialize quota context failed! (rc:%d)\n",
/* mds_quota_cleanup(): MDS obd cleanup hook — tear down the quota context
 * set up by mds_quota_setup(); no-op if quota was never initialized. */
675 static int mds_quota_cleanup(struct obd_device *obd)
678 if (unlikely(!obd->u.mds.mds_quota))
681 qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/* mds_quota_setinfo(): mark the master quota context ready or unready.
 * NOTE(review): the condition choosing READY vs. UNREADY (presumably on
 * @data being non-NULL) is elided in this extraction — confirm upstream. */
685 static int mds_quota_setinfo(struct obd_device *obd, void *data)
687 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
690 if (unlikely(!obd->u.mds.mds_quota))
694 QUOTA_MASTER_READY(qctxt);
696 QUOTA_MASTER_UNREADY(qctxt);
/* mds_quota_fs_cleanup(): at filesystem cleanup, turn off administrative
 * quotas for both user and group (UGQUOTA) under the qonoff write lock.
 * No-op if quota was never initialized on this MDS. */
700 static int mds_quota_fs_cleanup(struct obd_device *obd)
702 struct mds_obd *mds = &obd->u.mds;
703 struct obd_quotactl oqctl;
706 if (unlikely(!mds->mds_quota))
710 memset(&oqctl, 0, sizeof(oqctl));
711 oqctl.qc_type = UGQUOTA;
713 cfs_down_write(&mds->mds_qonoff_sem);
714 mds_admin_quota_off(obd, &oqctl);
715 cfs_up_write(&mds->mds_qonoff_sem);
/* quota_acquire_common(): the `acquire` callback passed to
 * quota_chk_acq_common() — requests one more qunit of block or inode
 * quota from the master via qctxt_adjust_qunit(). */
719 static int quota_acquire_common(struct obd_device *obd, const unsigned int id[],
720 struct obd_trans_info *oti, int isblk)
722 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
726 rc = qctxt_adjust_qunit(obd, qctxt, id, isblk, 1, oti);
730 #endif /* HAVE_QUOTA_SUPPORT */
731 #endif /* __KERNEL__ */
734 #ifdef HAVE_QUOTA_SUPPORT
/* Quota interface vtable for the MDS (quota master): wires the hooks
 * defined above plus master-side handlers (quota_ctl, recovery, adjust)
 * defined elsewhere in the module. */
735 quota_interface_t mds_quota_interface = {
736 .quota_init = mds_quota_init,
737 .quota_exit = mds_quota_exit,
738 .quota_setup = mds_quota_setup,
739 .quota_cleanup = mds_quota_cleanup,
740 .quota_check = target_quota_check,
741 .quota_ctl = mds_quota_ctl,
742 .quota_setinfo = mds_quota_setinfo,
743 .quota_fs_cleanup = mds_quota_fs_cleanup,
744 .quota_recovery = mds_quota_recovery,
745 .quota_adjust = mds_quota_adjust,
746 .quota_chkquota = quota_chk_acq_common,
747 .quota_acquire = quota_acquire_common,
748 .quota_pending_commit = quota_pending_commit,
/* Quota interface vtable for the OST/filter (quota slave): shares the
 * common check/acquire/commit paths with the MDS table but uses the
 * filter-specific setup/info/enforce hooks. */
751 quota_interface_t filter_quota_interface = {
752 .quota_setup = filter_quota_setup,
753 .quota_cleanup = filter_quota_cleanup,
754 .quota_check = target_quota_check,
755 .quota_ctl = filter_quota_ctl,
756 .quota_setinfo = filter_quota_setinfo,
757 .quota_clearinfo = filter_quota_clearinfo,
758 .quota_enforce = filter_quota_enforce,
759 .quota_getflag = filter_quota_getflag,
760 .quota_acquire = quota_acquire_common,
761 .quota_adjust = filter_quota_adjust,
762 .quota_chkquota = quota_chk_acq_common,
763 .quota_adjust_qunit = filter_quota_adjust_qunit,
764 .quota_pending_commit = quota_pending_commit,
767 #endif /* __KERNEL__ */
/* /proc directory for lquota stats; registered in init_lustre_quota(),
 * removed in exit_lustre_quota(). */
771 cfs_proc_dir_entry_t *lquota_type_proc_dir = NULL;
/* init_lustre_quota(): module entry point — register the lquota lprocfs
 * directory, initialize the qunit cache, and export the two interface
 * tables.  NOTE(review): error-unwind lines (cache-init failure path,
 * final return) are elided in this extraction. */
773 static int __init init_lustre_quota(void)
775 #ifdef HAVE_QUOTA_SUPPORT
778 lquota_type_proc_dir = lprocfs_register(OBD_LQUOTA_DEVICENAME,
781 if (IS_ERR(lquota_type_proc_dir)) {
782 CERROR("LProcFS failed in lquota-init\n");
783 rc = PTR_ERR(lquota_type_proc_dir);
787 rc = qunit_cache_init();
791 PORTAL_SYMBOL_REGISTER(filter_quota_interface);
792 PORTAL_SYMBOL_REGISTER(mds_quota_interface);
/* exit_lustre_quota(): module exit — mirror of init_lustre_quota():
 * unregister the interface symbols, free the qunit cache, and remove the
 * lprocfs directory if it was created. */
797 static void /*__exit*/ exit_lustre_quota(void)
799 #ifdef HAVE_QUOTA_SUPPORT
800 PORTAL_SYMBOL_UNREGISTER(filter_quota_interface);
801 PORTAL_SYMBOL_UNREGISTER(mds_quota_interface);
803 qunit_cache_cleanup();
805 if (lquota_type_proc_dir)
806 lprocfs_remove(&lquota_type_proc_dir);
/* Kernel module metadata and entry/exit registration, plus symbol exports
 * for the two quota interface tables consumed by the mds/obdfilter
 * modules. */
810 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
811 MODULE_DESCRIPTION("Lustre Quota");
812 MODULE_LICENSE("GPL");
814 cfs_module(lquota, "1.0.0", init_lustre_quota, exit_lustre_quota);
816 #ifdef HAVE_QUOTA_SUPPORT
817 EXPORT_SYMBOL(mds_quota_interface);
818 EXPORT_SYMBOL(filter_quota_interface);
820 #endif /* __KERNEL */