1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 # define EXPORT_SYMTAB
40 #define DEBUG_SUBSYSTEM S_LQUOTA
43 # include <linux/version.h>
44 # include <linux/module.h>
45 # include <linux/init.h>
46 # include <linux/fs.h>
47 # include <linux/jbd.h>
48 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
49 # include <linux/smp_lock.h>
50 # include <linux/buffer_head.h>
51 # include <linux/workqueue.h>
52 # include <linux/mount.h>
54 # include <linux/locks.h>
56 #else /* __KERNEL__ */
57 # include <liblustre.h>
60 #include <obd_class.h>
61 #include <lustre_mds.h>
62 #include <lustre_dlm.h>
63 #include <lustre_cfg.h>
65 #include <lustre_fsfilt.h>
66 #include <lustre_quota.h>
67 #include <lprocfs_status.h>
68 #include "quota_internal.h"
72 #ifdef HAVE_QUOTA_SUPPORT
/* Rate-limit state for the "still haven't managed to acquire quota" warning
 * emitted from quota_chk_acq_common(): last_print is the time the warning
 * last fired, last_print_lock serializes access to it across threads. */
74 static cfs_time_t last_print = 0;
75 static spinlock_t last_print_lock = SPIN_LOCK_UNLOCKED;
/*
 * filter_quota_setup(): set up the quota slave context on an OST/filter
 * device.  Initializes the target's rwsem, selects the v2 on-disk quota
 * format, marks quotacheck as idle (count 1), and initializes the quota
 * context with a NULL dqacq handler -- the OST is a quota slave and does
 * not service acquire requests itself.  Returns the qctxt_init() result.
 */
77 static int filter_quota_setup(struct obd_device *obd)
80         struct obd_device_target *obt = &obd->u.obt;
83         init_rwsem(&obt->obt_rwsem);
84         obt->obt_qfmt = LUSTRE_QUOTA_V2;
85         atomic_set(&obt->obt_quotachecking, 1);
86         rc = qctxt_init(obd, NULL);
88                 CERROR("initialize quota context failed! (rc:%d)\n", rc);
/* filter_quota_cleanup(): tear down the OST's quota slave context
 * (counterpart of filter_quota_setup). */
93 static int filter_quota_cleanup(struct obd_device *obd)
96         qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/*
 * filter_quota_setinfo(): called when the quota master (re)connects to this
 * OST.  Publishes the export's reverse import as the quota context's
 * lqc_import (under lqc_lock), mirrors the quota-related connect flags
 * (QUOTA64 / CHANGE_QS) onto that import so later code can read them
 * without scanning the export list, wakes any thread blocked waiting for
 * the quota master, and starts slave recovery to release over-acquired
 * limits.
 * @data: assumed to be a struct obd_export * -- TODO confirm with callers.
 */
100 static int filter_quota_setinfo(struct obd_device *obd, void *data)
102         struct obd_export *exp = data;
103         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
104         struct obd_import *imp;
107         /* setup the quota context import */
108         spin_lock(&qctxt->lqc_lock);
109         qctxt->lqc_import = exp->exp_imp_reverse;
110         spin_unlock(&qctxt->lqc_lock);
111         CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated now, \n",
112                obd->obd_name,exp->exp_imp_reverse, obd);
114         /* make imp's connect flags equal relative exp's connect flags
115          * adding it to avoid the scan export list */
117         imp = qctxt->lqc_import;
119                 imp->imp_connect_data.ocd_connect_flags |=
120                 (exp->exp_connect_flags &
121                  (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
        /* wake up threads in quota_chk_acq_common() that sleep on
         * lqc_wait_for_qmaster until the master is reachable again */
123         cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
124         /* start quota slave recovery thread. (release high limits) */
125         qslave_start_recovery(obd, qctxt);
/*
 * filter_quota_clearinfo(): invalidate the quota context's cached master
 * import when the export that owns its reverse import is being destroyed.
 * Without this, lqc_import would dangle after the master disconnects
 * (b=12374).
 * NOTE(review): lqc_import is compared against exp->exp_imp_reverse before
 * lqc_lock is taken; only the NULL assignment is under the lock.  Looks
 * racy at first glance -- confirm whether an outer lock or single-threaded
 * teardown makes this safe.
 */
129 static int filter_quota_clearinfo(struct obd_export *exp, struct obd_device *obd)
131         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
134         /* lquota may be not set up before destroying export, b=14896 */
135         if (!obd->obd_set_up)
138         /* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
139          * should be invalid b=12374 */
140         if (qctxt->lqc_import && qctxt->lqc_import == exp->exp_imp_reverse) {
141                 spin_lock(&qctxt->lqc_lock);
142                 qctxt->lqc_import = NULL;
143                 spin_unlock(&qctxt->lqc_lock);
144                 CDEBUG(D_QUOTA, "%s: lqc_import of obd(%p) is invalid now.\n",
/*
 * filter_quota_enforce(): toggle quota enforcement for the current thread's
 * I/O.  When @ignore is set, CFS_CAP_SYS_RESOURCE is raised so writes
 * bypass quota checks; otherwise the capability is lowered to re-enable
 * enforcement.  No-op when no quota type is enabled on the backing sb.
 */
150 static int filter_quota_enforce(struct obd_device *obd, unsigned int ignore)
154         if (!sb_any_quota_enabled(obd->u.obt.obt_sb))
158                 CDEBUG(D_QUOTA, "blocks will be written with ignoring quota.\n");
159                 cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
161                 cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
/*
 * filter_quota_getflag(): report per-uid/per-gid over-quota state to the
 * client by setting OBD_FL_NO_USRQUOTA / OBD_FL_NO_GRPQUOTA in @oa->o_flags
 * (with OBD_MD_FLUSRQUOTA / OBD_MD_FLGRPQUOTA marked valid).  For each
 * quota type it first checks the cached lqs: if the bunit has shrunk to
 * the sync threshold the flag is forced on to make the client write
 * synchronously; otherwise it issues a Q_GETQUOTA to the backing fs and
 * sets the flag when usage has reached the hard block limit.
 */
167 static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
169         struct obd_device_target *obt = &obd->u.obt;
170         struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
171         int err, cnt, rc = 0;
172         struct obd_quotactl *oqctl;
175         if (!sb_any_quota_enabled(obt->obt_sb))
178         OBD_ALLOC_PTR(oqctl);
180                 CERROR("Not enough memory!");
184         /* set over quota flags for a uid/gid */
185         oa->o_valid |= OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA;
186         oa->o_flags &= ~(OBD_FL_NO_USRQUOTA | OBD_FL_NO_GRPQUOTA);
188         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
189                 struct quota_adjust_qunit oqaq_tmp;
190                 struct lustre_qunit_size *lqs = NULL;
192                 oqaq_tmp.qaq_flags = cnt;
193                 oqaq_tmp.qaq_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
195                 quota_search_lqs(NULL, &oqaq_tmp, qctxt, &lqs);
197                         spin_lock(&lqs->lqs_lock);
                        /* small bunit => we are near the limit; force the
                         * client into sync writes for this id */
198                         if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
199                                 oa->o_flags |= (cnt == USRQUOTA) ?
200                                         OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
201                                 spin_unlock(&lqs->lqs_lock);
202                                 CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
203                                        "sync_blk(%d)\n", lqs->lqs_bunit_sz,
204                                        qctxt->lqc_sync_blk);
205                                 /* this is for quota_search_lqs */
209                         spin_unlock(&lqs->lqs_lock);
210                         /* this is for quota_search_lqs */
214                 memset(oqctl, 0, sizeof(*oqctl));
216                 oqctl->qc_cmd = Q_GETQUOTA;
217                 oqctl->qc_type = cnt;
218                 oqctl->qc_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
219                 err = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
                /* on error: drop the validity bit for this quota type
                 * rather than report a possibly-stale flag */
223                         oa->o_valid &= ~((cnt == USRQUOTA) ? OBD_MD_FLUSRQUOTA :
                /* hard limit set and current usage at/over it => over quota */
228                 if (oqctl->qc_dqblk.dqb_bhardlimit &&
229                     (toqb(oqctl->qc_dqblk.dqb_curspace) >=
230                      oqctl->qc_dqblk.dqb_bhardlimit))
231                         oa->o_flags |= (cnt == USRQUOTA) ?
232                                 OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
/*
 * quota_check_common(): decide whether the remaining quota of @uid/@gid
 * covers a block-write (@isblk) or inode-create request of @count units.
 * Returns QUOTA_RET_ACQUOTA (possibly OR'ed with QUOTA_RET_INC_PENDING)
 * when more quota must be acquired from the master first.  On the first
 * pass (@cycle == 0) the per-lqs pending counters are bumped and a lqs
 * reference is taken; both are undone later by quota_pending_commit().
 */
239 * check whether the left quota of certain uid and gid can satisfy a block_write
240 * or inode_create rpc. When need to acquire quota, return QUOTA_RET_ACQUOTA
242 static int quota_check_common(struct obd_device *obd, unsigned int uid,
243                               unsigned int gid, int count, int cycle, int isblk)
245         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
247         __u32 id[MAXQUOTAS] = { uid, gid };
248         struct qunit_data qdata[MAXQUOTAS];
249         int rc = 0, rc2[2] = { 0, 0 };
        /* qd_flags encodes the quota type in the low bits; guard that
         * MAXQUOTAS still fits that encoding */
252         CLASSERT(MAXQUOTAS < 4);
253         if (!sb_any_quota_enabled(qctxt->lqc_sb))
256         spin_lock(&qctxt->lqc_lock);
257         if (!qctxt->lqc_valid){
258                 spin_unlock(&qctxt->lqc_lock);
261         spin_unlock(&qctxt->lqc_lock);
263         for (i = 0; i < MAXQUOTAS; i++) {
264                 struct lustre_qunit_size *lqs = NULL;
266                 qdata[i].qd_id = id[i];
267                 qdata[i].qd_flags = i;
269                         QDATA_SET_BLK(&qdata[i]);
270                 qdata[i].qd_count = 0;
272                 /* ignore root user */
273                 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
276                 quota_search_lqs(&qdata[i], NULL, qctxt, &lqs);
280                 rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
281                 spin_lock(&lqs->lqs_lock);
283                         rc = QUOTA_RET_INC_PENDING;
285                                 lqs->lqs_bwrite_pending += count;
287                                 lqs->lqs_iwrite_pending += count;
                /* even if quota remains, ask for more when the pending
                 * (not yet committed) writes would exceed what is left */
289                 if (rc2[i] == QUOTA_RET_OK) {
290                         if (isblk && qdata[i].qd_count <
291                             lqs->lqs_bwrite_pending * CFS_PAGE_SIZE)
292                                 rc2[i] = QUOTA_RET_ACQUOTA;
293                         if (!isblk && qdata[i].qd_count <
294                             lqs->lqs_iwrite_pending)
295                                 rc2[i] = QUOTA_RET_ACQUOTA;
297                 spin_unlock(&lqs->lqs_lock);
298                 CDEBUG(D_QUOTA, "count: %d, write pending: %lu, qd_count: "LPU64
300                        isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
303                 /* When cycle is zero, lqs_*_pending will be changed. We will
304                  * get reference of the lqs here and put reference of lqs in
305                  * quota_pending_commit b=14784 */
309                 /* this is for quota_search_lqs */
313         if (rc2[0] == QUOTA_RET_ACQUOTA || rc2[1] == QUOTA_RET_ACQUOTA)
314                 RETURN(rc | QUOTA_RET_ACQUOTA);
/*
 * quota_chk_acq_common(): check-and-acquire loop used before a block write
 * or inode create.  Repeats quota_check_common(); while it reports
 * QUOTA_RET_ACQUOTA, calls @acquire to pull more quota from the master.
 * If the master is unreachable, sleeps on lqc_wait_for_qmaster (with the
 * service watchdog disabled so the thread is not declared hung, b=12588).
 * Acquire failures other than "request completed" / -EDQUOT back off one
 * second per retry (capped) and a rate-limited CWARN is printed via
 * last_print/last_print_lock.  @pending is set when the pending counters
 * were bumped, so the caller must later call quota_pending_commit().
 * Elapsed time is accounted to the LQUOTA_WAIT_FOR_CHK_* lprocfs counter.
 */
319 static int quota_chk_acq_common(struct obd_device *obd, unsigned int uid,
320                                 unsigned int gid, int count, int *pending,
321                                 quota_acquire acquire,
322                                 struct obd_trans_info *oti, int isblk)
324         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
325         struct timeval work_start;
326         struct timeval work_end;
328         struct l_wait_info lwi = { 0 };
329         int rc = 0, cycle = 0, count_err = 1;
332         CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
334         /* Unfortunately, if quota master is too busy to handle the
335          * pre-dqacq in time and quota hash on ost is used up, we
336          * have to wait for the completion of in flight dqacq/dqrel,
337          * in order to get enough quota for write b=12588 */
338         do_gettimeofday(&work_start);
339         while ((rc = quota_check_common(obd, uid, gid, count, cycle, isblk)) &
342                 spin_lock(&qctxt->lqc_lock);
343                 if (!qctxt->lqc_import && oti) {
344                         spin_unlock(&qctxt->lqc_lock);
346                         LASSERT(oti && oti->oti_thread &&
347                                 oti->oti_thread->t_watchdog);
                        /* stop the watchdog while we block on the master:
                         * this wait is expected and may be long */
349                         lc_watchdog_disable(oti->oti_thread->t_watchdog);
350                         CDEBUG(D_QUOTA, "sleep for quota master\n");
351                         l_wait_event(qctxt->lqc_wait_for_qmaster, check_qm(qctxt),
353                         CDEBUG(D_QUOTA, "wake up when quota master is back\n");
354                         lc_watchdog_touch(oti->oti_thread->t_watchdog);
356                         spin_unlock(&qctxt->lqc_lock);
359                 if (rc & QUOTA_RET_INC_PENDING)
364                 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
365                 /* after acquire(), we should run quota_check_common again
366                  * so that we confirm there are enough quota to finish write */
367                 rc = acquire(obd, uid, gid, oti, isblk);
369                 /* please reference to dqacq_completion for the below */
370                 /* a new request is finished, try again */
372                         CDEBUG(D_QUOTA, "finish a quota req, try again\n");
376                 /* it is out of quota already */
378                         CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
382                 /* -EBUSY and others, wait a second and try again */
385                         struct l_wait_info lwi;
387                         if (oti && oti->oti_thread && oti->oti_thread->t_watchdog)
388                                 lc_watchdog_touch(oti->oti_thread->t_watchdog);
389                         CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
392                         init_waitqueue_head(&waitq);
                        /* back off up to 10s between retries */
393                         lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
395                         l_wait_event(waitq, 0, &lwi);
398                 if (rc < 0 || cycle % 10 == 2) {
399                         spin_lock(&last_print_lock);
                        /* warn at most once every 30 seconds */
400                         if (last_print == 0 ||
401                             cfs_time_before((last_print + cfs_time_seconds(30)),
402                                             cfs_time_current())) {
403                                 last_print = cfs_time_current();
404                                 spin_unlock(&last_print_lock);
405                                 CWARN("still haven't managed to acquire quota "
406                                       "space from the quota master after %d "
407                                       "retries (err=%d, rc=%d)\n",
408                                       cycle, count_err - 1, rc);
410                                 spin_unlock(&last_print_lock);
414                 CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
418         if (!cycle && rc & QUOTA_RET_INC_PENDING)
421         do_gettimeofday(&work_end);
422         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
423         lprocfs_counter_add(qctxt->lqc_stats,
424                             isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
425                                     LQUOTA_WAIT_FOR_CHK_INO,
/*
 * quota_pending_commit(): undo the pending accounting made by
 * quota_check_common() once the block-write or inode-create RPC has
 * finished -- decrement lqs_bwrite_pending/lqs_iwrite_pending by @count
 * and drop the lqs reference taken at check time (b=14784).  Elapsed time
 * is accounted to the LQUOTA_WAIT_FOR_COMMIT_* lprocfs counter.
 */
432 * when a block_write or inode_create rpc is finished, adjust the record for
433 * pending blocks and inodes
435 static int quota_pending_commit(struct obd_device *obd, unsigned int uid,
436                                 unsigned int gid, int count, int isblk)
438         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
439         struct timeval work_start;
440         struct timeval work_end;
443         __u32 id[MAXQUOTAS] = { uid, gid };
444         struct qunit_data qdata[MAXQUOTAS];
447         CDEBUG(D_QUOTA, "commit pending quota for %s\n", obd->obd_name);
448         CLASSERT(MAXQUOTAS < 4);
449         if (!sb_any_quota_enabled(qctxt->lqc_sb))
452         do_gettimeofday(&work_start);
453         for (i = 0; i < MAXQUOTAS; i++) {
454                 struct lustre_qunit_size *lqs = NULL;
456                 qdata[i].qd_id = id[i];
457                 qdata[i].qd_flags = i;
459                         QDATA_SET_BLK(&qdata[i]);
460                 qdata[i].qd_count = 0;
                /* root user is never throttled, so nothing was pended */
462                 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
465                 quota_search_lqs(&qdata[i], NULL, qctxt, &lqs);
468                         CDEBUG(D_QUOTA, "pending: %lu, count: %d.\n",
469                                isblk ? lqs->lqs_bwrite_pending :
470                                lqs->lqs_iwrite_pending, count);
471                         spin_lock(&lqs->lqs_lock);
                        /* guard against underflow: pending must cover count */
473                                 if (lqs->lqs_bwrite_pending >= count) {
474                                         lqs->lqs_bwrite_pending -= count;
475                                         spin_unlock(&lqs->lqs_lock);
478                                         spin_unlock(&lqs->lqs_lock);
480                                                "there are too many blocks!\n");
483                                 if (lqs->lqs_iwrite_pending >= count) {
484                                         lqs->lqs_iwrite_pending -= count;
485                                         spin_unlock(&lqs->lqs_lock);
488                                         spin_unlock(&lqs->lqs_lock);
490                                                "there are too many files!\n");
495                         /* When lqs_*_pending is changed back, we'll putref lqs
501         do_gettimeofday(&work_end);
502         timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
503         lprocfs_counter_add(qctxt->lqc_stats,
504                             isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
505                                     LQUOTA_WAIT_FOR_COMMIT_INO,
/* mds_quota_init()/mds_quota_exit(): module-lifetime hooks for the quota
 * master -- set up / tear down the lustre dquot cache. */
511 static int mds_quota_init(void)
513         return lustre_dquot_init();
516 static int mds_quota_exit(void)
/*
 * mds_quota_setup(): set up the quota master context on an MDS device.
 * Mirrors filter_quota_setup() (rwsem, v2 format, quotacheck idle) but in
 * addition records the v2 version in mds_quota_info, initializes the
 * quota on/off semaphore, and passes dqacq_handler to qctxt_init() since
 * the MDS is the quota master and services acquire/release requests.
 */
522 static int mds_quota_setup(struct obd_device *obd)
524         struct obd_device_target *obt = &obd->u.obt;
525         struct mds_obd *mds = &obd->u.mds;
529         init_rwsem(&obt->obt_rwsem);
530         obt->obt_qfmt = LUSTRE_QUOTA_V2;
531         mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
532         atomic_set(&obt->obt_quotachecking, 1);
533         /* initialize quota master and quota context */
534         sema_init(&mds->mds_qonoff_sem, 1);
535         rc = qctxt_init(obd, dqacq_handler);
537                 CERROR("initialize quota context failed! (rc:%d)\n", rc);
/* mds_quota_cleanup(): mark quota disabled on the MDS and tear down the
 * quota master context. */
544 static int mds_quota_cleanup(struct obd_device *obd)
547         obd->u.mds.mds_quota = 0;
548         qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/* mds_quota_setinfo(): flip the quota context's master-ready state.
 * The visible branches mark the context READY or UNREADY; the condition
 * selecting between them is not visible here -- presumably driven by
 * @data, TODO confirm against callers. */
552 static int mds_quota_setinfo(struct obd_device *obd, void *data)
554         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
558                 QUOTA_MASTER_READY(qctxt);
560                 QUOTA_MASTER_UNREADY(qctxt);
/* mds_quota_fs_cleanup(): switch off administrative quota for both user
 * and group types (UGQUOTA) before the MDS filesystem goes away, holding
 * mds_qonoff_sem to serialize with concurrent quota on/off operations. */
564 static int mds_quota_fs_cleanup(struct obd_device *obd)
566         struct mds_obd *mds = &obd->u.mds;
567         struct obd_quotactl oqctl;
570         memset(&oqctl, 0, sizeof(oqctl));
571         oqctl.qc_type = UGQUOTA;
573         down(&mds->mds_qonoff_sem);
574         mds_admin_quota_off(obd, &oqctl);
575         up(&mds->mds_qonoff_sem);
/* quota_acquire_common(): the `acquire` callback plugged into
 * quota_chk_acq_common() -- request one more qunit (block or inode,
 * per isblk) for @uid/@gid from the quota master. */
579 static int quota_acquire_common(struct obd_device *obd, unsigned int uid,
580                                 unsigned int gid, struct obd_trans_info *oti,
583         struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
587         rc = qctxt_adjust_qunit(obd, qctxt, uid, gid, isblk, 1, oti);
591 #endif /* HAVE_QUOTA_SUPPORT */
592 #endif /* __KERNEL__ */
/* Client-side (OSC) record of an id that is over quota on some OST:
 * presence of an entry in qinfo_hash means writes for that uid/gid
 * should be failed early with -EDQUOT (see osc_quota_chkdq/setdq). */
594 struct osc_quota_info {
595         struct list_head        oqi_hash;       /* hash list */
596         struct client_obd      *oqi_cli;        /* osc obd */
597         unsigned int            oqi_id;         /* uid/gid of a file */
598         short                   oqi_type;       /* quota type */
/* Global hash of osc_quota_info records, keyed by (client_obd, id, type)
 * via hashfn(); qinfo_list_lock protects all buckets. */
601 spinlock_t qinfo_list_lock = SPIN_LOCK_UNLOCKED;
603 static struct list_head qinfo_hash[NR_DQHASH];
604 /* SLAB cache for client quota context */
605 cfs_mem_cache_t *qinfo_cachep = NULL;
/* hashfn(): map (client, id, type) to a qinfo_hash bucket index.
 * The client pointer is shifted by 6 to discard allocation-alignment
 * bits before being mixed with the id. Declared __const__ (pure):
 * the result depends only on the arguments. */
607 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
608                          __attribute__((__const__));
610 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
612         unsigned long tmp = ((unsigned long)cli>>6) ^ id;
613         tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
/* Link @oqi into its hash bucket. Caller must hold qinfo_list_lock
 * (asserted); no duplicate check is done here -- callers are expected
 * to have searched first (see osc_quota_setdq). */
617 /* caller must hold qinfo_list_lock */
618 static inline void insert_qinfo_hash(struct osc_quota_info *oqi)
620         struct list_head *head = qinfo_hash +
621                 hashfn(oqi->oqi_cli, oqi->oqi_id, oqi->oqi_type);
623         LASSERT_SPIN_LOCKED(&qinfo_list_lock);
624         list_add(&oqi->oqi_hash, head);
/* Unlink @oqi from its hash bucket (list_del_init leaves the node
 * self-linked so it is safe to free or re-insert afterwards). Caller
 * must hold qinfo_list_lock (asserted). */
627 /* caller must hold qinfo_list_lock */
628 static inline void remove_qinfo_hash(struct osc_quota_info *oqi)
630         LASSERT_SPIN_LOCKED(&qinfo_list_lock);
631         list_del_init(&oqi->oqi_hash);
/* Look up the osc_quota_info for (cli, id, type) in its hash bucket.
 * Caller must hold qinfo_list_lock (asserted); the returned pointer is
 * only valid while the lock is held. */
634 /* caller must hold qinfo_list_lock */
635 static inline struct osc_quota_info *find_qinfo(struct client_obd *cli,
636                                                 unsigned int id, int type)
638         unsigned int hashent = hashfn(cli, id, type);
639         struct osc_quota_info *oqi;
642         LASSERT_SPIN_LOCKED(&qinfo_list_lock);
643         list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
644                 if (oqi->oqi_cli == cli &&
645                     oqi->oqi_id == id && oqi->oqi_type == type)
/* Allocate and initialize an osc_quota_info from the slab cache.
 * The hash link is self-initialized so the record can be freed even if
 * it is never inserted. May sleep (CFS_ALLOC_STD); call outside
 * qinfo_list_lock. */
651 static struct osc_quota_info *alloc_qinfo(struct client_obd *cli,
652                                           unsigned int id, int type)
654         struct osc_quota_info *oqi;
657         OBD_SLAB_ALLOC(oqi, qinfo_cachep, CFS_ALLOC_STD, sizeof(*oqi));
661         CFS_INIT_LIST_HEAD(&oqi->oqi_hash);
664         oqi->oqi_type = type;
/* Return an osc_quota_info to the slab cache. Caller must have removed
 * it from qinfo_hash first. */
669 static void free_qinfo(struct osc_quota_info *oqi)
671         OBD_SLAB_FREE(oqi, qinfo_cachep, sizeof(*oqi));
/*
 * osc_quota_chkdq(): client-side fast quota check before issuing a write.
 * Returns QUOTA_OK unless a cached osc_quota_info exists for the uid or
 * gid on this client (set earlier by osc_quota_setdq when the OST
 * reported over-quota), in which case the write can be failed locally
 * without a round trip.
 */
674 int osc_quota_chkdq(struct client_obd *cli, unsigned int uid, unsigned int gid)
677         int cnt, rc = QUOTA_OK;
680         spin_lock(&qinfo_list_lock);
681         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
682                 struct osc_quota_info *oqi = NULL;
684                 id = (cnt == USRQUOTA) ? uid : gid;
685                 oqi = find_qinfo(cli, id, cnt);
691         spin_unlock(&qinfo_list_lock);
/*
 * osc_quota_setdq(): update the client-side over-quota cache from the
 * flags an OST returned in an obdo.  For each quota type whose validity
 * bit is set in @valid: if the OST reports NO_*QUOTA, ensure a cache
 * entry exists; otherwise drop any stale entry.  A record is
 * pre-allocated outside qinfo_list_lock (alloc_qinfo may sleep) and the
 * insert/remove decision is made under the lock against a fresh lookup.
 */
696 int osc_quota_setdq(struct client_obd *cli, unsigned int uid, unsigned int gid,
697                     obd_flag valid, obd_flag flags)
705         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
706                 struct osc_quota_info *oqi, *old;
708                 if (!(valid & ((cnt == USRQUOTA) ?
709                     OBD_MD_FLUSRQUOTA : OBD_MD_FLGRPQUOTA)))
712                 id = (cnt == USRQUOTA) ? uid : gid;
713                 noquota = (cnt == USRQUOTA) ?
714                     (flags & OBD_FL_NO_USRQUOTA) : (flags & OBD_FL_NO_GRPQUOTA);
716                 oqi = alloc_qinfo(cli, id, cnt);
718                 spin_lock(&qinfo_list_lock);
720                 old = find_qinfo(cli, id, cnt);
722                         remove_qinfo_hash(old);
723                 else if (!old && noquota)
724                         insert_qinfo_hash(oqi);
726                 spin_unlock(&qinfo_list_lock);
733                 CERROR("not enough mem!\n");
/*
 * osc_quota_cleanup(): drop every cached over-quota record belonging to
 * this client (e.g. on disconnect), leaving other clients' entries in
 * the shared qinfo_hash untouched.
 */
742 int osc_quota_cleanup(struct obd_device *obd)
744         struct client_obd *cli = &obd->u.cli;
745         struct osc_quota_info *oqi, *n;
749         spin_lock(&qinfo_list_lock);
750         for (i = 0; i < NR_DQHASH; i++) {
751                 list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
752                         if (oqi->oqi_cli != cli)
754                         remove_qinfo_hash(oqi);
758         spin_unlock(&qinfo_list_lock);
/*
 * osc_quota_init(): create the osc_quota_info slab cache (must not
 * already exist) and initialize every qinfo_hash bucket head.  Called
 * once at module init through osc_quota_interface.quota_init.
 */
763 int osc_quota_init(void)
768         LASSERT(qinfo_cachep == NULL);
769         qinfo_cachep = cfs_mem_cache_create("osc_quota_info",
770                                             sizeof(struct osc_quota_info),
775         for (i = 0; i < NR_DQHASH; i++)
776                 CFS_INIT_LIST_HEAD(qinfo_hash + i);
/*
 * osc_quota_exit(): empty every qinfo_hash bucket and destroy the slab
 * cache.  Counterpart of osc_quota_init(); called at module unload.
 * NOTE(review): the LASSERTF message below misspells "destroy" as
 * "destory" -- fix the string next time this code is touched.
 */
781 int osc_quota_exit(void)
783         struct osc_quota_info *oqi, *n;
787         spin_lock(&qinfo_list_lock);
788         for (i = 0; i < NR_DQHASH; i++) {
789                 list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
790                         remove_qinfo_hash(oqi);
794         spin_unlock(&qinfo_list_lock);
796         rc = cfs_mem_cache_destroy(qinfo_cachep);
797         LASSERTF(rc == 0, "couldn't destory qinfo_cachep slab\n");
/* Operation tables plugged into the generic quota layer.
 * mds_quota_interface: quota-master side (MDS) -- full lifecycle plus
 * recovery and the common check/acquire/commit helpers defined above.
 * filter_quota_interface: quota-slave side (OST/filter) -- same common
 * helpers, but slave-specific setup/clearinfo/enforce/getflag hooks. */
804 #ifdef HAVE_QUOTA_SUPPORT
805 quota_interface_t mds_quota_interface = {
806         .quota_init     = mds_quota_init,
807         .quota_exit     = mds_quota_exit,
808         .quota_setup    = mds_quota_setup,
809         .quota_cleanup  = mds_quota_cleanup,
810         .quota_check    = target_quota_check,
811         .quota_ctl      = mds_quota_ctl,
812         .quota_setinfo  = mds_quota_setinfo,
813         .quota_fs_cleanup       = mds_quota_fs_cleanup,
814         .quota_recovery = mds_quota_recovery,
815         .quota_adjust   = mds_quota_adjust,
816         .quota_chkquota = quota_chk_acq_common,
817         .quota_acquire  = quota_acquire_common,
818         .quota_pending_commit = quota_pending_commit,
821 quota_interface_t filter_quota_interface = {
822         .quota_setup    = filter_quota_setup,
823         .quota_cleanup  = filter_quota_cleanup,
824         .quota_check    = target_quota_check,
825         .quota_ctl      = filter_quota_ctl,
826         .quota_setinfo  = filter_quota_setinfo,
827         .quota_clearinfo = filter_quota_clearinfo,
828         .quota_enforce  = filter_quota_enforce,
829         .quota_getflag  = filter_quota_getflag,
830         .quota_acquire  = quota_acquire_common,
831         .quota_adjust   = filter_quota_adjust,
832         .quota_chkquota = quota_chk_acq_common,
833         .quota_adjust_qunit   = filter_quota_adjust_qunit,
834         .quota_pending_commit = quota_pending_commit,
837 #endif /* __KERNEL__ */
/* Client-side operation tables: mdc and osc forward quota control/check
 * RPCs to the servers via the generic client_quota_* helpers; lmv and
 * lov fan the same operations out across their child targets.  Only the
 * osc table carries the local over-quota cache hooks (chkdq/setdq) and
 * its init/exit/cleanup defined above. */
839 quota_interface_t mdc_quota_interface = {
840         .quota_ctl      = client_quota_ctl,
841         .quota_check    = client_quota_check,
842         .quota_poll_check = client_quota_poll_check,
845 quota_interface_t lmv_quota_interface = {
846         .quota_ctl      = lmv_quota_ctl,
847         .quota_check    = lmv_quota_check,
850 quota_interface_t osc_quota_interface = {
851         .quota_ctl      = client_quota_ctl,
852         .quota_check    = client_quota_check,
853         .quota_poll_check = client_quota_poll_check,
854         .quota_init     = osc_quota_init,
855         .quota_exit     = osc_quota_exit,
856         .quota_chkdq    = osc_quota_chkdq,
857         .quota_setdq    = osc_quota_setdq,
858         .quota_cleanup  = osc_quota_cleanup,
859         .quota_adjust_qunit = client_quota_adjust_qunit,
862 quota_interface_t lov_quota_interface = {
863         .quota_ctl      = lov_quota_ctl,
864         .quota_check    = lov_quota_check,
865         .quota_adjust_qunit = lov_quota_adjust_qunit,
/* /proc entry for the lquota obd type, registered in init_lustre_quota()
 * and removed in exit_lustre_quota(). */
870 cfs_proc_dir_entry_t *lquota_type_proc_dir = NULL;
/*
 * init_lustre_quota(): module entry point.  Registers the lquota lprocfs
 * directory, initializes the qunit cache, then registers every quota
 * interface symbol so other Lustre modules can look them up.  The
 * server-side interfaces (filter/mds) are only built with
 * HAVE_QUOTA_SUPPORT.
 */
872 static int __init init_lustre_quota(void)
874 #ifdef HAVE_QUOTA_SUPPORT
877         lquota_type_proc_dir = lprocfs_register(OBD_LQUOTA_DEVICENAME,
880         if (IS_ERR(lquota_type_proc_dir)) {
881                 CERROR("LProcFS failed in lquota-init\n");
882                 rc = PTR_ERR(lquota_type_proc_dir);
886         rc = qunit_cache_init();
890         PORTAL_SYMBOL_REGISTER(filter_quota_interface);
891         PORTAL_SYMBOL_REGISTER(mds_quota_interface);
893         PORTAL_SYMBOL_REGISTER(mdc_quota_interface);
894         PORTAL_SYMBOL_REGISTER(lmv_quota_interface);
895         PORTAL_SYMBOL_REGISTER(osc_quota_interface);
896         PORTAL_SYMBOL_REGISTER(lov_quota_interface);
/*
 * exit_lustre_quota(): module exit -- unregister all quota interface
 * symbols (reverse of init_lustre_quota), drain the qunit cache and
 * remove the lquota lprocfs directory.  The __exit attribute is
 * deliberately commented out, presumably so the function stays linked
 * for an error path in init -- TODO confirm.
 */
900 static void /*__exit*/ exit_lustre_quota(void)
902         PORTAL_SYMBOL_UNREGISTER(mdc_quota_interface);
903         PORTAL_SYMBOL_UNREGISTER(lmv_quota_interface);
904         PORTAL_SYMBOL_UNREGISTER(osc_quota_interface);
905         PORTAL_SYMBOL_UNREGISTER(lov_quota_interface);
906 #ifdef HAVE_QUOTA_SUPPORT
907         PORTAL_SYMBOL_UNREGISTER(filter_quota_interface);
908         PORTAL_SYMBOL_UNREGISTER(mds_quota_interface);
910         qunit_cache_cleanup();
912         if (lquota_type_proc_dir)
913                 lprocfs_remove(&lquota_type_proc_dir);
/* Kernel module metadata, registration of init/exit via cfs_module(),
 * and EXPORT_SYMBOL declarations matching the PORTAL_SYMBOL_REGISTER
 * calls above so other Lustre modules can link against the interfaces. */
917 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
918 MODULE_DESCRIPTION("Lustre Quota");
919 MODULE_LICENSE("GPL");
921 cfs_module(lquota, "1.0.0", init_lustre_quota, exit_lustre_quota);
923 #ifdef HAVE_QUOTA_SUPPORT
924 EXPORT_SYMBOL(mds_quota_interface);
925 EXPORT_SYMBOL(filter_quota_interface);
927 EXPORT_SYMBOL(mdc_quota_interface);
928 EXPORT_SYMBOL(lmv_quota_interface);
929 EXPORT_SYMBOL(osc_quota_interface);
930 EXPORT_SYMBOL(lov_quota_interface);
931 #endif /* __KERNEL */