1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * lustre/quota/quota_interface.c
6 * Copyright (c) 2001-2005 Cluster File Systems, Inc.
8 * This file is part of Lustre, http://www.lustre.org.
10 * No redistribution or use is permitted outside of Cluster File Systems, Inc.
14 # define EXPORT_SYMTAB
16 #define DEBUG_SUBSYSTEM S_MDS
19 # include <linux/version.h>
20 # include <linux/module.h>
21 # include <linux/init.h>
22 # include <linux/fs.h>
23 # include <linux/jbd.h>
24 # include <linux/ext3_fs.h>
25 # if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
26 # include <linux/smp_lock.h>
27 # include <linux/buffer_head.h>
28 # include <linux/workqueue.h>
29 # include <linux/mount.h>
31 # include <linux/locks.h>
33 #else /* __KERNEL__ */
34 # include <liblustre.h>
37 #include <obd_class.h>
38 #include <lustre_mds.h>
39 #include <lustre_dlm.h>
40 #include <lustre_cfg.h>
42 #include <lustre_fsfilt.h>
43 #include <lustre_quota.h>
44 #include <lprocfs_status.h>
45 #include "quota_internal.h"
49 /* quota proc file handling functions */
55 #define MAX_STYPE_SIZE 5
/*
 * lprocfs read handler: print the enabled quota type(s) for this obd
 * device into @page ("u" for user, "g" for group, per lqc_atype bits).
 * On an MDS the admin quota file version is also queried so it can be
 * appended.  NOTE(review): several lines of this body are elided in
 * this excerpt; comments describe only what the visible code shows.
 */
56 int lprocfs_quota_rd_type(char *page, char **start, off_t off, int count,
59 struct obd_device *obd = (struct obd_device *)data;
60 char stype[MAX_STYPE_SIZE + 1] = "";
/* bitmask of auto-enabled quota types (USER_QUOTA / GROUP_QUOTA) */
61 int type = obd->u.obt.obt_qctxt.lqc_atype;
67 if (type & USER_QUOTA)
69 if (type & GROUP_QUOTA)
73 /* append with quota version on MDS */
74 if (!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME)) {
76 lustre_quota_version_t version;
78 rc = mds_quota_get_version(obd, &version);
/* emit the assembled type string to the proc page */
94 return snprintf(page, count, "%s\n", stype);
96 EXPORT_SYMBOL(lprocfs_quota_rd_type);
/*
 * Automatically enable quota of @type (USRQUOTA/GRPQUOTA/UGQUOTA) on
 * superblock @sb.  When @is_master is set (MDS), cluster-wide admin
 * quota is turned on via mds_admin_quota_on() before local quota; if
 * enabling local quota then fails, the admin side is rolled back with
 * mds_quota_off().  Returns early when quota is already on
 * (lqc_status non-zero).
 */
98 static int auto_quota_on(struct obd_device *obd, int type,
99 struct super_block *sb, int is_master)
101 struct obd_quotactl *oqctl;
102 struct lvfs_run_ctxt saved;
106 LASSERT(type == USRQUOTA || type == GRPQUOTA || type == UGQUOTA);
108 /* quota already turned on */
109 if (obd->u.obt.obt_qctxt.lqc_status)
112 OBD_ALLOC_PTR(oqctl);
116 oqctl->qc_type = type;
117 oqctl->qc_cmd = Q_QUOTAON;
/* for Q_QUOTAON qc_id carries the quota file format, not a uid/gid */
118 oqctl->qc_id = QFMT_LDISKFS;
/* run the quotactl calls in the server's filesystem context */
120 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
125 /* turn on cluster wide quota */
126 rc = mds_admin_quota_on(obd, oqctl);
128 CDEBUG(rc == -ENOENT ? D_QUOTA : D_ERROR,
129 "auto-enable admin quota failed. rc=%d\n", rc);
133 /* turn on local quota */
134 rc = fsfilt_quotactl(obd, sb, oqctl);
136 CDEBUG(rc == -ENOENT ? D_QUOTA : D_ERROR,
137 "auto-enable local quota failed. rc=%d\n", rc);
/* local enable failed: back out the admin-side enable */
139 mds_quota_off(obd, oqctl);
/* mark quota as active on this device */
141 obd->u.obt.obt_qctxt.lqc_status = 1;
144 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/*
 * lprocfs write handler: parse a user-supplied quota type string
 * (characters selecting user/group quota, plus quota-version
 * specifiers valid only on the MDS), store the result in lqc_atype,
 * and auto-enable quota — as master on an MDS, as slave on an OST.
 */
150 int lprocfs_quota_wr_type(struct file *file, const char *buffer,
151 unsigned long count, void *data)
153 struct obd_device *obd = (struct obd_device *)data;
154 struct obd_device_target *obt = &obd->u.obt;
157 char stype[MAX_STYPE_SIZE + 1] = "";
158 LASSERT(obd != NULL);
/* refuse input longer than the type-string buffer */
160 if (count > MAX_STYPE_SIZE)
163 if (copy_from_user(stype, buffer, count))
/* scan each character of the input */
166 for (i = 0 ; i < count ; i++) {
176 /* quota version specifiers */
/* version specifiers are meaningful only on the MDS */
178 if (strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME))
181 rc = mds_quota_set_version(obd, LUSTRE_QUOTA_V1);
183 CDEBUG(D_QUOTA, "failed to set quota v1! %d\n", rc);
188 if (strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME))
191 rc = mds_quota_set_version(obd, LUSTRE_QUOTA_V2);
193 CDEBUG(D_QUOTA, "could not set quota v2! %d\n", rc);
197 default : /* just skip stray symbols like \n */
/* remember the parsed type bitmask for later reads */
202 obt->obt_qctxt.lqc_atype = type;
/* NOTE(review): (type - 1) maps the lqc_atype bitmask onto the
 * USRQUOTA/GRPQUOTA/UGQUOTA values checked by auto_quota_on() —
 * confirm the encoding against quota_internal.h */
207 if (!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME))
208 auto_quota_on(obd, type - 1, obt->obt_sb, 1);
209 else if (!strcmp(obd->obd_type->typ_name, LUSTRE_OST_NAME))
210 auto_quota_on(obd, type - 1, obt->obt_sb, 0);
216 EXPORT_SYMBOL(lprocfs_quota_wr_type);
/*
 * OST (filter) quota setup: reset the quotacheck gate to idle and
 * initialise the slave quota context on this device's superblock.
 * Unlike mds_quota_setup(), no dqacq handler is passed — this node is
 * a quota slave, not the master.
 */
220 static int filter_quota_setup(struct obd_device *obd)
223 struct obd_device_target *obt = &obd->u.obt;
226 atomic_set(&obt->obt_quotachecking, 1);
227 rc = qctxt_init(&obt->obt_qctxt, obt->obt_sb, NULL);
229 CERROR("initialize quota context failed! (rc:%d)\n", rc);
/* Tear down the slave quota context set up by filter_quota_setup(). */
235 static int filter_quota_cleanup(struct obd_device *obd)
237 qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/*
 * Hook a client export into the slave quota context: record the
 * reverse import under lqc_lock, mirror the quota-related connect
 * flags (QUOTA64 / CHANGE_QS) from the export onto that import, and
 * kick off slave recovery to release stale high limits.
 */
241 static int filter_quota_setinfo(struct obd_export *exp, struct obd_device *obd)
243 struct obd_import *imp;
245 /* setup the quota context import */
246 spin_lock(&obd->u.obt.obt_qctxt.lqc_lock);
247 obd->u.obt.obt_qctxt.lqc_import = exp->exp_imp_reverse;
248 spin_unlock(&obd->u.obt.obt_qctxt.lqc_lock);
250 /* make imp's connect flags equal relative exp's connect flags,
 * adding it to avoid scanning the export list */
253 imp = exp->exp_imp_reverse;
255 imp->imp_connect_data.ocd_connect_flags |=
256 (exp->exp_connect_flags &
257 (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
259 /* start quota slave recovery thread. (release high limits) */
260 qslave_start_recovery(obd, &obd->u.obt.obt_qctxt);
/*
 * Undo filter_quota_setinfo() when an export goes away: if the quota
 * context still points at this export's reverse import, clear it under
 * lqc_lock so nobody uses a freed import.
 */
264 static int filter_quota_clearinfo(struct obd_export *exp, struct obd_device *obd)
266 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
268 /* lquota may be not set up before destroying export, b=14896 */
269 if (!obd->obd_set_up)
272 /* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
 * should be invalidated, b=12374 */
274 if (qctxt->lqc_import == exp->exp_imp_reverse) {
275 spin_lock(&qctxt->lqc_lock);
276 qctxt->lqc_import = NULL;
277 spin_unlock(&qctxt->lqc_lock);
/*
 * Toggle quota enforcement for the current task by raising or lowering
 * CAP_SYS_RESOURCE (holders of that capability bypass quota limits).
 * No-op when no quota is enabled on the superblock.
 * NOTE(review): the condition on @ignore that selects cap_raise vs
 * cap_lower is not visible in this excerpt — confirm before relying on
 * the polarity.
 */
283 static int filter_quota_enforce(struct obd_device *obd, unsigned int ignore)
287 if (!sb_any_quota_enabled(obd->u.obt.obt_sb))
291 cap_raise(current->cap_effective, CAP_SYS_RESOURCE);
293 cap_lower(current->cap_effective, CAP_SYS_RESOURCE);
/*
 * Fill the over-quota flags in @oa for its uid and gid: issue a local
 * Q_GETQUOTA for each of user/group and set OBD_FL_NO_USRQUOTA /
 * OBD_FL_NO_GRPQUOTA when current block usage exceeds the block hard
 * limit.  Also marks OBD_MD_FL*QUOTA valid so the client knows the
 * flags are meaningful.
 */
298 static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
300 struct obd_device_target *obt = &obd->u.obt;
301 int err, cnt, rc = 0;
302 struct obd_quotactl *oqctl;
305 if (!sb_any_quota_enabled(obt->obt_sb))
/* start from a clean slate; flags are re-derived below */
308 oa->o_flags &= ~(OBD_FL_NO_USRQUOTA | OBD_FL_NO_GRPQUOTA);
310 OBD_ALLOC_PTR(oqctl);
312 CERROR("Not enough memory!");
/* one pass for USRQUOTA, one for GRPQUOTA */
316 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
317 memset(oqctl, 0, sizeof(*oqctl));
319 oqctl->qc_cmd = Q_GETQUOTA;
320 oqctl->qc_type = cnt;
321 oqctl->qc_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
322 err = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
329 /* set over quota flags for a uid/gid */
330 oa->o_valid |= (cnt == USRQUOTA) ?
331 OBD_MD_FLUSRQUOTA : OBD_MD_FLGRPQUOTA;
/* over-quota when a block hard limit exists and cur usage
 * (converted to quota blocks via toqb) exceeds it */
332 if (oqctl->qc_dqblk.dqb_bhardlimit &&
333 (toqb(oqctl->qc_dqblk.dqb_curspace) >
334 oqctl->qc_dqblk.dqb_bhardlimit))
335 oa->o_flags |= (cnt == USRQUOTA) ?
336 OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
/*
 * OST-side quota acquire callback: ask the master for more block quota
 * for (uid, gid) via qctxt_adjust_qunit (LQUOTA_FLAGS_BLK = block,
 * final arg 1 = wait for completion).
 */
342 static int filter_quota_acquire(struct obd_device *obd, unsigned int uid,
345 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
349 rc = qctxt_adjust_qunit(obd, qctxt, uid, gid, LQUOTA_FLAGS_BLK, 1);
353 /* Check whether the remaining quota of the given uid and gid can satisfy
354  * a block_write or inode_create rpc.  When quota must be acquired first,
 * the result includes QUOTA_RET_ACQUOTA; when @cycle is 0 the per-id
 * pending counters are also bumped (QUOTA_RET_INC_PENDING). */
355 static int quota_check_common(struct obd_device *obd, unsigned int uid,
356 unsigned int gid, int count, int cycle, int isblk)
358 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
360 __u32 id[MAXQUOTAS] = { uid, gid };
361 struct qunit_data qdata[MAXQUOTAS];
362 int rc = 0, rc2[2] = { 0, 0 };
365 CLASSERT(MAXQUOTAS < 4);
/* nothing to enforce when quota is globally off */
366 if (!sb_any_quota_enabled(qctxt->lqc_sb))
/* check user quota (i == USRQUOTA) then group quota */
369 for (i = 0; i < MAXQUOTAS; i++) {
370 struct lustre_qunit_size *lqs = NULL;
372 qdata[i].qd_id = id[i];
/* qd_flags low bits encode the quota type (user/group) */
373 qdata[i].qd_flags = i;
375 QDATA_SET_BLK(&qdata[i]);
376 qdata[i].qd_count = 0;
378 /* ignore root user */
379 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
/* look up (or create) the per-id qunit-size bookkeeping */
382 quota_search_lqs(&qdata[i], NULL, qctxt, &lqs);
/* how much quota is left for this id? fills qd_count */
386 rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
387 spin_lock(&lqs->lqs_lock);
389 rc = QUOTA_RET_INC_PENDING;
391 lqs->lqs_bwrite_pending += count;
393 lqs->lqs_iwrite_pending += count;
396 CDEBUG(D_QUOTA, "write pending: %lu, qd_count: "LPU64".\n",
397 isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
/* remaining quota must cover everything already pending:
 * pending blocks are in pages, hence the CFS_PAGE_SIZE scale */
399 if (rc2[i] == QUOTA_RET_OK) {
400 if (isblk && qdata[i].qd_count <
401 lqs->lqs_bwrite_pending * CFS_PAGE_SIZE)
402 rc2[i] = QUOTA_RET_ACQUOTA;
403 if (!isblk && qdata[i].qd_count <
404 lqs->lqs_iwrite_pending)
405 rc2[i] = QUOTA_RET_ACQUOTA;
408 spin_unlock(&lqs->lqs_lock);
410 /* When cycle is zero, lqs_*_pending will be changed. We will
411  * get reference of the lqs here and put reference of lqs in
412  * quota_pending_commit b=14784 */
416 /* this is for quota_search_lqs */
/* either id short on quota => caller must acquire */
420 if (rc2[0] == QUOTA_RET_ACQUOTA || rc2[1] == QUOTA_RET_ACQUOTA)
421 RETURN(rc | QUOTA_RET_ACQUOTA);
/*
 * Check-then-acquire loop shared by OST (blocks) and MDS (inodes):
 * while quota_check_common() reports QUOTA_RET_ACQUOTA, call @acquire
 * to fetch more quota from the master, then re-check.  Gives up after
 * 10 consecutive errors or 1000 cycles; -EDQUOT from acquire means the
 * id is genuinely out of quota.  @pending is set when the pending
 * counters were bumped on the first cycle.
 */
426 static int quota_chk_acq_common(struct obd_device *obd, unsigned int uid,
427 unsigned int gid, int count, int *pending,
428 int isblk, quota_acquire acquire)
430 int rc = 0, cycle = 0, count_err = 0;
433 /* Unfortunately, if quota master is too busy to handle the
434  * pre-dqacq in time and quota hash on ost is used up, we
435  * have to wait for the completion of in flight dqacq/dqrel,
436  * in order to get enough quota for write b=12588 */
437 while ((rc = quota_check_common(obd, uid, gid, count, cycle, isblk)) &
/* record that pending counters were incremented (first cycle) */
440 if (rc & QUOTA_RET_INC_PENDING)
/* fault-injection point for testing slow writes */
445 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
446 /* after acquire(), we should run quota_check_common again
447  * so that we confirm there are enough quota to finish write */
448 rc = acquire(obd, uid, gid);
450 /* please reference to dqacq_completion for the below */
451 /* a new request is finished, try again */
453 CDEBUG(D_QUOTA, "finish a quota req, try again\n");
457 /* it is out of quota already */
459 CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
463 /* -EBUSY and others, try 10 times */
464 if (rc < 0 && count_err < 10) {
465 CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc, count_err++);
/* back off for a jiffy-second before retrying */
466 cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, HZ);
470 if (count_err >= 10 || cycle >= 1000) {
471 CDEBUG(D_ERROR, "we meet 10 errors or run too many"
472 " cycles when acquiring quota, quit checking with"
473 " rc: %d, cycle: %d.\n", rc, cycle);
477 CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
/* only the first pass may have bumped the pending counters */
481 if (!cycle && rc & QUOTA_RET_INC_PENDING)
/*
 * OST quota check entry point: block-quota variant of
 * quota_chk_acq_common() for an @npage write.
 */
488 static int filter_quota_check(struct obd_device *obd, unsigned int uid,
489 unsigned int gid, int npage, int *flag,
490 quota_acquire acquire)
492 return quota_chk_acq_common(obd, uid, gid, npage, flag, LQUOTA_FLAGS_BLK,
496 /* when a block_write or inode_create rpc is finished, adjust the record for
497  * pending blocks and inodes */
498 static int quota_pending_commit(struct obd_device *obd, unsigned int uid,
499 unsigned int gid, int count, int isblk)
501 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
503 __u32 id[MAXQUOTAS] = { uid, gid };
504 struct qunit_data qdata[MAXQUOTAS];
507 CLASSERT(MAXQUOTAS < 4);
507 CLASSERT(MAXQUOTAS < 4); is asserted above; skip when quota is off
508 if (!sb_any_quota_enabled(qctxt->lqc_sb))
/* undo the pending increment for user quota, then group quota */
511 for (i = 0; i < MAXQUOTAS; i++) {
512 struct lustre_qunit_size *lqs = NULL;
514 qdata[i].qd_id = id[i];
515 qdata[i].qd_flags = i;
517 QDATA_SET_BLK(&qdata[i]);
518 qdata[i].qd_count = 0;
/* root user is never throttled, mirror quota_check_common() */
520 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
523 quota_search_lqs(&qdata[i], NULL, qctxt, &lqs);
526 spin_lock(&lqs->lqs_lock);
527 CDEBUG(D_QUOTA, "pending: %lu, count: %d.\n",
528 isblk ? lqs->lqs_bwrite_pending :
529 lqs->lqs_iwrite_pending, count);
/* guard against underflow of the pending counters */
532 if (lqs->lqs_bwrite_pending >= count) {
533 lqs->lqs_bwrite_pending -= count;
537 "there are too many blocks!\n");
540 if (lqs->lqs_iwrite_pending >= count) {
541 lqs->lqs_iwrite_pending -= count;
545 "there are too many files!\n");
549 spin_unlock(&lqs->lqs_lock);
551 /* When lqs_*_pending is changed back, we'll put the lqs
 * reference taken in quota_check_common(), b=14784 */
/*
 * OST variant of quota_pending_commit(): release the pending-block
 * reservation for an @npage write that has completed.
 */
561 static int filter_quota_pending_commit(struct obd_device *obd, unsigned int uid,
562 unsigned int gid, int npage)
564 return quota_pending_commit(obd, uid, gid, npage, LQUOTA_FLAGS_BLK);
/* Module-load hook for the MDS quota master: set up the dquot cache. */
567 static int mds_quota_init(void)
569 return lustre_dquot_init();
/* Module-unload counterpart of mds_quota_init() (body elided here;
 * presumably tears down the dquot cache — confirm in full source). */
572 static int mds_quota_exit(void)
/*
 * MDS quota setup: default the admin quota files to the v2 format,
 * reset the quotacheck gate, initialise the on/off serialisation
 * semaphore, and create the master quota context with dqacq_handler
 * so this node services slave acquire/release requests.
 */
578 static int mds_quota_setup(struct obd_device *obd)
580 struct obd_device_target *obt = &obd->u.obt;
581 struct mds_obd *mds = &obd->u.mds;
585 mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
586 atomic_set(&obt->obt_quotachecking, 1);
587 /* initialize quota master and quota context */
588 sema_init(&mds->mds_qonoff_sem, 1);
589 rc = qctxt_init(&obt->obt_qctxt, obt->obt_sb, dqacq_handler);
591 CERROR("initialize quota context failed! (rc:%d)\n", rc);
/* Tear down the master quota context created by mds_quota_setup(). */
597 static int mds_quota_cleanup(struct obd_device *obd)
599 qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/*
 * Close the per-type admin quota files under mds_qonoff_sem so the
 * close cannot race with a concurrent quota on/off operation; clears
 * each qi_files slot after closing it.
 */
603 static int mds_quota_fs_cleanup(struct obd_device *obd)
605 struct mds_obd *mds = &obd->u.mds;
609 /* close admin quota files */
610 down(&mds->mds_qonoff_sem);
611 for (i = 0; i < MAXQUOTAS; i++) {
612 if (mds->mds_quota_info.qi_files[i]) {
613 filp_close(mds->mds_quota_info.qi_files[i], 0);
614 mds->mds_quota_info.qi_files[i] = NULL;
617 up(&mds->mds_qonoff_sem);
/*
 * MDS quota check entry point: inode-quota variant of
 * quota_chk_acq_common() (isblk = 0) for @inodes creations.
 */
621 static int mds_quota_check(struct obd_device *obd, unsigned int uid,
622 unsigned int gid, int inodes, int *flag,
623 quota_acquire acquire)
625 return quota_chk_acq_common(obd, uid, gid, inodes, flag, 0, acquire);
/*
 * MDS quota acquire callback: inode-quota counterpart of
 * filter_quota_acquire() (flags = 0 selects inode quota; final arg 1 =
 * wait for completion).
 */
628 static int mds_quota_acquire(struct obd_device *obd, unsigned int uid,
631 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
635 rc = qctxt_adjust_qunit(obd, qctxt, uid, gid, 0, 1);
/*
 * MDS variant of quota_pending_commit(): release the pending-inode
 * reservation once @inodes creations have completed.
 */
639 static int mds_quota_pending_commit(struct obd_device *obd, unsigned int uid,
640 unsigned int gid, int inodes)
642 return quota_pending_commit(obd, uid, gid, inodes, 0);
644 #endif /* __KERNEL__ */
/*
 * Client-side record that a particular uid/gid on a particular OSC is
 * over quota.  Entries live in the global qinfo_hash table, keyed by
 * (client obd, id, quota type), protected by qinfo_list_lock, and
 * allocated from the qinfo_cachep slab.
 */
646 struct osc_quota_info {
647 struct list_head oqi_hash; /* hash list */
648 struct client_obd *oqi_cli; /* osc obd */
649 unsigned int oqi_id; /* uid/gid of a file */
650 short oqi_type; /* quota type */
/* protects qinfo_hash and all osc_quota_info list membership */
653 spinlock_t qinfo_list_lock = SPIN_LOCK_UNLOCKED;
/* hash buckets of osc_quota_info, indexed by hashfn() */
655 static struct list_head qinfo_hash[NR_DQHASH];
656 /* SLAB cache for client quota context */
657 cfs_mem_cache_t *qinfo_cachep = NULL;
/*
 * Hash (client, id, type) into a qinfo_hash bucket index in
 * [0, NR_DQHASH).  The client pointer is shifted to drop alignment
 * zeroes before being mixed with the id.
 */
659 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
660 __attribute__((__const__));
662 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
664 unsigned long tmp = ((unsigned long)cli>>6) ^ id;
665 tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
669 /* Add @oqi to its hash bucket.  Caller must hold qinfo_list_lock
 * (asserted below). */
670 static inline void insert_qinfo_hash(struct osc_quota_info *oqi)
672 struct list_head *head = qinfo_hash +
673 hashfn(oqi->oqi_cli, oqi->oqi_id, oqi->oqi_type);
675 LASSERT_SPIN_LOCKED(&qinfo_list_lock);
676 list_add(&oqi->oqi_hash, head);
679 /* Unlink @oqi from its hash bucket (list_del_init leaves it safe to
 * re-insert or free).  Caller must hold qinfo_list_lock. */
680 static inline void remove_qinfo_hash(struct osc_quota_info *oqi)
682 LASSERT_SPIN_LOCKED(&qinfo_list_lock);
683 list_del_init(&oqi->oqi_hash);
686 /* Look up the osc_quota_info for (cli, id, type), or NULL-equivalent
 * when absent (return on miss elided here).  Caller must hold
 * qinfo_list_lock. */
687 static inline struct osc_quota_info *find_qinfo(struct client_obd *cli,
688 unsigned int id, int type)
690 unsigned int hashent = hashfn(cli, id, type);
691 struct osc_quota_info *oqi;
693 LASSERT_SPIN_LOCKED(&qinfo_list_lock);
/* walk the bucket; all three key fields must match */
694 list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
695 if (oqi->oqi_cli == cli &&
696 oqi->oqi_id == id && oqi->oqi_type == type)
/*
 * Allocate and initialise an osc_quota_info from the slab for
 * (cli, id, type).  Not inserted into the hash here — the caller does
 * that under qinfo_list_lock.
 */
702 static struct osc_quota_info *alloc_qinfo(struct client_obd *cli,
703 unsigned int id, int type)
705 struct osc_quota_info *oqi;
708 OBD_SLAB_ALLOC(oqi, qinfo_cachep, CFS_ALLOC_STD, sizeof(*oqi));
712 INIT_LIST_HEAD(&oqi->oqi_hash);
715 oqi->oqi_type = type;
/* Return an osc_quota_info to the slab; must already be unhashed. */
720 static void free_qinfo(struct osc_quota_info *oqi)
722 OBD_SLAB_FREE(oqi, qinfo_cachep, sizeof(*oqi));
/*
 * Client-side quota pre-check: returns QUOTA_OK unless a cached
 * over-quota record exists for this client's uid or gid in qinfo_hash
 * (records are maintained by osc_quota_setdq from server replies).
 */
725 int osc_quota_chkdq(struct client_obd *cli, unsigned int uid, unsigned int gid)
728 int cnt, rc = QUOTA_OK;
731 spin_lock(&qinfo_list_lock);
732 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
733 struct osc_quota_info *oqi = NULL;
735 id = (cnt == USRQUOTA) ? uid : gid;
736 oqi = find_qinfo(cli, id, cnt);
742 spin_unlock(&qinfo_list_lock);
/*
 * Update the client's cached over-quota state from server-returned
 * per-type flags: for each quota type whose flag is valid, insert an
 * osc_quota_info when the id is newly over quota, or remove the stale
 * record when it no longer is.  An entry is pre-allocated outside the
 * lock and used only for the "no old record + noquota" case.
 */
747 int osc_quota_setdq(struct client_obd *cli, unsigned int uid, unsigned int gid,
748 obd_flag valid, obd_flag flags)
756 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
757 struct osc_quota_info *oqi, *old;
/* skip types the server did not report on */
759 if (!(valid & ((cnt == USRQUOTA) ?
760 OBD_MD_FLUSRQUOTA : OBD_MD_FLGRPQUOTA)))
763 id = (cnt == USRQUOTA) ? uid : gid;
764 noquota = (cnt == USRQUOTA) ?
765 (flags & OBD_FL_NO_USRQUOTA) : (flags & OBD_FL_NO_GRPQUOTA);
/* allocate before taking the spinlock (allocation may sleep) */
767 oqi = alloc_qinfo(cli, id, cnt);
769 spin_lock(&qinfo_list_lock);
771 old = find_qinfo(cli, id, cnt);
/* state transitions: drop stale record / insert new one */
773 remove_qinfo_hash(old);
774 else if (!old && noquota)
775 insert_qinfo_hash(oqi);
777 spin_unlock(&qinfo_list_lock);
784 CERROR("not enough mem!\n");
/*
 * Drop every cached over-quota record belonging to this OSC: walk all
 * hash buckets under qinfo_list_lock and unhash entries whose oqi_cli
 * matches this device's client obd.
 */
793 int osc_quota_cleanup(struct obd_device *obd)
795 struct client_obd *cli = &obd->u.cli;
796 struct osc_quota_info *oqi, *n;
800 spin_lock(&qinfo_list_lock);
801 for (i = 0; i < NR_DQHASH; i++) {
/* _safe variant: entries are removed while iterating */
802 list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
803 if (oqi->oqi_cli != cli)
805 remove_qinfo_hash(oqi);
809 spin_unlock(&qinfo_list_lock);
/*
 * One-time initialisation of the client quota cache: create the
 * osc_quota_info slab (must not already exist) and empty every hash
 * bucket.
 */
814 int osc_quota_init(void)
819 LASSERT(qinfo_cachep == NULL);
820 qinfo_cachep = cfs_mem_cache_create("osc_quota_info",
821 sizeof(struct osc_quota_info),
826 for (i = 0; i < NR_DQHASH; i++)
827 INIT_LIST_HEAD(qinfo_hash + i);
/*
 * Module-unload teardown for the client quota cache: unhash every
 * remaining osc_quota_info under qinfo_list_lock, then destroy the
 * slab (asserting it is empty).
 * NOTE(review): the LASSERTF message misspells "destroy" as "destory"
 * — cosmetic only, left as-is since it is a runtime string.
 */
832 int osc_quota_exit(void)
834 struct osc_quota_info *oqi, *n;
838 spin_lock(&qinfo_list_lock);
839 for (i = 0; i < NR_DQHASH; i++) {
840 list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
841 remove_qinfo_hash(oqi);
845 spin_unlock(&qinfo_list_lock);
847 rc = cfs_mem_cache_destroy(qinfo_cachep);
848 LASSERTF(rc == 0, "couldn't destory qinfo_cachep slab\n");
/* Quota method table for the MDS (quota master): full lifecycle plus
 * master-only operations (fs_cleanup, recovery) and inode-quota
 * check/acquire/commit. */
855 quota_interface_t mds_quota_interface = {
856 .quota_init = mds_quota_init,
857 .quota_exit = mds_quota_exit,
858 .quota_setup = mds_quota_setup,
859 .quota_cleanup = mds_quota_cleanup,
860 .quota_check = target_quota_check,
861 .quota_ctl = mds_quota_ctl,
862 .quota_fs_cleanup =mds_quota_fs_cleanup,
863 .quota_recovery = mds_quota_recovery,
864 .quota_adjust = mds_quota_adjust,
865 .quota_chkquota = mds_quota_check,
866 .quota_acquire = mds_quota_acquire,
867 .quota_pending_commit = mds_quota_pending_commit,
/* Quota method table for the OST (quota slave): block-quota
 * check/acquire/commit plus per-export setinfo/clearinfo and the
 * enforcement/getflag hooks used on the write path. */
870 quota_interface_t filter_quota_interface = {
871 .quota_setup = filter_quota_setup,
872 .quota_cleanup = filter_quota_cleanup,
873 .quota_check = target_quota_check,
874 .quota_ctl = filter_quota_ctl,
875 .quota_setinfo = filter_quota_setinfo,
876 .quota_clearinfo = filter_quota_clearinfo,
877 .quota_enforce = filter_quota_enforce,
878 .quota_getflag = filter_quota_getflag,
879 .quota_acquire = filter_quota_acquire,
880 .quota_adjust = filter_quota_adjust,
881 .quota_chkquota = filter_quota_check,
882 .quota_adjust_qunit = filter_quota_adjust_qunit,
883 .quota_pending_commit = filter_quota_pending_commit,
885 #endif /* __KERNEL__ */
/* Quota method table for the MDC (client of the MDS): RPC pass-through
 * helpers only. */
887 quota_interface_t mdc_quota_interface = {
888 .quota_ctl = client_quota_ctl,
889 .quota_check = client_quota_check,
890 .quota_poll_check = client_quota_poll_check,
/* Quota method table for the OSC (client of an OST): RPC pass-throughs
 * plus the local over-quota cache (chkdq/setdq and its init/exit/
 * cleanup). */
893 quota_interface_t osc_quota_interface = {
894 .quota_ctl = client_quota_ctl,
895 .quota_check = client_quota_check,
896 .quota_poll_check = client_quota_poll_check,
897 .quota_init = osc_quota_init,
898 .quota_exit = osc_quota_exit,
899 .quota_chkdq = osc_quota_chkdq,
900 .quota_setdq = osc_quota_setdq,
901 .quota_cleanup = osc_quota_cleanup,
902 .quota_adjust_qunit = client_quota_adjust_qunit,
/* Quota method table for the LOV: fans operations out across the
 * striped OSCs. */
905 quota_interface_t lov_quota_interface = {
906 .quota_check = lov_quota_check,
907 .quota_ctl = lov_quota_ctl,
908 .quota_adjust_qunit = lov_quota_adjust_qunit,
/*
 * Module init: set up the qunit cache, then register every quota
 * interface table so other Lustre modules can look them up by symbol.
 */
912 static int __init init_lustre_quota(void)
914 int rc = qunit_cache_init();
917 PORTAL_SYMBOL_REGISTER(filter_quota_interface);
918 PORTAL_SYMBOL_REGISTER(mds_quota_interface);
919 PORTAL_SYMBOL_REGISTER(mdc_quota_interface);
920 PORTAL_SYMBOL_REGISTER(osc_quota_interface);
921 PORTAL_SYMBOL_REGISTER(lov_quota_interface);
/*
 * Module exit: mirror of init_lustre_quota() — unregister all
 * interface symbols, then free the qunit cache.
 */
925 static void /*__exit*/ exit_lustre_quota(void)
927 PORTAL_SYMBOL_UNREGISTER(filter_quota_interface);
928 PORTAL_SYMBOL_UNREGISTER(mds_quota_interface);
929 PORTAL_SYMBOL_UNREGISTER(mdc_quota_interface);
930 PORTAL_SYMBOL_UNREGISTER(osc_quota_interface);
931 PORTAL_SYMBOL_UNREGISTER(lov_quota_interface);
933 qunit_cache_cleanup();
936 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
937 MODULE_DESCRIPTION("Lustre Quota");
938 MODULE_LICENSE("GPL");
940 cfs_module(lquota, "1.0.0", init_lustre_quota, exit_lustre_quota);
942 EXPORT_SYMBOL(mds_quota_interface);
943 EXPORT_SYMBOL(filter_quota_interface);
944 EXPORT_SYMBOL(mdc_quota_interface);
945 EXPORT_SYMBOL(osc_quota_interface);
946 EXPORT_SYMBOL(lov_quota_interface);
947 #endif /* __KERNEL */