1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 # define EXPORT_SYMTAB
40 #define DEBUG_SUBSYSTEM S_LQUOTA
43 # include <linux/version.h>
44 # include <linux/module.h>
45 # include <linux/init.h>
46 # include <linux/fs.h>
47 # include <linux/jbd.h>
48 # include <linux/smp_lock.h>
49 # include <linux/buffer_head.h>
50 # include <linux/workqueue.h>
51 # include <linux/mount.h>
52 #else /* __KERNEL__ */
53 # include <liblustre.h>
56 #include <obd_class.h>
57 #include <lustre_mds.h>
58 #include <lustre_dlm.h>
59 #include <lustre_cfg.h>
61 #include <lustre_fsfilt.h>
62 #include <lustre_quota.h>
63 #include <lprocfs_status.h>
64 #include "quota_internal.h"
68 #ifdef HAVE_QUOTA_SUPPORT
/* Rate-limit state for the "still haven't acquired quota" warning emitted
 * from quota_chk_acq_common(); last_print_lock serializes readers/writers
 * of last_print across server threads. */
70 static cfs_time_t last_print = 0;
71 static spinlock_t last_print_lock = SPIN_LOCK_UNLOCKED;
/* OST (filter) quota-slave setup: select the on-disk quota file format for
 * this target and initialize the quota context without a master handler
 * (qctxt_init() is passed NULL, unlike mds_quota_setup()).
 * NOTE(review): several source lines are elided in this view; the format
 * choice between V2 and V1 presumably depends on a condition not shown. */
73 static int filter_quota_setup(struct obd_device *obd)
76 struct obd_device_target *obt = &obd->u.obt;
80 obt->obt_qfmt = LUSTRE_QUOTA_V2;
82 obt->obt_qfmt = LUSTRE_QUOTA_V1;
84 atomic_set(&obt->obt_quotachecking, 1);
85 rc = qctxt_init(obd, NULL);
87 CERROR("initialize quota context failed! (rc:%d)\n", rc);
/* OST quota-slave teardown: release the per-target quota context set up by
 * filter_quota_setup(). */
92 static int filter_quota_cleanup(struct obd_device *obd)
94 qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/* Called when the quota master (re)connects to this OST: publish the
 * reverse import as lqc_import under lqc_lock, copy the quota-related
 * connect flags onto the import, wake any threads blocked waiting for the
 * master, and kick off slave recovery to release over-acquired limits. */
98 static int filter_quota_setinfo(struct obd_export *exp, struct obd_device *obd)
100 struct obd_import *imp;
101 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
104 /* setup the quota context import */
105 spin_lock(&obd->u.obt.obt_qctxt.lqc_lock);
106 obd->u.obt.obt_qctxt.lqc_import = exp->exp_imp_reverse;
107 spin_unlock(&obd->u.obt.obt_qctxt.lqc_lock);
108 CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated now, \n",
109 obd->obd_name,exp->exp_imp_reverse, obd);
111 /* make imp's connect flags equal relative exp's connect flags
112 * adding it to avoid the scan export list
114 imp = exp->exp_imp_reverse;
116 imp->imp_connect_data.ocd_connect_flags |=
117 (exp->exp_connect_flags &
118 (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
/* wake threads sleeping in quota_chk_acq_common() for the master */
120 cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
121 /* start quota slave recovery thread. (release high limits) */
122 qslave_start_recovery(obd, &obd->u.obt.obt_qctxt);
/* Counterpart of filter_quota_setinfo(): when the export (and thus its
 * reverse import) is being destroyed, invalidate lqc_import so no new
 * dqacq RPCs are sent through it, clean up the import, and interrupt any
 * in-flight dqacq waiters. */
126 static int filter_quota_clearinfo(struct obd_export *exp, struct obd_device *obd)
128 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
131 /* lquota may be not set up before destroying export, b=14896 */
132 if (!obd->obd_set_up)
135 /* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
136 * should be invalid b=12374 */
137 if (qctxt->lqc_import && qctxt->lqc_import == exp->exp_imp_reverse) {
138 spin_lock(&qctxt->lqc_lock);
139 qctxt->lqc_import = NULL;
140 spin_unlock(&qctxt->lqc_lock);
141 ptlrpc_cleanup_imp(exp->exp_imp_reverse);
142 dqacq_interrupt(qctxt);
143 CDEBUG(D_QUOTA, "%s: lqc_import of obd(%p) is invalid now.\n",
/* Toggle quota enforcement for the current thread: when 'ignore' is set,
 * raise CFS_CAP_SYS_RESOURCE so writes bypass quota checks; otherwise lower
 * it. No-op when no quota type is enabled on the backing filesystem.
 * NOTE(review): the branch structure around 'ignore' is elided here. */
149 static int filter_quota_enforce(struct obd_device *obd, unsigned int ignore)
153 if (!sb_any_quota_enabled(obd->u.obt.obt_sb))
157 CDEBUG(D_QUOTA, "blocks will be written with ignoring quota.\n");
158 cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
160 cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
/* Fill the over-quota flags (OBD_FL_NO_USRQUOTA / OBD_FL_NO_GRPQUOTA) in
 * 'oa' for the uid/gid it describes, so the client knows whether to write
 * synchronously.  For each quota type: if the per-id qunit size has shrunk
 * to/below lqc_sync_blk the sync flag is forced on; otherwise the local
 * quota usage is queried via fsfilt_quotactl(Q_GETQUOTA) and the flag is
 * set when cur space has reached the hard block limit.
 * Fix: CERROR message now ends with '\n' like every other console message
 * in this file. */
166 static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
168 struct obd_device_target *obt = &obd->u.obt;
169 struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
170 int err, cnt, rc = 0;
171 struct obd_quotactl *oqctl;
174 if (!sb_any_quota_enabled(obt->obt_sb))
177 OBD_ALLOC_PTR(oqctl);
179 CERROR("Not enough memory!\n");
183 /* set over quota flags for a uid/gid */
184 oa->o_valid |= OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA;
185 oa->o_flags &= ~(OBD_FL_NO_USRQUOTA | OBD_FL_NO_GRPQUOTA);
187 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
188 struct quota_adjust_qunit oqaq_tmp;
189 struct lustre_qunit_size *lqs = NULL;
191 oqaq_tmp.qaq_flags = cnt;
192 oqaq_tmp.qaq_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
194 quota_search_lqs(NULL, &oqaq_tmp, qctxt, &lqs);
196 spin_lock(&lqs->lqs_lock);
197 if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
198 oa->o_flags |= (cnt == USRQUOTA) ?
199 OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
200 CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
201 "sync_blk(%d)\n", lqs->lqs_bunit_sz,
202 qctxt->lqc_sync_blk);
203 spin_unlock(&lqs->lqs_lock);
204 /* this is for quota_search_lqs */
208 spin_unlock(&lqs->lqs_lock);
209 /* this is for quota_search_lqs */
213 memset(oqctl, 0, sizeof(*oqctl));
215 oqctl->qc_cmd = Q_GETQUOTA;
216 oqctl->qc_type = cnt;
217 oqctl->qc_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
218 err = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
222 oa->o_valid &= ~((cnt == USRQUOTA) ? OBD_MD_FLUSRQUOTA :
227 if (oqctl->qc_dqblk.dqb_bhardlimit &&
228 (toqb(oqctl->qc_dqblk.dqb_curspace) >=
229 oqctl->qc_dqblk.dqb_bhardlimit))
230 oa->o_flags |= (cnt == USRQUOTA) ?
231 OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
/* OST-side quota acquire callback: ask the master to adjust the block
 * qunit for this uid/gid (LQUOTA_FLAGS_BLK selects block quota, the '1'
 * requests a wait for completion — contrast mds_quota_acquire()). */
237 static int filter_quota_acquire(struct obd_device *obd, unsigned int uid,
238 unsigned int gid, struct obd_trans_info *oti)
240 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
244 rc = qctxt_adjust_qunit(obd, qctxt, uid, gid, LQUOTA_FLAGS_BLK, 1, oti);
248 /* check whether the left quota of certain uid and gid can satisfy a block_write
249 * or inode_create rpc. When need to acquire quota, return QUOTA_RET_ACQUOTA */
/* For both USRQUOTA and GRPQUOTA: build a qunit_data for the id, look up
 * its lustre_qunit_size, record the pending write (blocks: count pages
 * plus extra metadata blocks from fsfilt_get_mblk; inodes: count) under
 * lqs_lock, fold in any in-flight quota releases (lqs_*_rec < 0), and
 * compare remaining quota from compute_remquota() against the pending
 * total.  Returns QUOTA_RET_ACQUOTA if either type needs more quota.
 * NOTE(review): many lines (locals, returns, lqs getref/putref) are elided
 * in this view — do not infer the full locking protocol from here alone. */
250 static int quota_check_common(struct obd_device *obd, unsigned int uid,
251 unsigned int gid, int count, int cycle, int isblk,
252 struct inode *inode, int frags, int *pending)
254 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
256 __u32 id[MAXQUOTAS] = { uid, gid };
257 struct qunit_data qdata[MAXQUOTAS];
259 int rc = 0, rc2[2] = { 0, 0 };
262 CLASSERT(MAXQUOTAS < 4);
263 if (!sb_any_quota_enabled(qctxt->lqc_sb))
266 spin_lock(&qctxt->lqc_lock);
267 if (!qctxt->lqc_valid){
268 spin_unlock(&qctxt->lqc_lock);
271 spin_unlock(&qctxt->lqc_lock);
273 for (i = 0; i < MAXQUOTAS; i++) {
274 struct lustre_qunit_size *lqs = NULL;
276 qdata[i].qd_id = id[i];
277 qdata[i].qd_flags = i;
279 QDATA_SET_BLK(&qdata[i]);
280 qdata[i].qd_count = 0;
282 /* ignore root user */
283 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
286 quota_search_lqs(&qdata[i], NULL, qctxt, &lqs);
290 rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
291 spin_lock(&lqs->lqs_lock);
294 *pending = count * CFS_PAGE_SIZE;
295 /* in order to complete this write, we need extra
296 * meta blocks. This function can get it through
297 * data needed to be written b=16542 */
299 LASSERT(inode && frags > 0);
300 if (fsfilt_get_mblk(obd, qctxt->lqc_sb, &mb,
303 "can't get extra meta blocks.\n");
306 lqs->lqs_bwrite_pending += *pending;
309 lqs->lqs_iwrite_pending += *pending;
313 /* if xx_rec < 0, that means quota are releasing,
314 * and it may return before we use quota. So if
315 * we find this situation, we assuming it has
316 * returned b=18491 */
317 if (isblk && lqs->lqs_blk_rec < 0) {
318 if (qdata[i].qd_count < -lqs->lqs_blk_rec)
319 qdata[i].qd_count = 0;
321 qdata[i].qd_count += lqs->lqs_blk_rec;
323 if (!isblk && lqs->lqs_ino_rec < 0) {
324 if (qdata[i].qd_count < -lqs->lqs_ino_rec)
325 qdata[i].qd_count = 0;
327 qdata[i].qd_count += lqs->lqs_ino_rec;
330 CDEBUG(D_QUOTA, "count: %d, lqs pending: %lu, qd_count: "LPU64
331 ", metablocks: %d, isblk: %d, pending: %d.\n", count,
332 isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
333 qdata[i].qd_count, mb, isblk, *pending);
334 if (rc2[i] == QUOTA_RET_OK) {
335 if (isblk && qdata[i].qd_count < lqs->lqs_bwrite_pending)
336 rc2[i] = QUOTA_RET_ACQUOTA;
337 if (!isblk && qdata[i].qd_count <
338 lqs->lqs_iwrite_pending)
339 rc2[i] = QUOTA_RET_ACQUOTA;
342 spin_unlock(&lqs->lqs_lock);
344 if (lqs->lqs_blk_rec < 0 &&
346 lqs->lqs_bwrite_pending - lqs->lqs_blk_rec - mb)
347 OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);
349 /* When cycle is zero, lqs_*_pending will be changed. We will
350 * get reference of the lqs here and put reference of lqs in
351 * quota_pending_commit b=14784 */
355 /* this is for quota_search_lqs */
359 if (rc2[0] == QUOTA_RET_ACQUOTA || rc2[1] == QUOTA_RET_ACQUOTA)
360 RETURN(QUOTA_RET_ACQUOTA);
/* Check-and-acquire loop shared by OST writes and MDS creates: repeat
 * quota_check_common() and, while it reports QUOTA_RET_ACQUOTA, call the
 * supplied acquire() callback.  If the master is unreachable, sleep on
 * lqc_wait_for_qmaster (disabling the service thread watchdog so the wait
 * is not flagged as a hang).  Acquire errors back off with a bounded
 * timed wait; a rate-limited CWARN (via last_print/last_print_lock) is
 * emitted on persistent failure.  Wall time spent is accounted into the
 * lprocfs LQUOTA_WAIT_FOR_CHK_{BLK,INO} counters.
 * NOTE(review): loop-exit conditions and several branches are elided in
 * this view of the file. */
365 static int quota_chk_acq_common(struct obd_device *obd, unsigned int uid,
366 unsigned int gid, int count, int *pending,
367 int isblk, quota_acquire acquire,
368 struct obd_trans_info *oti, struct inode *inode,
371 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
372 struct timeval work_start;
373 struct timeval work_end;
375 struct l_wait_info lwi = { 0 };
376 int rc = 0, cycle = 0, count_err = 1;
379 CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
380 /* Unfortunately, if quota master is too busy to handle the
381 * pre-dqacq in time and quota hash on ost is used up, we
382 * have to wait for the completion of in flight dqacq/dqrel,
383 * in order to get enough quota for write b=12588 */
384 do_gettimeofday(&work_start);
385 while ((rc = quota_check_common(obd, uid, gid, count, cycle, isblk,
386 inode, frags, pending)) & QUOTA_RET_ACQUOTA) {
388 spin_lock(&qctxt->lqc_lock);
389 if (!qctxt->lqc_import && oti) {
390 spin_unlock(&qctxt->lqc_lock);
392 LASSERT(oti && oti->oti_thread &&
393 oti->oti_thread->t_watchdog);
395 lc_watchdog_disable(oti->oti_thread->t_watchdog);
396 CDEBUG(D_QUOTA, "sleep for quota master\n");
397 l_wait_event(qctxt->lqc_wait_for_qmaster, check_qm(qctxt),
399 CDEBUG(D_QUOTA, "wake up when quota master is back\n");
400 lc_watchdog_touch(oti->oti_thread->t_watchdog);
402 spin_unlock(&qctxt->lqc_lock);
407 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
408 /* after acquire(), we should run quota_check_common again
409 * so that we confirm there are enough quota to finish write */
410 rc = acquire(obd, uid, gid, oti);
412 /* please reference to dqacq_completion for the below */
413 /* a new request is finished, try again */
414 if (rc == QUOTA_REQ_RETURNED) {
415 CDEBUG(D_QUOTA, "finish a quota req, try again\n");
419 /* it is out of quota already */
421 CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
425 /* -EBUSY and others, wait a second and try again */
428 struct l_wait_info lwi;
430 if (oti && oti->oti_thread && oti->oti_thread->t_watchdog)
431 lc_watchdog_touch(oti->oti_thread->t_watchdog);
432 CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
435 init_waitqueue_head(&waitq);
436 lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
438 l_wait_event(waitq, 0, &lwi);
441 if (rc < 0 || cycle % 10 == 2) {
442 spin_lock(&last_print_lock);
443 if (last_print == 0 ||
444 cfs_time_before((last_print + cfs_time_seconds(30)),
445 cfs_time_current())) {
446 CWARN("still haven't managed to acquire quota "
447 "space from the quota master after %d "
448 "retries (err=%d, rc=%d)\n",
449 cycle, count_err - 1, rc);
450 last_print = cfs_time_current();
452 spin_unlock(&last_print_lock);
455 CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
459 do_gettimeofday(&work_end);
460 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
461 lprocfs_counter_add(qctxt->lqc_stats,
462 isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
463 LQUOTA_WAIT_FOR_CHK_INO,
/* OST entry for the quota_chkquota interface hook: block-quota check for
 * an npage write, delegating to quota_chk_acq_common() with the filter
 * acquire callback and LQUOTA_FLAGS_BLK. */
469 static int filter_quota_check(struct obd_device *obd, unsigned int uid,
470 unsigned int gid, int npage, int *pending,
471 quota_acquire acquire, struct obd_trans_info *oti,
472 struct inode *inode, int frags)
474 return quota_chk_acq_common(obd, uid, gid, npage, pending, LQUOTA_FLAGS_BLK,
475 acquire, oti, inode, frags);
478 /* when a block_write or inode_create rpc is finished, adjust the record for
479 * pending blocks and inodes*/
/* Undo the lqs_bwrite_pending/lqs_iwrite_pending bookkeeping added by
 * quota_check_common() once the RPC completes, warning if the pending
 * counter would underflow.  Elapsed time goes to the lprocfs
 * LQUOTA_WAIT_FOR_COMMIT_{BLK,INO} counters.
 * NOTE(review): the matching lqs putref and several branches are elided
 * in this view (see b=14784 reference in quota_check_common). */
480 static int quota_pending_commit(struct obd_device *obd, unsigned int uid,
481 unsigned int gid, int pending, int isblk)
483 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
484 struct timeval work_start;
485 struct timeval work_end;
488 __u32 id[MAXQUOTAS] = { uid, gid };
489 struct qunit_data qdata[MAXQUOTAS];
492 CDEBUG(D_QUOTA, "commit pending quota for %s\n", obd->obd_name);
493 CLASSERT(MAXQUOTAS < 4);
494 if (!sb_any_quota_enabled(qctxt->lqc_sb))
497 do_gettimeofday(&work_start);
498 for (i = 0; i < MAXQUOTAS; i++) {
499 struct lustre_qunit_size *lqs = NULL;
501 qdata[i].qd_id = id[i];
502 qdata[i].qd_flags = i;
504 QDATA_SET_BLK(&qdata[i]);
505 qdata[i].qd_count = 0;
507 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
510 quota_search_lqs(&qdata[i], NULL, qctxt, &lqs);
513 spin_lock(&lqs->lqs_lock);
515 if (lqs->lqs_bwrite_pending >= pending) {
516 lqs->lqs_bwrite_pending -= pending;
520 "there are too many blocks!\n");
523 if (lqs->lqs_iwrite_pending >= pending) {
524 lqs->lqs_iwrite_pending -= pending;
528 "there are too many files!\n");
531 CDEBUG(D_QUOTA, "lqs pending: %lu, pending: %d, "
533 isblk ? lqs->lqs_bwrite_pending :
534 lqs->lqs_iwrite_pending, pending, isblk);
536 spin_unlock(&lqs->lqs_lock);
538 /* When lqs_*_pening is changed back, we'll putref lqs
544 do_gettimeofday(&work_end);
545 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
546 lprocfs_counter_add(qctxt->lqc_stats,
547 isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
548 LQUOTA_WAIT_FOR_COMMIT_INO,
/* OST entry for quota_pending_commit: commit block-quota pending counts. */
554 static int filter_quota_pending_commit(struct obd_device *obd, unsigned int uid,
555 unsigned int gid, int blocks)
557 return quota_pending_commit(obd, uid, gid, blocks, LQUOTA_FLAGS_BLK);
/* MDS quota module init: set up the lustre dquot hash/cache. */
560 static int mds_quota_init(void)
562 return lustre_dquot_init();
565 static int mds_quota_exit(void)
/* MDS quota-master setup: choose the on-disk quota format, record the
 * administrative quota-file version, and initialize the quota context with
 * dqacq_handler so this node services dqacq/dqrel requests from slaves
 * (contrast filter_quota_setup(), which passes NULL).
 * NOTE(review): several lines are elided in this view. */
571 static int mds_quota_setup(struct obd_device *obd)
573 struct obd_device_target *obt = &obd->u.obt;
574 struct mds_obd *mds = &obd->u.mds;
579 obt->obt_qfmt = LUSTRE_QUOTA_V2;
581 obt->obt_qfmt = LUSTRE_QUOTA_V1;
583 mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
584 atomic_set(&obt->obt_quotachecking, 1);
585 /* initialize quota master and quota context */
586 sema_init(&mds->mds_qonoff_sem, 1);
587 rc = qctxt_init(obd, dqacq_handler);
589 CERROR("initialize quota context failed! (rc:%d)\n", rc);
/* MDS quota-master teardown: release the quota context from mds_quota_setup(). */
595 static int mds_quota_cleanup(struct obd_device *obd)
597 qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/* Turn off administrative quotas (both user and group: UGQUOTA) on the MDS
 * at filesystem cleanup, serialized by mds_qonoff_sem. */
601 static int mds_quota_fs_cleanup(struct obd_device *obd)
603 struct mds_obd *mds = &obd->u.mds;
604 struct obd_quotactl oqctl;
607 memset(&oqctl, 0, sizeof(oqctl));
608 oqctl.qc_type = UGQUOTA;
610 down(&mds->mds_qonoff_sem);
611 mds_admin_quota_off(obd, &oqctl);
612 up(&mds->mds_qonoff_sem);
/* MDS entry for the quota_chkquota hook: inode-quota check for creating
 * 'inodes' files (isblk = 0), via quota_chk_acq_common(). */
616 static int mds_quota_check(struct obd_device *obd, unsigned int uid,
617 unsigned int gid, int inodes, int *pending,
618 quota_acquire acquire, struct obd_trans_info *oti,
619 struct inode *inode, int frags)
621 return quota_chk_acq_common(obd, uid, gid, inodes, pending, 0,
622 acquire, oti, inode, frags);
/* MDS-side quota acquire callback: adjust the inode qunit for this uid/gid
 * (flags = 0 selects inode quota, vs LQUOTA_FLAGS_BLK on the OST path). */
625 static int mds_quota_acquire(struct obd_device *obd, unsigned int uid,
626 unsigned int gid, struct obd_trans_info *oti)
628 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
632 rc = qctxt_adjust_qunit(obd, qctxt, uid, gid, 0, 1, oti);
/* MDS entry for quota_pending_commit: commit inode-quota pending counts. */
636 static int mds_quota_pending_commit(struct obd_device *obd, unsigned int uid,
637 unsigned int gid, int inodes)
639 return quota_pending_commit(obd, uid, gid, inodes, 0);
641 #endif /* HAVE_QUOTA_SUPPORT */
642 #endif /* __KERNEL__ */
/* Client-side record of an over-quota uid/gid for one OSC: its presence in
 * qinfo_hash means that id is currently flagged NO_*QUOTA by the server. */
644 struct osc_quota_info {
645 struct list_head oqi_hash; /* hash list */
646 struct client_obd *oqi_cli; /* osc obd */
647 unsigned int oqi_id; /* uid/gid of a file */
648 short oqi_type; /* quota type */
/* Protects qinfo_hash and every oqi_hash list linked into it. */
651 spinlock_t qinfo_list_lock = SPIN_LOCK_UNLOCKED;
653 static struct list_head qinfo_hash[NR_DQHASH];
654 /* SLAB cache for client quota context */
655 cfs_mem_cache_t *qinfo_cachep = NULL;
/* Hash an (osc, id, quota-type) triple into a qinfo_hash bucket index in
 * [0, NR_DQHASH): mixes the client pointer (>>6 to drop allocator-aligned
 * low bits) with the id, then folds in the quota type. */
657 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
658 __attribute__((__const__));
660 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
662 unsigned long tmp = ((unsigned long)cli>>6) ^ id;
663 tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
667 /* caller must hold qinfo_list_lock */
/* Link 'oqi' into the bucket chosen by hashfn() over its (cli, id, type). */
668 static inline void insert_qinfo_hash(struct osc_quota_info *oqi)
670 struct list_head *head = qinfo_hash +
671 hashfn(oqi->oqi_cli, oqi->oqi_id, oqi->oqi_type);
673 LASSERT_SPIN_LOCKED(&qinfo_list_lock);
674 list_add(&oqi->oqi_hash, head);
677 /* caller must hold qinfo_list_lock */
/* Unlink 'oqi' from its hash bucket (list_del_init leaves it re-insertable). */
678 static inline void remove_qinfo_hash(struct osc_quota_info *oqi)
680 LASSERT_SPIN_LOCKED(&qinfo_list_lock);
681 list_del_init(&oqi->oqi_hash);
684 /* caller must hold qinfo_list_lock */
/* Walk the bucket for (cli, id, type) and return the matching entry, if any
 * (the return statements are elided in this view). */
685 static inline struct osc_quota_info *find_qinfo(struct client_obd *cli,
686 unsigned int id, int type)
688 unsigned int hashent = hashfn(cli, id, type);
689 struct osc_quota_info *oqi;
691 LASSERT_SPIN_LOCKED(&qinfo_list_lock);
692 list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
693 if (oqi->oqi_cli == cli &&
694 oqi->oqi_id == id && oqi->oqi_type == type)
/* Allocate and initialize an osc_quota_info from the qinfo slab cache.
 * NOTE(review): the oqi_cli/oqi_id assignments and the NULL-check/return
 * are elided in this view. */
700 static struct osc_quota_info *alloc_qinfo(struct client_obd *cli,
701 unsigned int id, int type)
703 struct osc_quota_info *oqi;
706 OBD_SLAB_ALLOC(oqi, qinfo_cachep, CFS_ALLOC_STD, sizeof(*oqi));
710 CFS_INIT_LIST_HEAD(&oqi->oqi_hash);
713 oqi->oqi_type = type;
/* Return an osc_quota_info to the qinfo slab cache. */
718 static void free_qinfo(struct osc_quota_info *oqi)
720 OBD_SLAB_FREE(oqi, qinfo_cachep, sizeof(*oqi));
/* Client-side pre-check before sending a write: consult the cached
 * over-quota hash for both uid and gid of this OSC.  A hit presumably
 * downgrades rc from QUOTA_OK (the hit branch is elided in this view). */
723 int osc_quota_chkdq(struct client_obd *cli, unsigned int uid, unsigned int gid)
726 int cnt, rc = QUOTA_OK;
729 spin_lock(&qinfo_list_lock);
730 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
731 struct osc_quota_info *oqi = NULL;
733 id = (cnt == USRQUOTA) ? uid : gid;
734 oqi = find_qinfo(cli, id, cnt);
740 spin_unlock(&qinfo_list_lock);
/* Update the client's over-quota cache from server reply flags: for each
 * quota type whose OBD_MD_FL*QUOTA bit is set in 'valid', insert a qinfo
 * entry when the NO_*QUOTA flag says the id is over quota and none is
 * cached, or remove the stale entry when the flag cleared.  The alloc is
 * done before taking qinfo_list_lock since it may sleep; the unused-oqi
 * free path and error handling are elided in this view. */
745 int osc_quota_setdq(struct client_obd *cli, unsigned int uid, unsigned int gid,
746 obd_flag valid, obd_flag flags)
754 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
755 struct osc_quota_info *oqi, *old;
757 if (!(valid & ((cnt == USRQUOTA) ?
758 OBD_MD_FLUSRQUOTA : OBD_MD_FLGRPQUOTA)))
761 id = (cnt == USRQUOTA) ? uid : gid;
762 noquota = (cnt == USRQUOTA) ?
763 (flags & OBD_FL_NO_USRQUOTA) : (flags & OBD_FL_NO_GRPQUOTA);
765 oqi = alloc_qinfo(cli, id, cnt);
767 spin_lock(&qinfo_list_lock);
769 old = find_qinfo(cli, id, cnt);
771 remove_qinfo_hash(old);
772 else if (!old && noquota)
773 insert_qinfo_hash(oqi);
775 spin_unlock(&qinfo_list_lock);
782 CERROR("not enough mem!\n");
/* Per-OSC teardown: drop every cached over-quota entry belonging to this
 * client from the global hash (entries for other clients are skipped). */
791 int osc_quota_cleanup(struct obd_device *obd)
793 struct client_obd *cli = &obd->u.cli;
794 struct osc_quota_info *oqi, *n;
798 spin_lock(&qinfo_list_lock);
799 for (i = 0; i < NR_DQHASH; i++) {
800 list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
801 if (oqi->oqi_cli != cli)
803 remove_qinfo_hash(oqi);
807 spin_unlock(&qinfo_list_lock);
/* Module-load init for the client quota cache: create the qinfo slab
 * (must not already exist) and empty-initialize every hash bucket. */
812 int osc_quota_init(void)
817 LASSERT(qinfo_cachep == NULL);
818 qinfo_cachep = cfs_mem_cache_create("osc_quota_info",
819 sizeof(struct osc_quota_info),
824 for (i = 0; i < NR_DQHASH; i++)
825 CFS_INIT_LIST_HEAD(qinfo_hash + i);
/* Module-unload teardown for the client quota cache: unhash (and free —
 * the free call is elided in this view) every remaining osc_quota_info,
 * then destroy the qinfo slab cache, asserting it drained cleanly.
 * Fix: corrected "destory" -> "destroy" in the assertion message. */
830 int osc_quota_exit(void)
832 struct osc_quota_info *oqi, *n;
836 spin_lock(&qinfo_list_lock);
837 for (i = 0; i < NR_DQHASH; i++) {
838 list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
839 remove_qinfo_hash(oqi);
843 spin_unlock(&qinfo_list_lock);
845 rc = cfs_mem_cache_destroy(qinfo_cachep);
846 LASSERTF(rc == 0, "couldn't destroy qinfo_cachep slab\n");
853 #ifdef HAVE_QUOTA_SUPPORT
/* Quota interface vector for the MDS (quota master): module lifecycle,
 * per-device setup/cleanup, master-side control, recovery, and the
 * inode-quota check/acquire/commit path. */
854 quota_interface_t mds_quota_interface = {
855 .quota_init = mds_quota_init,
856 .quota_exit = mds_quota_exit,
857 .quota_setup = mds_quota_setup,
858 .quota_cleanup = mds_quota_cleanup,
859 .quota_check = target_quota_check,
860 .quota_ctl = mds_quota_ctl,
861 .quota_fs_cleanup =mds_quota_fs_cleanup,
862 .quota_recovery = mds_quota_recovery,
863 .quota_adjust = mds_quota_adjust,
864 .quota_chkquota = mds_quota_check,
865 .quota_acquire = mds_quota_acquire,
866 .quota_pending_commit = mds_quota_pending_commit,
/* Quota interface vector for the OST (quota slave): per-device setup and
 * the block-quota enforce/getflag/check/acquire/commit path. */
869 quota_interface_t filter_quota_interface = {
870 .quota_setup = filter_quota_setup,
871 .quota_cleanup = filter_quota_cleanup,
872 .quota_check = target_quota_check,
873 .quota_ctl = filter_quota_ctl,
874 .quota_setinfo = filter_quota_setinfo,
875 .quota_clearinfo = filter_quota_clearinfo,
876 .quota_enforce = filter_quota_enforce,
877 .quota_getflag = filter_quota_getflag,
878 .quota_acquire = filter_quota_acquire,
879 .quota_adjust = filter_quota_adjust,
880 .quota_chkquota = filter_quota_check,
881 .quota_adjust_qunit = filter_quota_adjust_qunit,
882 .quota_pending_commit = filter_quota_pending_commit,
885 #endif /* __KERNEL__ */
/* Client-side (MDC) quota interface: control/check RPCs only. */
887 quota_interface_t mdc_quota_interface = {
888 .quota_ctl = client_quota_ctl,
889 .quota_check = client_quota_check,
890 .quota_poll_check = client_quota_poll_check,
/* Client-side (OSC) quota interface: control/check RPCs plus the local
 * over-quota cache (chkdq/setdq) maintained in this file. */
893 quota_interface_t osc_quota_interface = {
894 .quota_ctl = client_quota_ctl,
895 .quota_check = client_quota_check,
896 .quota_poll_check = client_quota_poll_check,
897 .quota_init = osc_quota_init,
898 .quota_exit = osc_quota_exit,
899 .quota_chkdq = osc_quota_chkdq,
900 .quota_setdq = osc_quota_setdq,
901 .quota_cleanup = osc_quota_cleanup,
902 .quota_adjust_qunit = client_quota_adjust_qunit,
/* LOV quota interface: fans quota operations out across the stripe targets. */
905 quota_interface_t lov_quota_interface = {
906 .quota_check = lov_quota_check,
907 .quota_ctl = lov_quota_ctl,
908 .quota_adjust_qunit = lov_quota_adjust_qunit,
913 cfs_proc_dir_entry_t *lquota_type_proc_dir = NULL;
/* Module entry point: register the lquota procfs directory, initialize the
 * qunit cache (HAVE_QUOTA_SUPPORT only), and export the quota interface
 * vectors via PORTAL_SYMBOL_REGISTER. */
915 static int __init init_lustre_quota(void)
917 #ifdef HAVE_QUOTA_SUPPORT
920 lquota_type_proc_dir = lprocfs_register(OBD_LQUOTA_DEVICENAME,
923 if (IS_ERR(lquota_type_proc_dir)) {
924 CERROR("LProcFS failed in lquota-init\n");
925 rc = PTR_ERR(lquota_type_proc_dir);
929 rc = qunit_cache_init();
933 PORTAL_SYMBOL_REGISTER(filter_quota_interface);
934 PORTAL_SYMBOL_REGISTER(mds_quota_interface);
936 PORTAL_SYMBOL_REGISTER(mdc_quota_interface);
937 PORTAL_SYMBOL_REGISTER(osc_quota_interface);
938 PORTAL_SYMBOL_REGISTER(lov_quota_interface);
/* Module exit: unregister everything init_lustre_quota() registered, in
 * reverse — portal symbols, qunit cache, then the procfs directory. */
942 static void /*__exit*/ exit_lustre_quota(void)
944 PORTAL_SYMBOL_UNREGISTER(mdc_quota_interface);
945 PORTAL_SYMBOL_UNREGISTER(osc_quota_interface);
946 PORTAL_SYMBOL_UNREGISTER(lov_quota_interface);
947 #ifdef HAVE_QUOTA_SUPPORT
948 PORTAL_SYMBOL_UNREGISTER(filter_quota_interface);
949 PORTAL_SYMBOL_UNREGISTER(mds_quota_interface);
951 qunit_cache_cleanup();
953 if (lquota_type_proc_dir)
954 lprocfs_remove(&lquota_type_proc_dir);
/* Standard kernel-module metadata and the libcfs module registration hook. */
958 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
959 MODULE_DESCRIPTION("Lustre Quota");
960 MODULE_LICENSE("GPL");
962 cfs_module(lquota, "1.0.0", init_lustre_quota, exit_lustre_quota);
/* Export the interface vectors so other Lustre modules (obdfilter, mds,
 * osc, mdc, lov) can link against them; the server-side vectors exist only
 * when quota support is compiled in. */
964 #ifdef HAVE_QUOTA_SUPPORT
965 EXPORT_SYMBOL(mds_quota_interface);
966 EXPORT_SYMBOL(filter_quota_interface);
968 EXPORT_SYMBOL(mdc_quota_interface);
969 EXPORT_SYMBOL(osc_quota_interface);
970 EXPORT_SYMBOL(lov_quota_interface);
971 #endif /* __KERNEL */