1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 # define EXPORT_SYMTAB
40 #define DEBUG_SUBSYSTEM S_LQUOTA
43 # include <linux/version.h>
44 # include <linux/module.h>
45 # include <linux/init.h>
46 # include <linux/fs.h>
47 # include <linux/smp_lock.h>
48 # include <linux/buffer_head.h>
49 # include <linux/workqueue.h>
50 # include <linux/mount.h>
51 #else /* __KERNEL__ */
52 # include <liblustre.h>
55 /* Linux 2.6.34+ no longer define QUOTA_OK */
60 #include <obd_class.h>
61 #include <lustre_mds.h>
62 #include <lustre_dlm.h>
63 #include <lustre_cfg.h>
65 #include <lustre_fsfilt.h>
66 #include <lustre_quota.h>
67 #include <lprocfs_status.h>
68 #include "quota_internal.h"
72 #ifdef HAVE_QUOTA_SUPPORT
/* Rate-limiter state for the "still haven't managed to acquire quota space"
 * warning emitted in quota_chk_acq_common(): at most one CWARN per ~30s,
 * guarded by last_print_lock. */
74 static cfs_time_t last_print = 0;
75 static spinlock_t last_print_lock = SPIN_LOCK_UNLOCKED;
/* Set up quota state on an OST/filter obd: pick the quota file format
 * (V2 preferred, V1 as fallback -- the selecting condition sits on lines
 * missing from this extract), arm the quotacheck gate, and initialize the
 * slave quota context with a NULL dqacq handler (slaves do not serve
 * acquisition requests, the master does).
 * NOTE(review): braces, ENTRY/RETURN and the format-selection condition are
 * absent from this copy of the file -- do not edit logic without full source. */
77 static int filter_quota_setup(struct obd_device *obd)
80 struct obd_device_target *obt = &obd->u.obt;
84 obt->obt_qfmt = LUSTRE_QUOTA_V2;
86 obt->obt_qfmt = LUSTRE_QUOTA_V1;
88 atomic_set(&obt->obt_quotachecking, 1);
89 rc = qctxt_init(obd, NULL);
91 CERROR("initialize quota context failed! (rc:%d)\n", rc);
/* Tear down the filter's slave quota context (force flag 0: normal,
 * non-forced cleanup). Counterpart of filter_quota_setup(). */
96 static int filter_quota_cleanup(struct obd_device *obd)
98 qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/* Called when a (reverse) connection to the quota master is established:
 * publish the reverse import in the quota context under lqc_lock, mirror the
 * quota-related connect flags onto that import, wake any threads parked in
 * l_wait_event() on lqc_wait_for_qmaster, and kick off slave recovery so
 * over-acquired limits are released back to the master. */
102 static int filter_quota_setinfo(struct obd_export *exp, struct obd_device *obd)
104 struct obd_import *imp;
105 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
108 /* setup the quota context import */
109 spin_lock(&obd->u.obt.obt_qctxt.lqc_lock);
110 obd->u.obt.obt_qctxt.lqc_import = exp->exp_imp_reverse;
111 spin_unlock(&obd->u.obt.obt_qctxt.lqc_lock);
112 CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated now, \n",
113 obd->obd_name,exp->exp_imp_reverse, obd);
115 /* make imp's connect flags equal relative exp's connect flags
116 * adding it to avoid the scan export list
118 imp = exp->exp_imp_reverse;
120 imp->imp_connect_data.ocd_connect_flags |=
121 (exp->exp_connect_flags &
122 (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
124 cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
125 /* start quota slave recovery thread. (release high limits) */
126 qslave_start_recovery(obd, &obd->u.obt.obt_qctxt);
/* Invalidate the quota context's cached master import when the matching
 * export is being destroyed: NULL lqc_import under lqc_lock (b=12374),
 * clean up the reverse import's in-flight RPCs, and interrupt any pending
 * dqacq so waiters notice the master is gone. Bails out early if lquota
 * was never set up on this obd (b=14896). */
130 static int filter_quota_clearinfo(struct obd_export *exp, struct obd_device *obd)
132 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
135 /* lquota may be not set up before destroying export, b=14896 */
136 if (!obd->obd_set_up)
139 /* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
140 * should be invalid b=12374 */
141 if (qctxt->lqc_import && qctxt->lqc_import == exp->exp_imp_reverse) {
142 spin_lock(&qctxt->lqc_lock);
143 qctxt->lqc_import = NULL;
144 spin_unlock(&qctxt->lqc_lock);
145 ptlrpc_cleanup_imp(exp->exp_imp_reverse);
146 dqacq_interrupt(qctxt);
147 CDEBUG(D_QUOTA, "%s: lqc_import of obd(%p) is invalid now.\n",
/* Toggle quota enforcement for the current thread by raising/lowering
 * CFS_CAP_SYS_RESOURCE: with the capability raised, subsequent block
 * writes bypass quota checks ("ignore" != 0). No-op when no quota type is
 * active on the backing sb, or when the capability already matches the
 * requested state (the !! == !! test). */
153 static int target_quota_enforce(struct obd_device *obd, unsigned int ignore)
157 if (!ll_sb_any_quota_active(obd->u.obt.obt_sb))
160 if (!!cfs_cap_raised(CFS_CAP_SYS_RESOURCE) == !!ignore)
164 CDEBUG(D_QUOTA, "blocks will be written with ignoring quota.\n");
165 cfs_cap_raise(CFS_CAP_SYS_RESOURCE);
167 cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
/* Select the uid or gid out of an obdo depending on the quota type. */
173 #define GET_OA_ID(flag, oa) (flag == USRQUOTA ? oa->o_uid : oa->o_gid)
/* Fill in the per-uid/gid over-quota flags in *oa so the client learns
 * whether its writes must be synchronous. For each quota type: if the lqs
 * qunit has shrunk to/below the sync threshold (lqc_sync_blk), set the
 * OBD_FL_NO_*QUOTA flag directly; otherwise query the local quota file via
 * fsfilt_quotactl(Q_GETQUOTA) and set the flag when usage has reached the
 * hard block limit.
 * NOTE(review): error paths, lqs putref calls and RETURN are on lines
 * missing from this extract. */
174 static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
176 struct obd_device_target *obt = &obd->u.obt;
177 struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
178 int err, cnt, rc = 0;
179 struct obd_quotactl *oqctl;
182 if (!ll_sb_any_quota_active(obt->obt_sb))
185 OBD_ALLOC_PTR(oqctl);
189 /* set over quota flags for a uid/gid */
190 oa->o_valid |= OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA;
191 oa->o_flags &= ~(OBD_FL_NO_USRQUOTA | OBD_FL_NO_GRPQUOTA);
193 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
194 struct lustre_qunit_size *lqs = NULL;
196 lqs = quota_search_lqs(LQS_KEY(cnt, GET_OA_ID(cnt, oa)),
198 if (lqs == NULL || IS_ERR(lqs)) {
202 spin_lock(&lqs->lqs_lock);
/* Small qunit => near the limit: force the client to write synchronously. */
203 if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
204 oa->o_flags |= (cnt == USRQUOTA) ?
205 OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
206 CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
207 "sync_blk(%d)\n", lqs->lqs_bunit_sz,
208 qctxt->lqc_sync_blk);
209 spin_unlock(&lqs->lqs_lock);
210 /* this is for quota_search_lqs */
214 spin_unlock(&lqs->lqs_lock);
215 /* this is for quota_search_lqs */
/* Fall back to asking the backing quota file for this id's usage/limit. */
219 memset(oqctl, 0, sizeof(*oqctl));
221 oqctl->qc_cmd = Q_GETQUOTA;
222 oqctl->qc_type = cnt;
223 oqctl->qc_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
224 err = fsfilt_quotactl(obd, obt->obt_sb, oqctl);
228 oa->o_valid &= ~((cnt == USRQUOTA) ? OBD_MD_FLUSRQUOTA :
/* Hard limit set and current space (converted to quota blocks) at/over it. */
233 if (oqctl->qc_dqblk.dqb_bhardlimit &&
234 (toqb(oqctl->qc_dqblk.dqb_curspace) >=
235 oqctl->qc_dqblk.dqb_bhardlimit))
236 oa->o_flags |= (cnt == USRQUOTA) ?
237 OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
/* Block-quota acquire callback for the OST: ask the quota context to adjust
 * the qunit for (uid, gid) with LQUOTA_FLAGS_BLK set and wait-flag 1
 * (i.e. wait for the dqacq/dqrel RPC to complete). */
243 static int filter_quota_acquire(struct obd_device *obd, unsigned int uid,
244 unsigned int gid, struct obd_trans_info *oti)
246 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
250 rc = qctxt_adjust_qunit(obd, qctxt, uid, gid, LQUOTA_FLAGS_BLK, 1, oti);
254 /* check whether the left quota of certain uid and gid can satisfy a block_write
255 * or inode_create rpc. When need to acquire quota, return QUOTA_RET_ACQUOTA */
/* Shared (OST block / MDS inode) pre-write quota check.
 * For each of uid/gid (root uid is exempt): look up the lqs, compute the
 * remaining local quota, grow the lqs pending counter by this request's
 * cost (count pages -> bytes plus estimated extra meta blocks for block
 * writes, b=16542; plain count for inodes), compensate for in-flight
 * releases (negative lqs_*_rec, b=18491), and flag QUOTA_RET_ACQUOTA if
 * pending would exceed what is left. pending[2] returns the per-type cost
 * so quota_pending_commit() can undo it; on cycle==0 an lqs reference is
 * kept until that commit (b=14784).
 * NOTE(review): multiple lines (early RETURNs, else-branches, lqs
 * getref/putref) are missing from this extract -- comments describe the
 * visible logic only. */
256 static int quota_check_common(struct obd_device *obd, unsigned int uid,
257 unsigned int gid, int count, int cycle, int isblk,
258 struct inode *inode, int frags, int pending[2])
260 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
262 __u32 id[MAXQUOTAS] = { uid, gid };
263 struct qunit_data qdata[MAXQUOTAS];
265 int rc = 0, rc2[2] = { 0, 0 };
/* Bail out (path on a missing line) if the context has been invalidated. */
268 spin_lock(&qctxt->lqc_lock);
269 if (!qctxt->lqc_valid){
270 spin_unlock(&qctxt->lqc_lock);
273 spin_unlock(&qctxt->lqc_lock);
275 for (i = 0; i < MAXQUOTAS; i++) {
276 struct lustre_qunit_size *lqs = NULL;
278 qdata[i].qd_id = id[i];
279 qdata[i].qd_flags = i;
281 QDATA_SET_BLK(&qdata[i]);
282 qdata[i].qd_count = 0;
284 /* ignore root user */
285 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
288 lqs = quota_search_lqs(LQS_KEY(i, id[i]), qctxt, 0);
289 if (lqs == NULL || IS_ERR(lqs))
292 rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
293 spin_lock(&lqs->lqs_lock);
296 pending[i] = count * CFS_PAGE_SIZE;
297 /* in order to complete this write, we need extra
298 * meta blocks. This function can get it through
299 * data needed to be written b=16542 */
301 LASSERT(inode && frags > 0);
302 if (fsfilt_get_mblk(obd, qctxt->lqc_sb, &mb,
304 CERROR("%s: can't get extra meta "
305 "blocks\n", obd->obd_name);
308 lqs->lqs_bwrite_pending += pending[i];
311 lqs->lqs_iwrite_pending += pending[i];
315 /* if xx_rec < 0, that means quota are releasing,
316 * and it may return before we use quota. So if
317 * we find this situation, we assuming it has
318 * returned b=18491 */
319 if (isblk && lqs->lqs_blk_rec < 0) {
320 if (qdata[i].qd_count < -lqs->lqs_blk_rec)
321 qdata[i].qd_count = 0;
323 qdata[i].qd_count += lqs->lqs_blk_rec;
325 if (!isblk && lqs->lqs_ino_rec < 0) {
326 if (qdata[i].qd_count < -lqs->lqs_ino_rec)
327 qdata[i].qd_count = 0;
329 qdata[i].qd_count += lqs->lqs_ino_rec;
332 CDEBUG(D_QUOTA, "count=%d lqs_pending=%lu qd_count="LPU64
333 " isblk=%d mb=%d pending[%d]=%d\n", count,
334 isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
335 qdata[i].qd_count, isblk, mb, i, pending[i]);
/* Remaining quota smaller than what is now pending => must acquire more. */
336 if (rc2[i] == QUOTA_RET_OK) {
337 if (isblk && qdata[i].qd_count < lqs->lqs_bwrite_pending)
338 rc2[i] = QUOTA_RET_ACQUOTA;
339 if (!isblk && qdata[i].qd_count <
340 lqs->lqs_iwrite_pending)
341 rc2[i] = QUOTA_RET_ACQUOTA;
344 spin_unlock(&lqs->lqs_lock);
346 if (lqs->lqs_blk_rec < 0 &&
348 lqs->lqs_bwrite_pending - lqs->lqs_blk_rec - mb)
349 OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);
351 /* When cycle is zero, lqs_*_pending will be changed. We will
352 * get reference of the lqs here and put reference of lqs in
353 * quota_pending_commit b=14784 */
357 /* this is for quota_search_lqs */
361 if (rc2[0] == QUOTA_RET_ACQUOTA || rc2[1] == QUOTA_RET_ACQUOTA)
362 RETURN(QUOTA_RET_ACQUOTA);
/* Check-and-acquire loop shared by OST (block) and MDS (inode) paths:
 * repeat quota_check_common(); while it reports QUOTA_RET_ACQUOTA, call
 * the acquire() callback against the quota master, handling the master
 * being temporarily unreachable (sleep on lqc_wait_for_qmaster with the
 * service watchdog disabled), EDQUOT, client eviction, and transient
 * errors (back off up to 10s between retries, warn at most every 30s via
 * last_print). Elapsed time is accounted into the lqc_stats
 * WAIT_FOR_CHK_{BLK,INO} counter.
 * NOTE(review): several lines (RETURNs, loop increments of cycle/count_err,
 * timediff declaration) are missing from this extract. */
367 static int quota_chk_acq_common(struct obd_export *exp, unsigned int uid,
368 unsigned int gid, int count, int pending[2],
369 int isblk, quota_acquire acquire,
370 struct obd_trans_info *oti, struct inode *inode,
373 struct obd_device *obd = exp->exp_obd;
374 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
375 struct timeval work_start;
376 struct timeval work_end;
378 struct l_wait_info lwi = { 0 };
379 int rc = 0, cycle = 0, count_err = 1;
382 CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
383 if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
384 /* If the client has been evicted or if it
385 * timed out and tried to reconnect already,
386 * abort the request immediately */
389 /* Unfortunately, if quota master is too busy to handle the
390 * pre-dqacq in time and quota hash on ost is used up, we
391 * have to wait for the completion of in flight dqacq/dqrel,
392 * in order to get enough quota for write b=12588 */
393 do_gettimeofday(&work_start);
394 while ((rc = quota_check_common(obd, uid, gid, count, cycle, isblk,
395 inode, frags, pending)) & QUOTA_RET_ACQUOTA) {
/* No import to the quota master yet: park this service thread (watchdog
 * off) until filter_quota_setinfo() signals lqc_wait_for_qmaster. */
397 spin_lock(&qctxt->lqc_lock);
398 if (!qctxt->lqc_import && oti) {
399 spin_unlock(&qctxt->lqc_lock);
401 LASSERT(oti && oti->oti_thread &&
402 oti->oti_thread->t_watchdog);
404 lc_watchdog_disable(oti->oti_thread->t_watchdog);
405 CDEBUG(D_QUOTA, "sleep for quota master\n");
406 l_wait_event(qctxt->lqc_wait_for_qmaster, check_qm(qctxt),
408 CDEBUG(D_QUOTA, "wake up when quota master is back\n");
409 lc_watchdog_touch(oti->oti_thread->t_watchdog,
410 GET_TIMEOUT(oti->oti_thread->t_svc));
412 spin_unlock(&qctxt->lqc_lock);
417 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
418 /* after acquire(), we should run quota_check_common again
419 * so that we confirm there are enough quota to finish write */
420 rc = acquire(obd, uid, gid, oti);
422 /* please reference to dqacq_completion for the below */
423 /* a new request is finished, try again */
424 if (rc == QUOTA_REQ_RETURNED) {
425 CDEBUG(D_QUOTA, "finish a quota req, try again\n");
429 /* it is out of quota already */
431 CDEBUG(D_QUOTA, "out of quota, return -EDQUOT\n");
435 if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
436 /* The client has been evicted or tried to
437 * to reconnect already, abort the request */
440 /* -EBUSY and others, wait a second and try again */
443 struct l_wait_info lwi;
445 if (oti && oti->oti_thread && oti->oti_thread->t_watchdog)
446 lc_watchdog_touch(oti->oti_thread->t_watchdog,
447 GET_TIMEOUT(oti->oti_thread->t_svc));
448 CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
/* Exponentialish backoff: sleep min(cycle, 10) seconds before retrying. */
451 init_waitqueue_head(&waitq);
452 lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
454 l_wait_event(waitq, 0, &lwi);
457 if (rc < 0 || cycle % 10 == 0) {
458 spin_lock(&last_print_lock);
459 if (last_print == 0 ||
460 cfs_time_before((last_print + cfs_time_seconds(30)),
461 cfs_time_current())) {
462 CWARN("still haven't managed to acquire quota "
463 "space from the quota master after %d "
464 "retries (err=%d, rc=%d)\n",
465 cycle, count_err - 1, rc);
466 last_print = cfs_time_current();
468 spin_unlock(&last_print_lock);
471 CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
475 do_gettimeofday(&work_end);
476 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
477 lprocfs_counter_add(qctxt->lqc_stats,
478 isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
479 LQUOTA_WAIT_FOR_CHK_INO,
/* Return whether a quota limit flag (QB_SET for blocks / QI_SET for inodes)
 * is set for either the uid or the gid: callers use this to skip the whole
 * check/acquire path when neither id has a limit. Returns 0 when quota is
 * not active on the superblock.
 * NOTE(review): the lqs putref and RETURN lines are missing from this
 * extract. */
487 int quota_is_set(struct obd_device *obd, unsigned int uid,
488 unsigned int gid, int flag)
490 struct lustre_qunit_size *lqs;
491 __u32 id[MAXQUOTAS] = { uid, gid };
494 if (!ll_sb_any_quota_active(obd->u.obt.obt_qctxt.lqc_sb))
497 for (i = 0; i < MAXQUOTAS; i++) {
498 lqs = quota_search_lqs(LQS_KEY(i, id[i]),
499 &obd->u.obt.obt_qctxt, 0);
500 if (lqs && !IS_ERR(lqs)) {
501 if (lqs->lqs_flags & flag)
/* OST entry point for the pre-write quota check: only runs the (possibly
 * blocking) check/acquire loop when a block limit (QB_SET) exists for the
 * uid or gid; otherwise returns 0 immediately. */
510 static int filter_quota_check(struct obd_export *exp, unsigned int uid,
511 unsigned int gid, int npage, int pending[2],
512 quota_acquire acquire, struct obd_trans_info *oti,
513 struct inode *inode, int frags)
515 return quota_is_set(exp->exp_obd, uid, gid, QB_SET) ?
516 quota_chk_acq_common(exp, uid, gid, npage, pending,
517 LQUOTA_FLAGS_BLK, acquire, oti, inode,
521 /* when a block_write or inode_create rpc is finished, adjust the record for
522 * pending blocks and inodes*/
/* Undo the pending accounting added by quota_check_common(): subtract
 * pending[i] from lqs_bwrite_pending (blocks) or lqs_iwrite_pending
 * (inodes) for each of uid/gid, complaining if the counter would go
 * negative, and drop the lqs reference taken at check time (b=14784).
 * Elapsed time goes into the WAIT_FOR_COMMIT_{BLK,INO} stats counter.
 * NOTE(review): putref lines and RETURN are missing from this extract. */
523 static int quota_pending_commit(struct obd_device *obd, unsigned int uid,
524 unsigned int gid, int pending[2], int isblk)
526 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
527 struct timeval work_start;
528 struct timeval work_end;
531 __u32 id[MAXQUOTAS] = { uid, gid };
532 struct qunit_data qdata[MAXQUOTAS];
535 CDEBUG(D_QUOTA, "%s: commit pending quota\n", obd->obd_name);
536 CLASSERT(MAXQUOTAS < 4);
538 do_gettimeofday(&work_start);
539 for (i = 0; i < MAXQUOTAS; i++) {
540 struct lustre_qunit_size *lqs = NULL;
543 qdata[i].qd_id = id[i];
544 qdata[i].qd_flags = i;
546 QDATA_SET_BLK(&qdata[i]);
547 qdata[i].qd_count = 0;
/* Root uid is exempt, mirroring quota_check_common(). */
549 if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
552 lqs = quota_search_lqs(LQS_KEY(i, qdata[i].qd_id), qctxt, 0);
553 if (lqs == NULL || IS_ERR(lqs))
556 spin_lock(&lqs->lqs_lock);
558 if (lqs->lqs_bwrite_pending >= pending[i]) {
559 lqs->lqs_bwrite_pending -= pending[i];
/* Underflow would mean commits outnumbered checks -- accounting bug. */
562 CERROR("%s: there are too many blocks!\n",
566 if (lqs->lqs_iwrite_pending >= pending[i]) {
567 lqs->lqs_iwrite_pending -= pending[i];
570 CERROR("%s: there are too many files!\n",
574 CDEBUG(D_QUOTA, "%s: lqs_pending=%lu pending[%d]=%d isblk=%d\n",
576 isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
577 i, pending[i], isblk);
579 spin_unlock(&lqs->lqs_lock);
581 /* When lqs_*_pening is changed back, we'll putref lqs
586 do_gettimeofday(&work_end);
587 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
588 lprocfs_counter_add(qctxt->lqc_stats,
589 isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
590 LQUOTA_WAIT_FOR_COMMIT_INO,
/* OST wrapper: commit pending *block* quota after a write RPC completes. */
596 static int filter_quota_pending_commit(struct obd_device *obd, unsigned int uid,
597 unsigned int gid, int pending[2])
599 return quota_pending_commit(obd, uid, gid, pending, LQUOTA_FLAGS_BLK);
/* Module-wide MDS quota init/exit: set up (and, in the exit body on lines
 * missing here, tear down) the lustre dquot cache used by the quota master. */
602 static int mds_quota_init(void)
604 return lustre_dquot_init();
607 static int mds_quota_exit(void)
/* Per-MDS-obd quota setup: choose the quota file format (condition on a
 * missing line), record the quota-info version, arm the quotacheck gate,
 * initialize the quota-on/off semaphore, and initialize the quota context
 * with dqacq_handler -- the MDS acts as quota master and services
 * acquisition requests. */
613 static int mds_quota_setup(struct obd_device *obd)
615 struct obd_device_target *obt = &obd->u.obt;
616 struct mds_obd *mds = &obd->u.mds;
621 obt->obt_qfmt = LUSTRE_QUOTA_V2;
623 obt->obt_qfmt = LUSTRE_QUOTA_V1;
625 mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
626 atomic_set(&obt->obt_quotachecking, 1);
627 /* initialize quota master and quota context */
628 sema_init(&mds->mds_qonoff_sem, 1);
629 rc = qctxt_init(obd, dqacq_handler);
631 CERROR("%s: initialize quota context failed! (rc:%d)\n",
/* Tear down the MDS quota context (non-forced). Counterpart of
 * mds_quota_setup(). */
638 static int mds_quota_cleanup(struct obd_device *obd)
640 qctxt_cleanup(&obd->u.obt.obt_qctxt, 0);
/* On MDS filesystem shutdown, turn the admin quota master off for both
 * user and group quotas (UGQUOTA), serialized against concurrent
 * quota-on/off via mds_qonoff_sem. */
644 static int mds_quota_fs_cleanup(struct obd_device *obd)
646 struct mds_obd *mds = &obd->u.mds;
647 struct obd_quotactl oqctl;
650 memset(&oqctl, 0, sizeof(oqctl));
651 oqctl.qc_type = UGQUOTA;
653 down(&mds->mds_qonoff_sem);
654 mds_admin_quota_off(obd, &oqctl);
655 up(&mds->mds_qonoff_sem);
/* MDS entry point for the pre-create quota check: only runs the
 * check/acquire loop when an inode limit (QI_SET) exists for the uid or
 * gid; isblk=0 selects inode accounting in quota_chk_acq_common(). */
659 static int mds_quota_check(struct obd_export *exp, unsigned int uid,
660 unsigned int gid, int inodes, int pending[2],
661 quota_acquire acquire, struct obd_trans_info *oti,
662 struct inode *inode, int frags)
664 return quota_is_set(exp->exp_obd, uid, gid, QI_SET) ?
665 quota_chk_acq_common(exp, uid, gid, inodes, pending, 0,
666 acquire, oti, inode, frags) : 0;
/* Inode-quota acquire callback for the MDS: adjust the qunit for
 * (uid, gid) with flags 0 (inode, not block) and wait for completion. */
669 static int mds_quota_acquire(struct obd_device *obd, unsigned int uid,
670 unsigned int gid, struct obd_trans_info *oti)
672 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
676 rc = qctxt_adjust_qunit(obd, qctxt, uid, gid, 0, 1, oti);
/* MDS wrapper: commit pending *inode* quota after a create RPC completes. */
680 static int mds_quota_pending_commit(struct obd_device *obd, unsigned int uid,
681 unsigned int gid, int pending[2])
683 return quota_pending_commit(obd, uid, gid, pending, 0);
685 #endif /* HAVE_QUOTA_SUPPORT */
686 #endif /* __KERNEL__ */
/* Client-side (OSC) record of "this uid/gid is over quota on this OSC":
 * presence of a node in qinfo_hash means writes for that id must be
 * synchronous. Keyed by (client_obd, id, type), protected by
 * qinfo_list_lock, allocated from qinfo_cachep. */
688 struct osc_quota_info {
689 struct list_head oqi_hash; /* hash list */
690 struct client_obd *oqi_cli; /* osc obd */
691 unsigned int oqi_id; /* uid/gid of a file */
692 short oqi_type; /* quota type */
/* Global hash state shared by all OSCs in this module. */
695 spinlock_t qinfo_list_lock = SPIN_LOCK_UNLOCKED;
697 static struct list_head qinfo_hash[NR_DQHASH];
698 /* SLAB cache for client quota context */
699 cfs_mem_cache_t *qinfo_cachep = NULL;
/* Hash (cli, id, type) into a qinfo_hash bucket index in [0, NR_DQHASH).
 * The client pointer is shifted by 6 to discard low alignment bits before
 * mixing with the id; marked __const__ so the compiler may CSE calls. */
701 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
702 __attribute__((__const__));
704 static inline int hashfn(struct client_obd *cli, unsigned long id, int type)
706 unsigned long tmp = ((unsigned long)cli>>6) ^ id;
707 tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
/* Hash insert/remove helpers; both assert qinfo_list_lock is held by the
 * caller rather than taking it themselves. */
711 /* caller must hold qinfo_list_lock */
712 static inline void insert_qinfo_hash(struct osc_quota_info *oqi)
714 struct list_head *head = qinfo_hash +
715 hashfn(oqi->oqi_cli, oqi->oqi_id, oqi->oqi_type);
717 LASSERT_SPIN_LOCKED(&qinfo_list_lock);
718 list_add(&oqi->oqi_hash, head);
721 /* caller must hold qinfo_list_lock */
722 static inline void remove_qinfo_hash(struct osc_quota_info *oqi)
724 LASSERT_SPIN_LOCKED(&qinfo_list_lock);
/* list_del_init so the node can be safely re-removed or freed later. */
725 list_del_init(&oqi->oqi_hash);
728 /* caller must hold qinfo_list_lock */
/* Linear scan of the bucket for an exact (cli, id, type) match; returns
 * the entry (on a missing line) or NULL when absent. */
729 static inline struct osc_quota_info *find_qinfo(struct client_obd *cli,
730 unsigned int id, int type)
732 unsigned int hashent = hashfn(cli, id, type);
733 struct osc_quota_info *oqi;
735 LASSERT_SPIN_LOCKED(&qinfo_list_lock);
736 list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
737 if (oqi->oqi_cli == cli &&
738 oqi->oqi_id == id && oqi->oqi_type == type)
/* Allocate and initialize an osc_quota_info from the slab cache (returns
 * an ERR_PTR/NULL path on a missing line when allocation fails); caller
 * later releases it with free_qinfo(). */
744 static struct osc_quota_info *alloc_qinfo(struct client_obd *cli,
745 unsigned int id, int type)
747 struct osc_quota_info *oqi;
750 OBD_SLAB_ALLOC(oqi, qinfo_cachep, CFS_ALLOC_IO, sizeof(*oqi));
754 CFS_INIT_LIST_HEAD(&oqi->oqi_hash);
757 oqi->oqi_type = type;
/* Return the entry to the slab cache; must already be off the hash. */
762 static void free_qinfo(struct osc_quota_info *oqi)
764 OBD_SLAB_FREE(oqi, qinfo_cachep, sizeof(*oqi));
/* Client-side cheap quota check before a write: if either uid or gid has a
 * cached over-quota entry in qinfo_hash, the result changes from QUOTA_OK
 * (branch on a missing line, presumably to NO_QUOTA -- confirm against
 * full source); no RPC is made here. */
767 int osc_quota_chkdq(struct client_obd *cli, unsigned int uid, unsigned int gid)
770 int cnt, rc = QUOTA_OK;
773 spin_lock(&qinfo_list_lock);
774 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
775 struct osc_quota_info *oqi = NULL;
777 id = (cnt == USRQUOTA) ? uid : gid;
778 oqi = find_qinfo(cli, id, cnt);
784 spin_unlock(&qinfo_list_lock);
/* Update the client's cached over-quota state from flags returned by the
 * OST (see filter_quota_getflag): for each quota type whose OBD_MD_FL*QUOTA
 * valid bit is set, insert an entry when the id is newly over quota
 * (noquota && !old) or remove the stale one when it no longer is
 * (!noquota && old). The replacement entry is allocated before taking the
 * spinlock; freeing of the unused/removed entry is on missing lines. */
789 int osc_quota_setdq(struct client_obd *cli, unsigned int uid, unsigned int gid,
790 obd_flag valid, obd_flag flags)
798 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
799 struct osc_quota_info *oqi, *old;
801 if (!(valid & ((cnt == USRQUOTA) ?
802 OBD_MD_FLUSRQUOTA : OBD_MD_FLGRPQUOTA)))
805 id = (cnt == USRQUOTA) ? uid : gid;
806 noquota = (cnt == USRQUOTA) ?
807 (flags & OBD_FL_NO_USRQUOTA) : (flags & OBD_FL_NO_GRPQUOTA);
809 oqi = alloc_qinfo(cli, id, cnt);
815 spin_lock(&qinfo_list_lock);
816 old = find_qinfo(cli, id, cnt);
818 remove_qinfo_hash(old);
819 else if (!old && noquota)
820 insert_qinfo_hash(oqi);
821 spin_unlock(&qinfo_list_lock);
/* On OSC obd teardown, purge every cached qinfo entry belonging to this
 * client from all hash buckets (entries for other clients are skipped);
 * the free_qinfo() call for each removed entry is on a missing line. */
832 int osc_quota_cleanup(struct obd_device *obd)
834 struct client_obd *cli = &obd->u.cli;
835 struct osc_quota_info *oqi, *n;
839 spin_lock(&qinfo_list_lock);
840 for (i = 0; i < NR_DQHASH; i++) {
841 list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
842 if (oqi->oqi_cli != cli)
844 remove_qinfo_hash(oqi);
848 spin_unlock(&qinfo_list_lock);
/* Module init for the client quota cache: create the osc_quota_info slab
 * (must not already exist) and empty-initialize every hash bucket. */
853 int osc_quota_init(void)
858 LASSERT(qinfo_cachep == NULL);
859 qinfo_cachep = cfs_mem_cache_create("osc_quota_info",
860 sizeof(struct osc_quota_info),
865 for (i = 0; i < NR_DQHASH; i++)
866 CFS_INIT_LIST_HEAD(qinfo_hash + i);
/* Module exit: drain all remaining qinfo entries from every bucket, then
 * destroy the slab cache (asserting no objects leaked).
 * NOTE(review): "destory" typo is in the runtime assertion string -- left
 * untouched here since changing it alters emitted text. */
871 int osc_quota_exit(void)
873 struct osc_quota_info *oqi, *n;
877 spin_lock(&qinfo_list_lock);
878 for (i = 0; i < NR_DQHASH; i++) {
879 list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
880 remove_qinfo_hash(oqi);
884 spin_unlock(&qinfo_list_lock);
886 rc = cfs_mem_cache_destroy(qinfo_cachep);
887 LASSERTF(rc == 0, "couldn't destory qinfo_cachep slab\n");
/* Quota operation vtables wired into the stacking layers:
 *  - mds_quota_interface / filter_quota_interface: server side (master and
 *    slave), kernel + HAVE_QUOTA_SUPPORT only;
 *  - mdc/osc_quota_interface: client side, RPC-based ctl/check plus the
 *    OSC's local over-quota cache (chkdq/setdq);
 *  - lov_quota_interface: fans quota ops out across OSC targets. */
894 #ifdef HAVE_QUOTA_SUPPORT
895 quota_interface_t mds_quota_interface = {
896 .quota_init = mds_quota_init,
897 .quota_exit = mds_quota_exit,
898 .quota_setup = mds_quota_setup,
899 .quota_cleanup = mds_quota_cleanup,
900 .quota_check = target_quota_check,
901 .quota_ctl = mds_quota_ctl,
902 .quota_fs_cleanup =mds_quota_fs_cleanup,
903 .quota_recovery = mds_quota_recovery,
904 .quota_adjust = mds_quota_adjust,
905 .quota_chkquota = mds_quota_check,
906 .quota_enforce = target_quota_enforce,
907 .quota_acquire = mds_quota_acquire,
908 .quota_pending_commit = mds_quota_pending_commit,
911 quota_interface_t filter_quota_interface = {
912 .quota_setup = filter_quota_setup,
913 .quota_cleanup = filter_quota_cleanup,
914 .quota_check = target_quota_check,
915 .quota_ctl = filter_quota_ctl,
916 .quota_setinfo = filter_quota_setinfo,
917 .quota_clearinfo = filter_quota_clearinfo,
918 .quota_enforce = target_quota_enforce,
919 .quota_getflag = filter_quota_getflag,
920 .quota_acquire = filter_quota_acquire,
921 .quota_adjust = filter_quota_adjust,
922 .quota_chkquota = filter_quota_check,
923 .quota_adjust_qunit = filter_quota_adjust_qunit,
924 .quota_pending_commit = filter_quota_pending_commit,
927 #endif /* __KERNEL__ */
929 quota_interface_t mdc_quota_interface = {
930 .quota_ctl = client_quota_ctl,
931 .quota_check = client_quota_check,
932 .quota_poll_check = client_quota_poll_check,
935 quota_interface_t osc_quota_interface = {
936 .quota_ctl = client_quota_ctl,
937 .quota_check = client_quota_check,
938 .quota_poll_check = client_quota_poll_check,
939 .quota_init = osc_quota_init,
940 .quota_exit = osc_quota_exit,
941 .quota_chkdq = osc_quota_chkdq,
942 .quota_setdq = osc_quota_setdq,
943 .quota_cleanup = osc_quota_cleanup,
944 .quota_adjust_qunit = client_quota_adjust_qunit,
947 quota_interface_t lov_quota_interface = {
948 .quota_check = lov_quota_check,
949 .quota_ctl = lov_quota_ctl,
950 .quota_adjust_qunit = lov_quota_adjust_qunit,
/* Module load: register the lquota /proc directory, initialize the qunit
 * cache (HAVE_QUOTA_SUPPORT only), and export the interface symbols.
 * Error-unwind paths are on lines missing from this extract. */
955 cfs_proc_dir_entry_t *lquota_type_proc_dir = NULL;
957 static int __init init_lustre_quota(void)
959 #ifdef HAVE_QUOTA_SUPPORT
962 lquota_type_proc_dir = lprocfs_register(OBD_LQUOTA_DEVICENAME,
965 if (IS_ERR(lquota_type_proc_dir)) {
966 CERROR("LProcFS failed in lquota-init\n");
967 rc = PTR_ERR(lquota_type_proc_dir);
971 rc = qunit_cache_init();
975 PORTAL_SYMBOL_REGISTER(filter_quota_interface);
976 PORTAL_SYMBOL_REGISTER(mds_quota_interface);
978 PORTAL_SYMBOL_REGISTER(mdc_quota_interface);
979 PORTAL_SYMBOL_REGISTER(osc_quota_interface);
980 PORTAL_SYMBOL_REGISTER(lov_quota_interface);
/* Module unload: mirror of init_lustre_quota() -- unregister symbols,
 * drop the qunit cache, and remove the /proc directory if it was created. */
984 static void /*__exit*/ exit_lustre_quota(void)
986 PORTAL_SYMBOL_UNREGISTER(mdc_quota_interface);
987 PORTAL_SYMBOL_UNREGISTER(osc_quota_interface);
988 PORTAL_SYMBOL_UNREGISTER(lov_quota_interface);
989 #ifdef HAVE_QUOTA_SUPPORT
990 PORTAL_SYMBOL_UNREGISTER(filter_quota_interface);
991 PORTAL_SYMBOL_UNREGISTER(mds_quota_interface);
993 qunit_cache_cleanup();
995 if (lquota_type_proc_dir)
996 lprocfs_remove(&lquota_type_proc_dir);
/* Module metadata, entry/exit registration, and EXPORT_SYMBOL declarations
 * so other Lustre modules (mds/obdfilter/mdc/osc/lov) can bind to the
 * quota interface tables above. */
1000 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
1001 MODULE_DESCRIPTION("Lustre Quota");
1002 MODULE_LICENSE("GPL");
1004 cfs_module(lquota, "1.0.0", init_lustre_quota, exit_lustre_quota);
1006 #ifdef HAVE_QUOTA_SUPPORT
1007 EXPORT_SYMBOL(mds_quota_interface);
1008 EXPORT_SYMBOL(filter_quota_interface);
1010 EXPORT_SYMBOL(mdc_quota_interface);
1011 EXPORT_SYMBOL(osc_quota_interface);
1012 EXPORT_SYMBOL(lov_quota_interface);
1013 #endif /* __KERNEL */