/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/quota/quota_context.c
 *
 * Lustre Quota Context
 *
 * Author: Niu YaWei <niu@clusterfs.com>
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_LQUOTA

#include <linux/version.h>
#include <linux/fs.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/init.h>

#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
#include <lprocfs_status.h>
#include "quota_internal.h"

#ifdef HAVE_QUOTA_SUPPORT

static cfs_hash_ops_t lqs_hash_ops;

unsigned long default_bunit_sz = 128 * 1024 * 1024; /* 128M bytes */
unsigned long default_btune_ratio = 50;             /* 50 percent */
unsigned long default_iunit_sz = 5120;              /* 5120 inodes */
unsigned long default_itune_ratio = 50;             /* 50 percent */
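
/* a slave pre-acquires another qunit from the master once the remaining
 * quota drops below the "tune" watermark; qctxt_init() below derives the
 * watermark from these ratios (e.g. btune = 50% of the 128M bunit = 64M) */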

cfs_mem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;

/* please sync qunit_state with qunit_state_names */
enum qunit_state {
        /**
         * a qunit is created
         */
        QUNIT_CREATED                 = 0,
        /**
         * a qunit is added into qunit hash, that means
         * a quota req will be sent or is flying
         */
        QUNIT_IN_HASH                 = 1,
        /**
         * a qunit is removed from qunit hash, that
         * means a quota req is handled and comes back
         */
        QUNIT_RM_FROM_HASH            = 2,
        /**
         * qunit can wake up all threads waiting for it
         */
        QUNIT_FINISHED                = 3,
};

static const char *qunit_state_names[] = {
        [QUNIT_CREATED]      = "CREATED",
        [QUNIT_IN_HASH]      = "IN_HASH",
        [QUNIT_RM_FROM_HASH] = "RM_FROM_HASH",
        [QUNIT_FINISHED]     = "FINISHED",
};

struct lustre_qunit {
        struct list_head lq_hash;          /** Hash list in memory */
        atomic_t lq_refcnt;                /** Use count */
        struct lustre_quota_ctxt *lq_ctxt; /** Quota context this applies to */
        struct qunit_data lq_data;         /** See qunit_data */
        unsigned int lq_opc;               /** QUOTA_DQACQ, QUOTA_DQREL */
        cfs_waitq_t lq_waitq;              /** Threads waiting for this qunit */
        spinlock_t lq_lock;                /** Protect the whole structure */
        enum qunit_state lq_state;         /** Present the status of qunit */
        int lq_rc;                         /** The rc of lq_data */
        pid_t lq_owner;                    /** Pid of the process that created it */
};
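
/* refcounting: alloc_qunit() returns one ref owned by the caller;
 * insert_qunit_nolock() and dqacq_in_flight() each take an extra ref,
 * dropped by remove_qunit_nolock() and the matching qunit_put() calls;
 * the qunit is freed when the last ref is put */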

#define QUNIT_SET_STATE(qunit, state)                                   \
do {                                                                    \
        spin_lock(&qunit->lq_lock);                                     \
        QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), "   \
                    "lq_rc(%d), lq_owner(%d)\n",                        \
                    qunit, qunit_state_names[qunit->lq_state],          \
                    qunit_state_names[state], qunit->lq_rc,             \
                    qunit->lq_owner);                                   \
        qunit->lq_state = state;                                        \
        spin_unlock(&qunit->lq_lock);                                   \
} while(0)

#define QUNIT_SET_STATE_AND_RC(qunit, state, rc)                        \
do {                                                                    \
        spin_lock(&qunit->lq_lock);                                     \
        qunit->lq_rc = rc;                                              \
        QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), "   \
                    "lq_rc(%d), lq_owner(%d)\n",                        \
                    qunit, qunit_state_names[qunit->lq_state],          \
                    qunit_state_names[state], qunit->lq_rc,             \
                    qunit->lq_owner);                                   \
        qunit->lq_state = state;                                        \
        spin_unlock(&qunit->lq_lock);                                   \
} while(0)

int should_translate_quota(struct obd_import *imp)
{
        ENTRY;

        LASSERT(imp);
        if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)
                RETURN(0);
        else
                RETURN(1);
}

void qunit_cache_cleanup(void)
{
        int i;
        ENTRY;

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                LASSERT(list_empty(qunit_hash + i));
        spin_unlock(&qunit_hash_lock);

        if (qunit_cachep) {
                int rc;
                rc = cfs_mem_cache_destroy(qunit_cachep);
                LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
                qunit_cachep = NULL;
        }
        EXIT;
}

int qunit_cache_init(void)
{
        int i;
        ENTRY;

        LASSERT(qunit_cachep == NULL);
        qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
                                            sizeof(struct lustre_qunit),
                                            0, 0);
        if (!qunit_cachep)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                CFS_INIT_LIST_HEAD(qunit_hash + i);
        spin_unlock(&qunit_hash_lock);

        RETURN(0);
}

static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
             __attribute__((__const__));

static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        unsigned int id = qdata->qd_id;
        unsigned int type = QDATA_IS_GRP(qdata);

        unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
        tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
        return tmp;
}
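
/* the bucket mixes the per-obd quota context pointer with the quota id,
 * so the same uid/gid tracked by two contexts hashes independently; the
 * (MAXQUOTAS - type) factor folds user vs. group into the bucket choice */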

/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
                                              struct lustre_quota_ctxt *qctxt,
                                              struct qunit_data *qdata)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_data *tmp;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                tmp = &qunit->lq_data;
                if (qunit->lq_ctxt == qctxt &&
                    qdata->qd_id == tmp->qd_id &&
                    (qdata->qd_flags & LQUOTA_QUNIT_FLAGS) ==
                    (tmp->qd_flags & LQUOTA_QUNIT_FLAGS))
                        return qunit;
        }
        return NULL;
}

/* check_cur_qunit - check the current usage of qunit.
 * @qctxt: quota context
 * @qdata: the type of quota unit to be checked
 *
 * return: 1 - need acquire qunit;
 *         2 - need release qunit;
 *         0 - need do nothing.
 *       < 0 - error.
 */
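/*
 * Worked example (block quota, default sizes): with bunit = 128M,
 * btune = 64M, a hard limit of 1024M already acquired and usage at 990M,
 * "limit <= usage + tune_sz" holds (1024 <= 990 + 64), so the function
 * grows qd_count by one 128M qunit (until qd_count + limit exceeds
 * usage + tune_sz) and returns 1 to request an acquire.
 */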
static int
check_cur_qunit(struct obd_device *obd,
                struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz, tune_sz;
        __u64 usage, limit, limit_org, pending_write = 0;
        long long record = 0;
        struct obd_quotactl *qctl;
        struct lustre_qunit_size *lqs = NULL;
        int ret = 0;
        ENTRY;

        if (!ll_sb_any_quota_active(sb))
                RETURN(0);

        spin_lock(&qctxt->lqc_lock);
        if (!qctxt->lqc_valid) {
                spin_unlock(&qctxt->lqc_lock);
                RETURN(0);
        }
        spin_unlock(&qctxt->lqc_lock);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH) /* no limit */
                        ret = 0;
                else
                        CERROR("can't get fs quota usage! (rc:%d)\n", ret);
                GOTO(out, ret);
        }

        if (QDATA_IS_BLK(qdata)) {
                usage = qctl->qc_dqblk.dqb_curspace;
                limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
        } else {
                usage = qctl->qc_dqblk.dqb_curinodes;
                limit = qctl->qc_dqblk.dqb_ihardlimit;
        }

        /* ignore the no quota limit case; and it can avoid creating
         * unnecessary lqs for uid/gid */
        if (!limit)
                GOTO(out, ret = 0);

        lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(qdata), qdata->qd_id),
                               qctxt, 0);
        if (IS_ERR(lqs) || lqs == NULL) {
                CERROR("fail to find a lqs for %sid %u!\n",
                       QDATA_IS_GRP(qdata) ? "g" : "u", qdata->qd_id);
                GOTO(out, ret = 0);
        }
        spin_lock(&lqs->lqs_lock);

        if (QDATA_IS_BLK(qdata)) {
                qunit_sz = lqs->lqs_bunit_sz;
                tune_sz  = lqs->lqs_btune_sz;
                pending_write = lqs->lqs_bwrite_pending;
                record   = lqs->lqs_blk_rec;
                LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
        } else {
                /* we didn't need change inode qunit size now */
                qunit_sz = lqs->lqs_iunit_sz;
                tune_sz  = lqs->lqs_itune_sz;
                pending_write = lqs->lqs_iwrite_pending;
                record   = lqs->lqs_ino_rec;
        }

        /* we don't count the MIN_QLIMIT */
        if ((limit == MIN_QLIMIT && !QDATA_IS_BLK(qdata)) ||
            (toqb(limit) == MIN_QLIMIT && QDATA_IS_BLK(qdata)))
                limit = 0;

        usage += pending_write;
        limit_org = limit;
        /* when a releasing quota req is sent, before it returned
           limit is assigned a small value. limit will overflow */
        if (limit + record < 0)
                usage -= record;
        else
                limit += record;

        LASSERT(qdata->qd_count == 0);
        if (limit <= usage + tune_sz) {
                while (qdata->qd_count + limit <=
                       usage + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 1;
        } else if (limit > usage + qunit_sz + tune_sz &&
                   limit_org > qdata->qd_count + qunit_sz) {
                while (limit - qdata->qd_count > usage + qunit_sz + tune_sz &&
                       limit_org > qdata->qd_count + qunit_sz)
                        qdata->qd_count += qunit_sz;
                ret = 2;
                /* if there are other pending writes for this uid/gid, releasing
                 * quota is put off until the last pending write b=16645 */
                if (ret == 2 && pending_write) {
                        CDEBUG(D_QUOTA, "delay quota release\n");
                        ret = 0;
                }
        }
        CDEBUG(D_QUOTA, "type: %c, limit: "LPU64", usage: "LPU64
               ", pending_write: "LPU64", record: "LPD64
               ", qunit_sz: %lu, tune_sz: %lu, ret: %d.\n",
               QDATA_IS_BLK(qdata) ? 'b' : 'i', limit, usage, pending_write,
               record, qunit_sz, tune_sz, ret);
        LASSERT(ret == 0 || qdata->qd_count);

        spin_unlock(&lqs->lqs_lock);
        lqs_putref(lqs);

        EXIT;
out:
        OBD_FREE_PTR(qctl);
        return ret;
}

/**
 * Compute the remaining quota for certain gid or uid b=11693
 */
int compute_remquota(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                     struct qunit_data *qdata, int isblk)
{
        struct super_block *sb = qctxt->lqc_sb;
        __u64 usage, limit;
        struct obd_quotactl *qctl;
        int ret = QUOTA_RET_OK;
        ENTRY;

        if (!ll_sb_any_quota_active(sb))
                RETURN(QUOTA_RET_NOQUOTA);

        /* ignore root user */
        if (qdata->qd_id == 0 && QDATA_IS_GRP(qdata) == USRQUOTA)
                RETURN(QUOTA_RET_NOLIMIT);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH) /* no limit */
                        ret = QUOTA_RET_NOLIMIT;
                else
                        CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)\n",
                               ret);
                GOTO(out, ret);
        }

        usage = isblk ? qctl->qc_dqblk.dqb_curspace :
                qctl->qc_dqblk.dqb_curinodes;
        limit = isblk ? qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS :
                qctl->qc_dqblk.dqb_ihardlimit;
        if (!limit) { /* no limit */
                ret = QUOTA_RET_NOLIMIT;
                GOTO(out, ret);
        }

        qdata->qd_count = limit - usage;

        EXIT;
out:
        OBD_FREE_PTR(qctl);
        return ret;
}

static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc)
{
        struct lustre_qunit *qunit = NULL;
        ENTRY;

        OBD_SLAB_ALLOC_PTR_GFP(qunit, qunit_cachep, CFS_ALLOC_IO);
        if (qunit == NULL)
                RETURN(NULL);

        CFS_INIT_LIST_HEAD(&qunit->lq_hash);
        init_waitqueue_head(&qunit->lq_waitq);
        atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
        qunit->lq_opc = opc;
        qunit->lq_lock = SPIN_LOCK_UNLOCKED;
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
        qunit->lq_owner = cfs_curproc_pid();
        RETURN(qunit);
}
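
/* the ref returned by alloc_qunit() belongs to the caller (schedule_dqacq()
 * and quota_barrier() below) and is dropped with qunit_put() once the
 * request completes or the qunit is torn down */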

static inline void free_qunit(struct lustre_qunit *qunit)
{
        OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));
}

static inline void qunit_get(struct lustre_qunit *qunit)
{
        atomic_inc(&qunit->lq_refcnt);
}

static void qunit_put(struct lustre_qunit *qunit)
{
        LASSERT(atomic_read(&qunit->lq_refcnt));
        if (atomic_dec_and_test(&qunit->lq_refcnt))
                free_qunit(qunit);
}

/* caller must hold qunit_hash_lock and release ref of qunit after using it */
static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
                                            struct qunit_data *qdata)
{
        unsigned int hashent = qunit_hashfn(qctxt, qdata);
        struct lustre_qunit *qunit;
        ENTRY;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        qunit = find_qunit(hashent, qctxt, qdata);
        if (qunit)
                qunit_get(qunit);
        RETURN(qunit);
}

static void
insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
{
        struct list_head *head;

        LASSERT(list_empty(&qunit->lq_hash));
        qunit_get(qunit);
        head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
        list_add(&qunit->lq_hash, head);
        QUNIT_SET_STATE(qunit, QUNIT_IN_HASH);
}

static void compute_lqs_after_removing_qunit(struct lustre_qunit *qunit)
{
        struct lustre_qunit_size *lqs;

        lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(&qunit->lq_data),
                                       qunit->lq_data.qd_id),
                               qunit->lq_ctxt, 0);
        if (lqs && !IS_ERR(lqs)) {
                spin_lock(&lqs->lqs_lock);
                if (qunit->lq_opc == QUOTA_DQACQ)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 1);
                if (qunit->lq_opc == QUOTA_DQREL)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 0);
                spin_unlock(&lqs->lqs_lock);
                /* this is for quota_search_lqs */
                lqs_putref(lqs);
        }
        /* this is for schedule_dqacq */
        qunit_put(qunit);
}

static void remove_qunit_nolock(struct lustre_qunit *qunit)
{
        LASSERT(!list_empty(&qunit->lq_hash));
        LASSERT_SPIN_LOCKED(&qunit_hash_lock);

        list_del_init(&qunit->lq_hash);
        QUNIT_SET_STATE(qunit, QUNIT_RM_FROM_HASH);
        qunit_put(qunit);
}

void *quota_barrier(struct lustre_quota_ctxt *qctxt,
                    struct obd_quotactl *oqctl, int isblk)
{
        struct lustre_qunit *qunit, *find_qunit;
        int cycle = 1;

        OBD_SLAB_ALLOC(qunit, qunit_cachep, CFS_ALLOC_IO, sizeof(*qunit));
        if (qunit == NULL) {
                CERROR("locating %sunit failed for %sid %u\n",
                       isblk ? "b" : "i", oqctl->qc_type ? "g" : "u",
                       oqctl->qc_id);
                qctxt_wait_pending_dqacq(qctxt, oqctl->qc_id,
                                         oqctl->qc_type, isblk);
                return NULL;
        }

        INIT_LIST_HEAD(&qunit->lq_hash);
        qunit->lq_lock = SPIN_LOCK_UNLOCKED;
        init_waitqueue_head(&qunit->lq_waitq);
        atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        qunit->lq_data.qd_id = oqctl->qc_id;
        qunit->lq_data.qd_flags = oqctl->qc_type;
        if (isblk)
                QDATA_SET_BLK(&qunit->lq_data);
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
        /* it means it is only an invalid qunit for barrier */
        qunit->lq_opc = QUOTA_LAST_OPC;

        while (1) {
                spin_lock(&qunit_hash_lock);
                find_qunit = dqacq_in_flight(qctxt, &qunit->lq_data);
                if (find_qunit) {
                        spin_unlock(&qunit_hash_lock);
                        qunit_put(find_qunit);
                        qctxt_wait_pending_dqacq(qctxt, oqctl->qc_id,
                                                 oqctl->qc_type, isblk);
                        CDEBUG(D_QUOTA, "cycle=%d\n", cycle++);
                        continue;
                }
                break;
        }
        insert_qunit_nolock(qctxt, qunit);
        spin_unlock(&qunit_hash_lock);
        return qunit;
}

void quota_unbarrier(void *handle)
{
        struct lustre_qunit *qunit = (struct lustre_qunit *)handle;

        if (qunit == NULL) {
                CERROR("handle is NULL\n");
                return;
        }

        LASSERT(qunit->lq_opc == QUOTA_LAST_OPC);
        spin_lock(&qunit_hash_lock);
        remove_qunit_nolock(qunit);
        spin_unlock(&qunit_hash_lock);
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, QUOTA_REQ_RETURNED);
        wake_up(&qunit->lq_waitq);
        qunit_put(qunit);
}
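
/* sketch of the intended pairing (hypothetical caller, e.g. around a
 * master-side setquota that must not race with in-flight acq/rel):
 *
 *      void *handle = quota_barrier(qctxt, oqctl, 1);
 *      ...change the limits...
 *      quota_unbarrier(handle);
 *
 * the barrier qunit sits in the hash with lq_opc == QUOTA_LAST_OPC, so a
 * concurrent schedule_dqacq() for the same id finds it in flight and waits
 * on lq_waitq until quota_unbarrier() wakes it */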

#define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
                                 (limit = count) : (limit += count)
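
/* MIN_QLIMIT acts as a sentinel for "limit set but nothing acquired yet",
 * so the first acquired count replaces it instead of being added to it
 * (check_cur_qunit() above ignores it for the same reason) */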

static inline int is_master(struct lustre_quota_ctxt *qctxt)
{
        return qctxt->lqc_handler ? 1 : 0;
}

static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait,
               struct obd_trans_info *oti);

static inline void qdata_to_oqaq(struct qunit_data *qdata,
                                 struct quota_adjust_qunit *oqaq)
{
        LASSERT(qdata);
        LASSERT(oqaq);

        oqaq->qaq_flags = qdata->qd_flags;
        oqaq->qaq_id    = qdata->qd_id;
        if (QDATA_IS_ADJBLK(qdata))
                oqaq->qaq_bunit_sz = qdata->qd_qunit;
        if (QDATA_IS_ADJINO(qdata))
                oqaq->qaq_iunit_sz = qdata->qd_qunit;
}
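
/*
 * dqacq_completion() runs when a DQACQ/DQREL request finishes, whether it
 * was handled locally by the master or answered over the wire: on success
 * it folds the acquired/released count into the operational quota file via
 * fsfilt_quotactl(), then removes the qunit from the hash, wakes every
 * thread sleeping on lq_waitq, and may reschedule another acq/rel if usage
 * moved across a qunit boundary in the meantime.
 */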
static int
dqacq_completion(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                 struct qunit_data *qdata, int rc, int opc)
{
        struct lustre_qunit *qunit = NULL;
        struct super_block *sb = qctxt->lqc_sb;
        int err = 0;
        struct quota_adjust_qunit *oqaq = NULL;
        int rc1 = 0;
        ENTRY;

        LASSERT(qdata);
        QDATA_DEBUG(qdata, "obd(%s): complete %s quota req\n",
                    obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");

        /* do it only when a releasing quota req more than 5MB b=18491 */
        if (opc == QUOTA_DQREL && qdata->qd_count >= 5242880)
                OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);

        /* update local operational quota file */
        if (rc == 0) {
                __u64 count = QUSG(qdata->qd_count, QDATA_IS_BLK(qdata));
                struct obd_quotactl *qctl;
                __u64 *hardlimit;

                OBD_ALLOC_PTR(qctl);
                if (qctl == NULL)
                        GOTO(out, err = -ENOMEM);

                /* acq/rel qunit for specified uid/gid is serialized,
                 * so there is no race between get fs quota limit and
                 * set fs quota limit */
                qctl->qc_cmd = Q_GETQUOTA;
                qctl->qc_id = qdata->qd_id;
                qctl->qc_type = QDATA_IS_GRP(qdata);
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err) {
                        CERROR("error get quota fs limit! (rc:%d)\n", err);
                        GOTO(out_mem, err);
                }

                if (QDATA_IS_BLK(qdata)) {
                        qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;
                } else {
                        qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;
                }

                CDEBUG(D_QUOTA, "hardlimit: "LPU64"\n", *hardlimit);

                if (*hardlimit == 0)
                        goto out_mem;

                switch (opc) {
                case QUOTA_DQACQ:
                        INC_QLIMIT(*hardlimit, count);
                        break;
                case QUOTA_DQREL:
                        LASSERTF(count < *hardlimit,
                                 "id(%u) flag(%u) type(%c) isblk(%c) "
                                 "count("LPU64") qd_qunit("LPU64") "
                                 "hardlimit("LPU64").\n",
                                 qdata->qd_id, qdata->qd_flags,
                                 QDATA_IS_GRP(qdata) ? 'g' : 'u',
                                 QDATA_IS_BLK(qdata) ? 'b' : 'i',
                                 qdata->qd_count, qdata->qd_qunit, *hardlimit);
                        *hardlimit -= count;
                        break;
                default:
                        LBUG();
                }

                /* clear quota limit */
                if (count == 0)
                        *hardlimit = 0;

                qctl->qc_cmd = Q_SETQUOTA;
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err)
                        CERROR("error set quota fs limit! (rc:%d)\n", err);

                QDATA_DEBUG(qdata, "%s completion\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
out_mem:
                OBD_FREE_PTR(qctl);
        } else if (rc == -EDQUOT) {
                QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
        } else if (rc == -EBUSY) {
                QDATA_DEBUG(qdata, "it is recovering, got EBUSY.\n");
        } else {
                CERROR("acquire qunit got error! (rc:%d)\n", rc);
        }
out:
        /* remove the qunit from hash */
        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        /* this qunit has been removed by qctxt_cleanup() */
        if (!qunit) {
                spin_unlock(&qunit_hash_lock);
                QDATA_DEBUG(qdata, "%s is discarded because qunit isn't found\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                RETURN(err);
        }

        LASSERT(opc == qunit->lq_opc);
        /* remove this qunit from lq_hash so that new processes cannot be added
         * to qunit->lq_waiters */
        remove_qunit_nolock(qunit);
        spin_unlock(&qunit_hash_lock);

        compute_lqs_after_removing_qunit(qunit);

        if (rc == 0)
                rc = QUOTA_REQ_RETURNED;
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, rc);
        /* wake up all waiters */
        wake_up_all(&qunit->lq_waitq);

        /* this is for dqacq_in_flight() */
        qunit_put(qunit);
        if (rc < 0 && rc != -EDQUOT)
                GOTO(out1, err);

        /* don't reschedule in such cases:
         *   - acq/rel failure and qunit isn't changed,
         *     but not for quota recovery.
         *   - local dqacq/dqrel.
         *   - local disk io failure.
         */
        OBD_ALLOC_PTR(oqaq);
        if (!oqaq)
                GOTO(out1, err = -ENOMEM);
        qdata_to_oqaq(qdata, oqaq);
        /* adjust the qunit size in slaves */
        rc1 = quota_adjust_slave_lqs(oqaq, qctxt);
        OBD_FREE_PTR(oqaq);
        if (rc1 < 0) {
                CERROR("adjust slave's qunit size failed!(rc:%d)\n", rc1);
                GOTO(out1, err = rc1);
        }
        if (err || (rc < 0 && rc != -EBUSY && rc1 == 0) || is_master(qctxt))
                GOTO(out1, err);

        if (opc == QUOTA_DQREL && qdata->qd_count >= 5242880 &&
            OBD_FAIL_CHECK(OBD_FAIL_QUOTA_DELAY_REL))
                GOTO(out1, err);

        /* reschedule another dqacq/dqrel if needed */
        qdata->qd_count = 0;
        qdata->qd_flags &= LQUOTA_QUNIT_FLAGS;
        rc1 = check_cur_qunit(obd, qctxt, qdata);
        if (rc1 > 0) {
                int opc;
                opc = rc1 == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                rc1 = schedule_dqacq(obd, qctxt, qdata, opc, 0, NULL);
                QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc1);
        }
out1:
        /* this is for alloc_qunit() */
        qunit_put(qunit);
        RETURN(err);
}

struct dqacq_async_args {
        struct lustre_quota_ctxt *aa_ctxt;
        struct lustre_qunit *aa_qunit;
};

static int dqacq_interpret(const struct lu_env *env,
                           struct ptlrpc_request *req, void *data, int rc)
{
        struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
        struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
        struct obd_device_target *obt = qctxt->lqc_obt;
        struct lustre_qunit *qunit = aa->aa_qunit;
        struct obd_device *obd = req->rq_import->imp_obd;
        struct qunit_data *qdata = NULL;
        ENTRY;

        LASSERT(req);
        LASSERT(req->rq_import);

        down_read(&obt->obt_rwsem);
        /* if a quota req timeouts or is dropped, we should update quota
         * statistics which will be handled in dqacq_completion. And in
         * this situation we should get qdata from request instead of
         * reply */
        qdata = quota_get_qdata(req, (rc != 0) ? QUOTA_REQUEST : QUOTA_REPLY,
                                QUOTA_IMPORT);
        if (IS_ERR(qdata)) {
                DEBUG_REQ(D_ERROR, req,
                          "error unpacking qunit_data(rc: %ld)\n",
                          PTR_ERR(qdata));
                qdata = &qunit->lq_data;
        }

        QDATA_DEBUG(qdata, "qdata: interpret rc(%d).\n", rc);
        QDATA_DEBUG((&qunit->lq_data), "lq_data:\n");

        if (qdata->qd_id != qunit->lq_data.qd_id ||
            OBD_FAIL_CHECK(OBD_FAIL_QUOTA_RET_QDATA)) {
                CERROR("the returned qd_id isn't expected!"
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_id,
                       qunit->lq_data.qd_id);
                qdata->qd_id = qunit->lq_data.qd_id;
                rc = -EPROTO;
        }
        if (QDATA_IS_GRP(qdata) != QDATA_IS_GRP(&qunit->lq_data)) {
                CERROR("the returned grp/usr isn't expected!"
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_flags,
                       qunit->lq_data.qd_flags);
                if (QDATA_IS_GRP(&qunit->lq_data))
                        QDATA_SET_GRP(qdata);
                else
                        QDATA_CLR_GRP(qdata);
                rc = -EPROTO;
        }
        if (qdata->qd_count > qunit->lq_data.qd_count) {
                CERROR("the returned qd_count isn't expected!"
                       "(qdata: "LPU64", lq_data: "LPU64")\n", qdata->qd_count,
                       qunit->lq_data.qd_count);
                rc = -EPROTO;
        }

        if (unlikely(rc == -ESRCH))
                CERROR("quota for %s has been enabled by master, but disabled "
                       "by slave.\n", QDATA_IS_GRP(qdata) ? "group" : "user");

        rc = dqacq_completion(obd, qctxt, qdata, rc,
                              lustre_msg_get_opc(req->rq_reqmsg));

        up_read(&obt->obt_rwsem);
        RETURN(rc);
}

/**
 * check if quota master is online
 */
int check_qm(struct lustre_quota_ctxt *qctxt)
{
        int rc;
        ENTRY;

        spin_lock(&qctxt->lqc_lock);
        /* quit waiting when mds is back or qctxt is cleaned up */
        rc = qctxt->lqc_import || !qctxt->lqc_valid;
        spin_unlock(&qctxt->lqc_lock);

        RETURN(rc);
}

/* wake up all waiting threads when lqc_import is NULL */
void dqacq_interrupt(struct lustre_quota_ctxt *qctxt)
{
        struct lustre_qunit *qunit, *tmp;
        int i;
        ENTRY;

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)
                                continue;

                        /* Wake up all waiters. Do not change lq_state.
                         * The waiters will check lq_rc which is kept as 0
                         * if no others change it, then the waiters will return
                         * -EAGAIN to caller who can perform related quota
                         * acq/rel if necessary. */
                        wake_up_all(&qunit->lq_waitq);
                }
        }
        spin_unlock(&qunit_hash_lock);
        EXIT;
}

static int got_qunit(struct lustre_qunit *qunit, int is_master)
{
        struct lustre_quota_ctxt *qctxt = qunit->lq_ctxt;
        int rc = 0;
        ENTRY;

        spin_lock(&qunit->lq_lock);
        switch (qunit->lq_state) {
        case QUNIT_IN_HASH:
        case QUNIT_RM_FROM_HASH:
                break;
        case QUNIT_FINISHED:
                rc = 1;
                break;
        default:
                CERROR("invalid qunit state %d\n", qunit->lq_state);
        }
        spin_unlock(&qunit->lq_lock);

        if (!rc) {
                spin_lock(&qctxt->lqc_lock);
                rc = !qctxt->lqc_valid;
                if (!is_master)
                        rc |= !qctxt->lqc_import;
                spin_unlock(&qctxt->lqc_lock);
        }

        RETURN(rc);
}
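
/* got_qunit() is the l_wait_event() condition used below: it returns true
 * once the qunit reaches QUNIT_FINISHED, or when waiting became pointless
 * because the context was invalidated or (on a slave) the import is gone */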

static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait,
               struct obd_trans_info *oti)
{
        struct lustre_qunit *qunit, *empty;
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_request *req;
        struct dqacq_async_args *aa;
        struct obd_import *imp = NULL;
        struct lustre_qunit_size *lqs = NULL;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        int rc = 0;
        int rc2 = 0;
        ENTRY;

        LASSERT(opc == QUOTA_DQACQ || opc == QUOTA_DQREL);
        do_gettimeofday(&work_start);
        if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);
        qunit = dqacq_in_flight(qctxt, qdata);
        if (qunit) {
                spin_unlock(&qunit_hash_lock);
                qunit_put(empty);

                goto wait_completion;
        }
        qunit = empty;
        qunit_get(qunit);
        insert_qunit_nolock(qctxt, qunit);
        spin_unlock(&qunit_hash_lock);

        lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(qdata), qdata->qd_id),
                               qctxt, 0);
        if (lqs && !IS_ERR(lqs)) {
                spin_lock(&lqs->lqs_lock);
                quota_compute_lqs(qdata, lqs, 1, (opc == QUOTA_DQACQ) ? 1 : 0);
                /* when this qdata returned from mds, it will call lqs_putref */
                lqs_getref(lqs);
                spin_unlock(&lqs->lqs_lock);
                /* this is for quota_search_lqs */
                lqs_putref(lqs);
        } else {
                CERROR("Can't find the lustre qunit size!\n");
        }

        QDATA_DEBUG(qdata, "obd(%s): send %s quota req\n",
                    obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");
        /* master is going to dqacq/dqrel from itself */
        if (is_master(qctxt)) {
                QDATA_DEBUG(qdata, "local %s.\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                QDATA_SET_CHANGE_QS(qdata);
                rc = qctxt->lqc_handler(obd, qdata, opc);
                rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
                /* this is for qunit_get() */
                qunit_put(qunit);

                do_gettimeofday(&work_end);
                timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                if (opc == QUOTA_DQACQ)
                        lprocfs_counter_add(qctxt->lqc_stats,
                                            wait ? LQUOTA_SYNC_ACQ : LQUOTA_ASYNC_ACQ,
                                            timediff);
                else
                        lprocfs_counter_add(qctxt->lqc_stats,
                                            wait ? LQUOTA_SYNC_REL : LQUOTA_ASYNC_REL,
                                            timediff);
                RETURN(rc ? rc : rc2);
        }

        spin_lock(&qctxt->lqc_lock);
        if (!qctxt->lqc_import) {
                spin_unlock(&qctxt->lqc_lock);
                QDATA_DEBUG(qdata, "lqc_import is invalid.\n");

                spin_lock(&qunit_hash_lock);
                remove_qunit_nolock(qunit);
                spin_unlock(&qunit_hash_lock);

                compute_lqs_after_removing_qunit(qunit);

                QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, -EAGAIN);
                wake_up_all(&qunit->lq_waitq);

                /* this is for qunit_get() */
                qunit_put(qunit);
                /* this for alloc_qunit() */
                qunit_put(qunit);
                spin_lock(&qctxt->lqc_lock);
                if (wait && !qctxt->lqc_import) {
                        spin_unlock(&qctxt->lqc_lock);

                        LASSERT(oti && oti->oti_thread &&
                                oti->oti_thread->t_watchdog);

                        lc_watchdog_disable(oti->oti_thread->t_watchdog);
                        CDEBUG(D_QUOTA, "sleep for quota master\n");
                        l_wait_event(qctxt->lqc_wait_for_qmaster,
                                     check_qm(qctxt), &lwi);
                        CDEBUG(D_QUOTA, "wake up when quota master is back\n");
                        lc_watchdog_touch(oti->oti_thread->t_watchdog,
                                          GET_TIMEOUT(oti->oti_thread->t_svc));
                } else {
                        spin_unlock(&qctxt->lqc_lock);
                }

                RETURN(-EAGAIN);
        }
        imp = class_import_get(qctxt->lqc_import);
        spin_unlock(&qctxt->lqc_lock);

        /* build dqacq/dqrel request */
        LASSERT(imp);

        req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_QUOTA_DQACQ,
                                        LUSTRE_MDS_VERSION, opc);
        class_import_put(imp);
        if (req == NULL) {
                CERROR("Can't alloc request\n");
                dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
                /* this is for qunit_get() */
                qunit_put(qunit);
                RETURN(-ENOMEM);
        }

        ptlrpc_request_set_replen(req);
        req->rq_no_resend = req->rq_no_delay = 1;
        rc = quota_copy_qdata(req, qdata, QUOTA_REQUEST, QUOTA_IMPORT);
        if (rc < 0) {
                CERROR("Can't pack qunit_data(rc: %d)\n", rc);
                ptlrpc_req_finished(req);
                dqacq_completion(obd, qctxt, qdata, -EPROTO, opc);
                /* this is for qunit_get() */
                qunit_put(qunit);
                RETURN(rc);
        }

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_ctxt = qctxt;
        aa->aa_qunit = qunit;

        req->rq_interpret_reply = dqacq_interpret;
        ptlrpcd_add_req(req, PSCOPE_OTHER);

        QDATA_DEBUG(qdata, "%s scheduled.\n",
                    opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
wait_completion:
        if (wait && qunit) {
                struct qunit_data *p = &qunit->lq_data;

                QDATA_DEBUG(p, "qunit(%p) is waiting for dqacq.\n", qunit);
                l_wait_event(qunit->lq_waitq, got_qunit(qunit, is_master(qctxt)),
                             &lwi);
                /* rc = -EAGAIN, it means the quota master isn't ready yet
                 * rc = QUOTA_REQ_RETURNED, it means a quota req is finished;
                 * rc = -EDQUOT, it means out of quota
                 * rc = -EBUSY, it means recovery is happening
                 * other rc < 0, it means real errors, functions who call
                 * schedule_dqacq should take care of this */
                spin_lock(&qunit->lq_lock);
                rc = qunit->lq_rc;
                spin_unlock(&qunit->lq_lock);
                CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: id(%u) flag(%u) "
                       "rc(%d) owner(%d)\n", qunit, qunit->lq_data.qd_id,
                       qunit->lq_data.qd_flags, rc, qunit->lq_owner);
        }

        qunit_put(qunit);
        do_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        if (opc == QUOTA_DQACQ)
                lprocfs_counter_add(qctxt->lqc_stats,
                                    wait ? LQUOTA_SYNC_ACQ : LQUOTA_ASYNC_ACQ,
                                    timediff);
        else
                lprocfs_counter_add(qctxt->lqc_stats,
                                    wait ? LQUOTA_SYNC_REL : LQUOTA_ASYNC_REL,
                                    timediff);

        RETURN(rc);
}

int
qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                   const unsigned int id[], __u32 isblk, int wait,
                   struct obd_trans_info *oti)
{
        int rc = 0, i = USRQUOTA;
        struct qunit_data qdata[MAXQUOTAS];
        ENTRY;

        if (quota_is_set(obd, id, isblk ? QB_SET : QI_SET) == 0)
                RETURN(0);

        for (i = 0; i < MAXQUOTAS; i++) {
                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = i;
                if (isblk)
                        QDATA_SET_BLK(&qdata[i]);
                qdata[i].qd_count = 0;

                rc = check_cur_qunit(obd, qctxt, &qdata[i]);
                if (rc > 0) {
                        int opc;
                        /* need acquire or release */
                        opc = rc == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                        rc = schedule_dqacq(obd, qctxt, &qdata[i], opc,
                                            wait, oti);
                        if (rc < 0)
                                RETURN(rc);
                } else if (wait == 1) {
                        /* when wait equals 1, it means mds_quota_acquire
                         * or filter_quota_acquire is calling it. */
                        rc = qctxt_wait_pending_dqacq(qctxt, id[i], i, isblk);
                }
        }

        RETURN(rc);
}
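
/* hypothetical caller sketch (not part of this file): the MDS/OST write
 * paths pass the owner ids of the object being modified and wait for the
 * acquire before letting the operation proceed, roughly:
 *
 *      unsigned int qids[MAXQUOTAS] = { uid, gid };
 *      rc = qctxt_adjust_qunit(obd, qctxt, qids, 1, 1, oti);
 *
 * isblk selects block vs. inode quota; wait == 1 is the synchronous mode
 * used by mds_quota_acquire()/filter_quota_acquire() noted above */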

int
qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                         unsigned short type, int isblk)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_data qdata;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        struct l_wait_info lwi = { 0 };
        int rc = 0;
        ENTRY;

        do_gettimeofday(&work_start);
        qdata.qd_id = id;
        qdata.qd_flags = type;
        if (isblk)
                QDATA_SET_BLK(&qdata);
        qdata.qd_count = 0;

        spin_lock(&qunit_hash_lock);
        qunit = dqacq_in_flight(qctxt, &qdata);
        spin_unlock(&qunit_hash_lock);

        if (qunit) {
                struct qunit_data *p = &qunit->lq_data;

                QDATA_DEBUG(p, "qunit(%p) is waiting for dqacq.\n", qunit);
                l_wait_event(qunit->lq_waitq, got_qunit(qunit, is_master(qctxt)),
                             &lwi);
                CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: rc(%d) "
                       "owner(%d)\n", qunit, qunit->lq_rc, qunit->lq_owner);
                /* keep same as schedule_dqacq() b=17030 */
                spin_lock(&qunit->lq_lock);
                rc = qunit->lq_rc;
                spin_unlock(&qunit->lq_lock);
                /* this is for dqacq_in_flight() */
                qunit_put(qunit);
                do_gettimeofday(&work_end);
                timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                lprocfs_counter_add(qctxt->lqc_stats,
                                    isblk ? LQUOTA_WAIT_PENDING_BLK_QUOTA :
                                            LQUOTA_WAIT_PENDING_INO_QUOTA,
                                    timediff);
        } else {
                do_gettimeofday(&work_end);
                timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                lprocfs_counter_add(qctxt->lqc_stats,
                                    isblk ? LQUOTA_NOWAIT_PENDING_BLK_QUOTA :
                                            LQUOTA_NOWAIT_PENDING_INO_QUOTA,
                                    timediff);
        }

        RETURN(rc);
}

int
qctxt_init(struct obd_device *obd, dqacq_handler_t handler)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct obd_device_target *obt = &obd->u.obt;
        struct super_block *sb = obt->obt_sb;
        int rc = 0;
        ENTRY;

        LASSERT(qctxt);

        rc = ptlrpcd_addref();
        if (rc)
                RETURN(rc);

        cfs_waitq_init(&qctxt->lqc_wait_for_qmaster);
        cfs_waitq_init(&qctxt->lqc_lqs_waitq);
        atomic_set(&qctxt->lqc_lqs, 0);
        spin_lock_init(&qctxt->lqc_lock);
        spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_handler = handler;
        qctxt->lqc_sb = sb;
        qctxt->lqc_obt = obt;
        qctxt->lqc_import = NULL;
        qctxt->lqc_recovery = 0;
        qctxt->lqc_switch_qs = 1; /* Change qunit size in default setting */
        qctxt->lqc_valid = 1;
        qctxt->lqc_cqs_boundary_factor = 4;
        qctxt->lqc_cqs_least_bunit = PTLRPC_MAX_BRW_SIZE;
        qctxt->lqc_cqs_least_iunit = 2;
        qctxt->lqc_cqs_qs_factor = 2;
        qctxt->lqc_flags = 0;
        QUOTA_MASTER_UNREADY(qctxt);
        qctxt->lqc_bunit_sz = default_bunit_sz;
        qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
        qctxt->lqc_iunit_sz = default_iunit_sz;
        qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
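        /* with the defaults: btune = 128M / 100 * 50 = 64M and
         * itune = 5120 * 50 / 100 = 2560 inodes */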
        qctxt->lqc_switch_seconds = 300; /* enlarging will wait 5 minutes
                                          * after the last shrinking */
        qctxt->lqc_sync_blk = 0;
        spin_unlock(&qctxt->lqc_lock);

        qctxt->lqc_lqs_hash = cfs_hash_create("LQS_HASH",
                                              HASH_LQS_CUR_BITS,
                                              HASH_LQS_MAX_BITS,
                                              &lqs_hash_ops, CFS_HASH_REHASH);
        if (!qctxt->lqc_lqs_hash) {
                CERROR("initialize hash lqs for %s error!\n", obd->obd_name);
                RETURN(-ENOMEM);
        }

#ifdef LPROCFS
        rc = lquota_proc_setup(obd, is_master(qctxt));
        if (rc)
                CERROR("initialize proc for %s error!\n", obd->obd_name);
#endif

        RETURN(rc);
}

static int check_lqs(struct lustre_quota_ctxt *qctxt)
{
        int rc;
        ENTRY;

        rc = !atomic_read(&qctxt->lqc_lqs);

        RETURN(rc);
}

void hash_put_lqs(void *obj, void *data)
{
        lqs_putref((struct lustre_qunit_size *)obj);
}

void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
{
        struct lustre_qunit *qunit, *tmp;
        struct list_head tmp_list;
        struct l_wait_info lwi = { 0 };
        struct obd_device_target *obt = qctxt->lqc_obt;
        int i;
        ENTRY;

        CFS_INIT_LIST_HEAD(&tmp_list);

        spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_valid = 0;
        spin_unlock(&qctxt->lqc_lock);

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)
                                continue;
                        remove_qunit_nolock(qunit);
                        list_add(&qunit->lq_hash, &tmp_list);
                }
        }
        spin_unlock(&qunit_hash_lock);

        list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
                list_del_init(&qunit->lq_hash);
                compute_lqs_after_removing_qunit(qunit);

                /* wake up all waiters */
                QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, 0);
                wake_up_all(&qunit->lq_waitq);
                qunit_put(qunit);
        }

        /* after qctxt_cleanup, qctxt might be freed, then check_qm() is
         * unpredicted. So we must wait until lqc_wait_for_qmaster is empty */
        while (cfs_waitq_active(&qctxt->lqc_wait_for_qmaster)) {
                cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1));
        }

        cfs_hash_for_each_safe(qctxt->lqc_lqs_hash, hash_put_lqs, NULL);
        l_wait_event(qctxt->lqc_lqs_waitq, check_lqs(qctxt), &lwi);
        down_write(&obt->obt_rwsem);
        cfs_hash_destroy(qctxt->lqc_lqs_hash);
        qctxt->lqc_lqs_hash = NULL;
        up_write(&obt->obt_rwsem);

        ptlrpcd_decref();

#ifdef LPROCFS
        if (lquota_proc_cleanup(qctxt))
                CERROR("cleanup proc error!\n");
#endif

        EXIT;
}

struct qslave_recov_thread_data {
        struct obd_device *obd;
        struct lustre_quota_ctxt *qctxt;
        struct completion comp;
};

/* FIXME only recovery block quota by now */
static int qslave_recovery_main(void *arg)
{
        struct qslave_recov_thread_data *data = arg;
        struct obd_device *obd = data->obd;
        struct lustre_quota_ctxt *qctxt = data->qctxt;
        unsigned int type;
        int rc = 0;
        ENTRY;

        cfs_daemonize_ctxt("qslave_recovd");

        /* for obdfilter */
        class_incref(obd, "qslave_recovd_filter", obd);

        complete(&data->comp);

        spin_lock(&qctxt->lqc_lock);
        if (qctxt->lqc_recovery) {
                spin_unlock(&qctxt->lqc_lock);
                class_decref(obd, "qslave_recovd_filter", obd);
                RETURN(0);
        } else {
                qctxt->lqc_recovery = 1;
                spin_unlock(&qctxt->lqc_lock);
        }

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qunit_data qdata;
                struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;
                int ret;

                LOCK_DQONOFF_MUTEX(dqopt);
                if (!ll_sb_has_quota_active(qctxt->lqc_sb, type)) {
                        UNLOCK_DQONOFF_MUTEX(dqopt);
                        continue;
                }

                LASSERT(dqopt->files[type] != NULL);
                CFS_INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
#else
                rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
#endif
                UNLOCK_DQONOFF_MUTEX(dqopt);
                if (rc)
                        CERROR("Get ids from quota file failed. (rc:%d)\n", rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);
                        /* skip slave recovery on itself */
                        if (is_master(qctxt))
                                goto free;
                        if (rc && rc != -EBUSY)
                                goto free;

                        qdata.qd_id = dqid->di_id;
                        qdata.qd_flags = type;
                        QDATA_SET_BLK(&qdata);
                        qdata.qd_count = 0;

                        ret = check_cur_qunit(obd, qctxt, &qdata);
                        if (ret > 0) {
                                int opc;
                                opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                                rc = schedule_dqacq(obd, qctxt, &qdata, opc,
                                                    0, NULL);
                                if (rc == -EDQUOT)
                                        rc = 0;
                        } else {
                                rc = 0;
                        }

                        if (rc && rc != -EBUSY)
                                CERROR("qslave recovery failed! (id:%d type:%d "
                                       " rc:%d)\n", dqid->di_id, type, rc);
free:
                        OBD_FREE_PTR(dqid);
                }
        }

        spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_recovery = 0;
        spin_unlock(&qctxt->lqc_lock);
        class_decref(obd, "qslave_recovd_filter", obd);
        RETURN(rc);
}

void
qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
{
        struct qslave_recov_thread_data data;
        int rc;
        ENTRY;

        if (!ll_sb_any_quota_active(qctxt->lqc_sb))
                goto exit;

        data.obd = obd;
        data.qctxt = qctxt;
        init_completion(&data.comp);

        rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
        if (rc < 0) {
                CERROR("Cannot start quota recovery thread: rc %d\n", rc);
                goto exit;
        }

        wait_for_completion(&data.comp);
exit:
        EXIT;
}

int quota_is_on(struct lustre_quota_ctxt *qctxt, struct obd_quotactl *oqctl)
{
        unsigned int type;

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                if (!Q_TYPESET(oqctl, type))
                        continue;
                if (!(qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type)))
                        return 0;
        }
        return 1;
}

int quota_is_off(struct lustre_quota_ctxt *qctxt, struct obd_quotactl *oqctl)
{
        unsigned int type;

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                if (!Q_TYPESET(oqctl, type))
                        continue;
                if (qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type))
                        return 0;
        }
        return 1;
}

/**
 * When quotaon, build a lqs for every uid/gid who has been set limitation
 * for quota. After quota_search_lqs, it will hold one ref for the lqs.
 * It will be released when qctxt_cleanup() is executed b=18574
 *
 * Should be called with obt->obt_quotachecking held. b=20152
 */
void build_lqs(struct obd_device *obd)
{
        struct obd_device_target *obt = &obd->u.obt;
        struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
        struct list_head id_list;
        int i, rc;

        LASSERT_SEM_LOCKED(&obt->obt_quotachecking);
        INIT_LIST_HEAD(&id_list);
        for (i = 0; i < MAXQUOTAS; i++) {
                struct dquot_id *dqid, *tmp;

                if (sb_dqopt(qctxt->lqc_sb)->files[i] == NULL)
                        continue;

#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, sb_dqopt(qctxt->lqc_sb)->files[i], NULL,
                                 i, &id_list);
#else
                rc = fsfilt_qids(obd, NULL, sb_dqopt(qctxt->lqc_sb)->files[i],
                                 i, &id_list);
#endif
                if (rc) {
                        CERROR("%s: failed to get %s qids!\n", obd->obd_name,
                               i ? "group" : "user");
                        continue;
                }

                list_for_each_entry_safe(dqid, tmp, &id_list,
                                         di_link) {
                        struct lustre_qunit_size *lqs;

                        list_del_init(&dqid->di_link);
                        lqs = quota_search_lqs(LQS_KEY(i, dqid->di_id),
                                               qctxt, 1);
                        if (lqs && !IS_ERR(lqs)) {
                                lqs->lqs_flags |= dqid->di_flag;
                                lqs_putref(lqs);
                        } else {
                                CERROR("%s: failed to create a lqs for %sid %u"
                                       "\n", obd->obd_name, i ? "g" : "u",
                                       dqid->di_id);
                        }

                        OBD_FREE_PTR(dqid);
                }
        }
}

/**
 * lqs<->qctxt hash operations
 */

/**
 * hash the quota id; the djb2 primes 5381/5387 serve only as distinct
 * multipliers for group vs. user keys (this is not a string hash)
 */
static unsigned
lqs_hash(cfs_hash_t *hs, void *key, unsigned mask)
{
        struct quota_adjust_qunit *lqs_key;
        unsigned hash;
        ENTRY;

        LASSERT(key);
        lqs_key = (struct quota_adjust_qunit *)key;
        hash = (QAQ_IS_GRP(lqs_key) ? 5381 : 5387) * lqs_key->qaq_id;

        RETURN(hash & mask);
}

static int
lqs_compare(void *key, struct hlist_node *hnode)
{
        struct lustre_qunit_size *q;
        int rc;
        ENTRY;

        LASSERT(key);
        q = hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);

        spin_lock(&q->lqs_lock);
        rc = (q->lqs_key == *((unsigned long long *)key));
        spin_unlock(&q->lqs_lock);

        RETURN(rc);
}

static void *
lqs_get(struct hlist_node *hnode)
{
        struct lustre_qunit_size *q =
                hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
        ENTRY;

        __lqs_getref(q);

        RETURN(q);
}

static void *
lqs_put(struct hlist_node *hnode)
{
        struct lustre_qunit_size *q =
                hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
        ENTRY;

        __lqs_putref(q);

        RETURN(q);
}

static void
lqs_exit(struct hlist_node *hnode)
{
        struct lustre_qunit_size *q =
                hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
        ENTRY;

        /*
         * Nothing should be left. User of lqs put it and
         * lqs also was deleted from table by this time
         * so we should have 0 refs.
         */
        LASSERTF(atomic_read(&q->lqs_refcount) == 0,
                 "Busy lqs %p with %d refs\n", q,
                 atomic_read(&q->lqs_refcount));
        OBD_FREE_PTR(q);
        EXIT;
}

static cfs_hash_ops_t lqs_hash_ops = {
        .hs_hash    = lqs_hash,
        .hs_compare = lqs_compare,
        .hs_get     = lqs_get,
        .hs_put     = lqs_put,
        .hs_exit    = lqs_exit,
};
#endif /* HAVE_QUOTA_SUPPORT */