1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * Copyright (c) 2011 Whamcloud, Inc.
36 * This file is part of Lustre, http://www.lustre.org/
37 * Lustre is a trademark of Sun Microsystems, Inc.
39 * lustre/quota/quota_context.c
41 * Lustre Quota Context
43 * Author: Niu YaWei <niu@clusterfs.com>
47 # define EXPORT_SYMTAB
50 #define DEBUG_SUBSYSTEM S_LQUOTA
52 #include <linux/version.h>
54 #include <asm/unistd.h>
55 #include <linux/slab.h>
56 #include <linux/quotaops.h>
57 #include <linux/module.h>
58 #include <linux/init.h>
60 #include <obd_class.h>
61 #include <lustre_quota.h>
62 #include <lustre_fsfilt.h>
63 #include <lprocfs_status.h>
64 #include "quota_internal.h"
66 static int hash_lqs_cur_bits = HASH_LQS_CUR_BITS;
67 CFS_MODULE_PARM(hash_lqs_cur_bits, "i", int, 0444,
68 "the current bits of lqs hash");
70 #ifdef HAVE_QUOTA_SUPPORT
72 static cfs_hash_ops_t lqs_hash_ops;
74 unsigned long default_bunit_sz = 128 * 1024 * 1024; /* 128M bytes */
75 unsigned long default_btune_ratio = 50; /* 50 percent */
76 unsigned long default_iunit_sz = 5120; /* 5120 inodes */
77 unsigned long default_itune_ratio = 50; /* 50 percent */
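/*
 * Note: the tune sizes derived from these ratios act as the trigger
 * thresholds (see qctxt_init() below, e.g. btune_sz = bunit_sz *
 * btune_ratio / 100): check_cur_qunit() acquires more quota once the
 * remaining limit falls within tune_sz of the current usage, and
 * releases qunits when the limit exceeds usage by more than
 * qunit_sz + tune_sz.
 */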
79 cfs_mem_cache_t *qunit_cachep = NULL;
80 cfs_list_t qunit_hash[NR_DQHASH];
81 cfs_spinlock_t qunit_hash_lock = CFS_SPIN_LOCK_UNLOCKED;
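/*
 * qunit_hash tracks the dqacq/dqrel requests currently in flight;
 * qunit_hash_lock protects all of its buckets as well as insertion
 * and removal of qunits.
 */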
83 /* please sync qunit_state with qunit_state_names */
90 * a qunit has been added into the qunit hash, which means
91 * a quota req will be sent or is in flight
95 * a qunit has been removed from the qunit hash, which
96 * means a quota req has been handled and has come back
98 QUNIT_RM_FROM_HASH = 2,
100 * qunit can wake up all threads waiting for it
105 static const char *qunit_state_names[] = {
106 [QUNIT_CREATED] = "CREATED",
107 [QUNIT_IN_HASH] = "IN_HASH",
108 [QUNIT_RM_FROM_HASH] = "RM_FROM_HASH",
109 [QUNIT_FINISHED] = "FINISHED",
112 struct lustre_qunit {
113 cfs_list_t lq_hash; /** Hash list in memory */
114 cfs_atomic_t lq_refcnt; /** Use count */
115 struct lustre_quota_ctxt *lq_ctxt; /** Quota context this applies to */
116 struct qunit_data lq_data; /** See qunit_data */
117 unsigned int lq_opc; /** QUOTA_DQACQ, QUOTA_DQREL */
118 cfs_waitq_t lq_waitq; /** Threads waiting for this qunit */
119 cfs_spinlock_t lq_lock; /** Protect the whole structure */
120 enum qunit_state lq_state; /** Present the status of qunit */
121 int lq_rc; /** The rc of lq_data */
125 #define QUNIT_SET_STATE(qunit, state) \
127 cfs_spin_lock(&qunit->lq_lock); \
128 QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), " \
129 "lq_rc(%d), lq_owner(%d)\n", \
130 qunit, qunit_state_names[qunit->lq_state], \
131 qunit_state_names[state], qunit->lq_rc, \
133 qunit->lq_state = state; \
134 cfs_spin_unlock(&qunit->lq_lock); \
137 #define QUNIT_SET_STATE_AND_RC(qunit, state, rc) \
139 cfs_spin_lock(&qunit->lq_lock); \
141 QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), " \
142 "lq_rc(%d), lq_owner(%d)\n", \
143 qunit, qunit_state_names[qunit->lq_state], \
144 qunit_state_names[state], qunit->lq_rc, \
146 qunit->lq_state = state; \
147 cfs_spin_unlock(&qunit->lq_lock); \
150 int should_translate_quota (struct obd_import *imp)
155 if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)
161 void qunit_cache_cleanup(void)
166 cfs_spin_lock(&qunit_hash_lock);
167 for (i = 0; i < NR_DQHASH; i++)
168 LASSERT(cfs_list_empty(qunit_hash + i));
169 cfs_spin_unlock(&qunit_hash_lock);
173 rc = cfs_mem_cache_destroy(qunit_cachep);
174 LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
180 int qunit_cache_init(void)
185 LASSERT(qunit_cachep == NULL);
186 qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
187 sizeof(struct lustre_qunit),
192 cfs_spin_lock(&qunit_hash_lock);
193 for (i = 0; i < NR_DQHASH; i++)
194 CFS_INIT_LIST_HEAD(qunit_hash + i);
195 cfs_spin_unlock(&qunit_hash_lock);
200 qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
201 __attribute__((__const__));
204 qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
206 unsigned int id = qdata->qd_id;
207 unsigned int type = QDATA_IS_GRP(qdata);
209 unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
210 tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
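/* the bucket index mixes the quota context pointer, the id and the
 * user/group type, so the same id hashed under a different context or
 * quota type generally lands in a different bucket */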
214 /* caller must hold qunit_hash_lock */
215 static inline struct lustre_qunit *find_qunit(unsigned int hashent,
216 struct lustre_quota_ctxt *qctxt,
217 struct qunit_data *qdata)
219 struct lustre_qunit *qunit = NULL;
220 struct qunit_data *tmp;
222 LASSERT_SPIN_LOCKED(&qunit_hash_lock);
223 cfs_list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
224 tmp = &qunit->lq_data;
225 if (qunit->lq_ctxt == qctxt &&
226 qdata->qd_id == tmp->qd_id &&
227 (qdata->qd_flags & LQUOTA_QUNIT_FLAGS) ==
228 (tmp->qd_flags & LQUOTA_QUNIT_FLAGS))
234 /* check_cur_qunit - check the current usage of qunit.
235 * @qctxt: quota context
236 * @qdata: the type of quota unit to be checked
238 * return: 1 - need to acquire a qunit;
239 * 2 - need to release a qunit;
240 * 0 - nothing needs to be done.
244 check_cur_qunit(struct obd_device *obd,
245 struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
247 struct super_block *sb = qctxt->lqc_sb;
248 unsigned long qunit_sz, tune_sz;
249 __u64 usage, limit, limit_org, pending_write = 0;
250 long long record = 0;
251 struct obd_quotactl *qctl;
252 struct lustre_qunit_size *lqs = NULL;
256 if (!ll_sb_any_quota_active(sb))
259 cfs_spin_lock(&qctxt->lqc_lock);
260 if (!qctxt->lqc_valid){
261 cfs_spin_unlock(&qctxt->lqc_lock);
264 cfs_spin_unlock(&qctxt->lqc_lock);
270 /* get fs quota usage & limit */
271 qctl->qc_cmd = Q_GETQUOTA;
272 qctl->qc_id = qdata->qd_id;
273 qctl->qc_type = QDATA_IS_GRP(qdata);
274 ret = fsfilt_quotactl(obd, sb, qctl);
276 if (ret == -ESRCH) /* no limit */
279 CERROR("can't get fs quota usage! (rc:%d)\n", ret);
283 if (QDATA_IS_BLK(qdata)) {
284 usage = qctl->qc_dqblk.dqb_curspace;
285 limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
287 usage = qctl->qc_dqblk.dqb_curinodes;
288 limit = qctl->qc_dqblk.dqb_ihardlimit;
291 /* ignore the case of no quota limit; this also avoids creating
292 * unnecessary lqs entries for the uid/gid */
296 lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(qdata), qdata->qd_id),
298 if (IS_ERR(lqs) || lqs == NULL) {
299 CERROR("fail to find a lqs for %sid: %u)!\n",
300 QDATA_IS_GRP(qdata) ? "g" : "u", qdata->qd_id);
303 cfs_spin_lock(&lqs->lqs_lock);
305 if (QDATA_IS_BLK(qdata)) {
306 qunit_sz = lqs->lqs_bunit_sz;
307 tune_sz = lqs->lqs_btune_sz;
308 pending_write = lqs->lqs_bwrite_pending;
309 record = lqs->lqs_blk_rec;
310 LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
312 /* no need to change the inode qunit size for now */
313 qunit_sz = lqs->lqs_iunit_sz;
314 tune_sz = lqs->lqs_itune_sz;
315 pending_write = lqs->lqs_iwrite_pending;
316 record = lqs->lqs_ino_rec;
319 /* we don't count the MIN_QLIMIT */
320 if ((limit == MIN_QLIMIT && !QDATA_IS_BLK(qdata)) ||
321 (toqb(limit) == MIN_QLIMIT && QDATA_IS_BLK(qdata)))
324 usage += pending_write;
326 /* while a releasing quota req is in flight, the limit has already been
327 assigned a small value before the req returns, so it may overflow */
333 LASSERT(qdata->qd_count == 0);
334 if (limit <= usage + tune_sz) {
335 while (qdata->qd_count + limit <=
337 qdata->qd_count += qunit_sz;
339 } else if (limit > usage + qunit_sz + tune_sz &&
340 limit_org > qdata->qd_count + qunit_sz) {
341 while (limit - qdata->qd_count > usage + qunit_sz + tune_sz &&
342 limit_org > qdata->qd_count + qunit_sz)
343 qdata->qd_count += qunit_sz;
345 /* if there are other pending writes for this uid/gid, releasing
346 * quota is put off until the last pending write b=16645 */
347 /* if there is an ongoing quota request, a releasing request is aborted.
348 * That ongoing quota request will call this function again when
349 * it returns b=18630 */
350 if (pending_write || record) {
351 CDEBUG(D_QUOTA, "delay quota release\n");
356 quota_compute_lqs(qdata, lqs, 1, (ret == 1) ? 1 : 0);
358 CDEBUG(D_QUOTA, "type: %c, limit: "LPU64", usage: "LPU64
359 ", pending_write: "LPU64", record: %lld"
360 ", qunit_sz: %lu, tune_sz: %lu, ret: %d.\n",
361 QDATA_IS_BLK(qdata) ? 'b' : 'i', limit, usage, pending_write,
362 record, qunit_sz, tune_sz, ret);
363 LASSERT(ret == 0 || qdata->qd_count);
365 cfs_spin_unlock(&lqs->lqs_lock);
375 * Compute the remaining quota for a certain gid or uid b=11693
377 int compute_remquota(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
378 struct qunit_data *qdata, int isblk)
380 struct super_block *sb = qctxt->lqc_sb;
382 struct obd_quotactl *qctl;
383 int ret = QUOTA_RET_OK;
386 if (!ll_sb_any_quota_active(sb))
387 RETURN(QUOTA_RET_NOQUOTA);
389 /* ignore root user */
390 if (qdata->qd_id == 0 && QDATA_IS_GRP(qdata) == USRQUOTA)
391 RETURN(QUOTA_RET_NOLIMIT);
397 /* get fs quota usage & limit */
398 qctl->qc_cmd = Q_GETQUOTA;
399 qctl->qc_id = qdata->qd_id;
400 qctl->qc_type = QDATA_IS_GRP(qdata);
401 ret = fsfilt_quotactl(obd, sb, qctl);
403 if (ret == -ESRCH) /* no limit */
404 ret = QUOTA_RET_NOLIMIT;
406 CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)",
411 usage = isblk ? qctl->qc_dqblk.dqb_curspace :
412 qctl->qc_dqblk.dqb_curinodes;
413 limit = isblk ? qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS :
414 qctl->qc_dqblk.dqb_ihardlimit;
415 if (!limit){ /* no limit */
416 ret = QUOTA_RET_NOLIMIT;
421 qdata->qd_count = limit - usage;
430 static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
431 struct qunit_data *qdata, int opc)
433 struct lustre_qunit *qunit = NULL;
436 OBD_SLAB_ALLOC_PTR_GFP(qunit, qunit_cachep, CFS_ALLOC_IO);
440 CFS_INIT_LIST_HEAD(&qunit->lq_hash);
441 cfs_waitq_init(&qunit->lq_waitq);
442 cfs_atomic_set(&qunit->lq_refcnt, 1);
443 qunit->lq_ctxt = qctxt;
444 memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
446 qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
447 QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
448 qunit->lq_owner = cfs_curproc_pid();
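/* the caller owns the initial reference taken above; it is dropped
 * later through qunit_put() once the request has completed */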
452 static inline void free_qunit(struct lustre_qunit *qunit)
454 OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));
457 static inline void qunit_get(struct lustre_qunit *qunit)
459 cfs_atomic_inc(&qunit->lq_refcnt);
462 static void qunit_put(struct lustre_qunit *qunit)
464 LASSERT(cfs_atomic_read(&qunit->lq_refcnt));
465 if (cfs_atomic_dec_and_test(&qunit->lq_refcnt))
469 /* caller must hold qunit_hash_lock and release ref of qunit after using it */
470 static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
471 struct qunit_data *qdata)
473 unsigned int hashent = qunit_hashfn(qctxt, qdata);
474 struct lustre_qunit *qunit;
477 LASSERT_SPIN_LOCKED(&qunit_hash_lock);
478 qunit = find_qunit(hashent, qctxt, qdata);
485 insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
489 LASSERT(cfs_list_empty(&qunit->lq_hash));
491 head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
492 cfs_list_add(&qunit->lq_hash, head);
493 QUNIT_SET_STATE(qunit, QUNIT_IN_HASH);
496 static void compute_lqs_after_removing_qunit(struct lustre_qunit *qunit)
498 struct lustre_qunit_size *lqs;
500 lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(&qunit->lq_data),
501 qunit->lq_data.qd_id),
503 if (lqs && !IS_ERR(lqs)) {
504 cfs_spin_lock(&lqs->lqs_lock);
505 if (qunit->lq_opc == QUOTA_DQACQ)
506 quota_compute_lqs(&qunit->lq_data, lqs, 0, 1);
507 if (qunit->lq_opc == QUOTA_DQREL)
508 quota_compute_lqs(&qunit->lq_data, lqs, 0, 0);
509 cfs_spin_unlock(&lqs->lqs_lock);
510 /* this is for quota_search_lqs */
512 /* this is for schedule_dqacq */
517 static void remove_qunit_nolock(struct lustre_qunit *qunit)
519 LASSERT(!cfs_list_empty(&qunit->lq_hash));
520 LASSERT_SPIN_LOCKED(&qunit_hash_lock);
522 cfs_list_del_init(&qunit->lq_hash);
523 QUNIT_SET_STATE(qunit, QUNIT_RM_FROM_HASH);
527 void* quota_barrier(struct lustre_quota_ctxt *qctxt,
528 struct obd_quotactl *oqctl, int isblk)
530 struct lustre_qunit *qunit, *find_qunit;
533 OBD_SLAB_ALLOC(qunit, qunit_cachep, CFS_ALLOC_IO, sizeof(*qunit));
535 CERROR("locating %sunit failed for %sid %u\n",
536 isblk ? "b" : "i", oqctl->qc_type ? "g" : "u",
538 qctxt_wait_pending_dqacq(qctxt, oqctl->qc_id,
539 oqctl->qc_type, isblk);
543 CFS_INIT_LIST_HEAD(&qunit->lq_hash);
544 qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
545 cfs_waitq_init(&qunit->lq_waitq);
546 cfs_atomic_set(&qunit->lq_refcnt, 1);
547 qunit->lq_ctxt = qctxt;
548 qunit->lq_data.qd_id = oqctl->qc_id;
549 qunit->lq_data.qd_flags = oqctl->qc_type;
551 QDATA_SET_BLK(&qunit->lq_data);
552 QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
553 /* this is only a placeholder qunit used as a barrier */
554 qunit->lq_opc = QUOTA_LAST_OPC;
557 cfs_spin_lock(&qunit_hash_lock);
558 find_qunit = dqacq_in_flight(qctxt, &qunit->lq_data);
560 cfs_spin_unlock(&qunit_hash_lock);
561 qunit_put(find_qunit);
562 qctxt_wait_pending_dqacq(qctxt, oqctl->qc_id,
563 oqctl->qc_type, isblk);
564 CDEBUG(D_QUOTA, "cycle=%d\n", cycle++);
569 insert_qunit_nolock(qctxt, qunit);
570 cfs_spin_unlock(&qunit_hash_lock);
574 void quota_unbarrier(void *handle)
576 struct lustre_qunit *qunit = (struct lustre_qunit *)handle;
579 CERROR("handle is NULL\n");
583 LASSERT(qunit->lq_opc == QUOTA_LAST_OPC);
584 cfs_spin_lock(&qunit_hash_lock);
585 remove_qunit_nolock(qunit);
586 cfs_spin_unlock(&qunit_hash_lock);
587 QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, QUOTA_REQ_RETURNED);
588 cfs_waitq_signal(&qunit->lq_waitq);
592 #define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
593 (limit = count) : (limit += count)
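/* if the hard limit still holds the MIN_QLIMIT sentinel, the acquired
 * count replaces it outright; otherwise the count is added on top of
 * the existing limit */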
596 static inline int is_master(struct lustre_quota_ctxt *qctxt)
598 return qctxt->lqc_handler ? 1 : 0;
602 schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
603 struct qunit_data *qdata, int opc, int wait,
604 struct obd_trans_info *oti);
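/*
 * Overview of the acquire/release path implemented below:
 * check_cur_qunit() decides whether a qunit needs to be acquired or
 * released, schedule_dqacq() either handles the request locally (on the
 * quota master) or sends a DQACQ/DQREL RPC to the master, and
 * dqacq_completion() updates the local operational quota file and wakes
 * up any threads waiting on the qunit.
 */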
606 static inline void qdata_to_oqaq(struct qunit_data *qdata,
607 struct quota_adjust_qunit *oqaq)
612 oqaq->qaq_flags = qdata->qd_flags;
613 oqaq->qaq_id = qdata->qd_id;
614 if (QDATA_IS_ADJBLK(qdata))
615 oqaq->qaq_bunit_sz = qdata->qd_qunit;
616 if (QDATA_IS_ADJINO(qdata))
617 oqaq->qaq_iunit_sz = qdata->qd_qunit;
621 dqacq_completion(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
622 struct qunit_data *qdata, int rc, int opc)
624 struct lustre_qunit *qunit = NULL;
625 struct super_block *sb = qctxt->lqc_sb;
627 struct quota_adjust_qunit *oqaq = NULL;
632 QDATA_DEBUG(qdata, "obd(%s): complete %s quota req\n",
633 obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");
635 /* do it only when a releasing quota req is more than 5MB b=18491 */
636 if (opc == QUOTA_DQREL && qdata->qd_count >= 5242880)
637 OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);
639 /* update local operational quota file */
641 __u64 count = QUSG(qdata->qd_count, QDATA_IS_BLK(qdata));
642 struct obd_quotactl *qctl;
647 GOTO(out, err = -ENOMEM);
649 /* acq/rel of a qunit for a given uid/gid is serialized,
650 * so there is no race between getting and setting the
651 * fs quota limit */
652 qctl->qc_cmd = Q_GETQUOTA;
653 qctl->qc_id = qdata->qd_id;
654 qctl->qc_type = QDATA_IS_GRP(qdata);
655 err = fsfilt_quotactl(obd, sb, qctl);
657 CERROR("error get quota fs limit! (rc:%d)\n", err);
661 if (QDATA_IS_BLK(qdata)) {
662 qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
663 hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;
665 qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
666 hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;
669 CDEBUG(D_QUOTA, "hardlimt: "LPU64"\n", *hardlimit);
676 INC_QLIMIT(*hardlimit, count);
679 LASSERTF(count < *hardlimit,
680 "id(%u) flag(%u) type(%c) isblk(%c) "
681 "count("LPU64") qd_qunit("LPU64") "
682 "hardlimit("LPU64").\n",
683 qdata->qd_id, qdata->qd_flags,
684 QDATA_IS_GRP(qdata) ? 'g' : 'u',
685 QDATA_IS_BLK(qdata) ? 'b': 'i',
686 qdata->qd_count, qdata->qd_qunit, *hardlimit);
693 /* clear quota limit */
697 qctl->qc_cmd = Q_SETQUOTA;
698 err = fsfilt_quotactl(obd, sb, qctl);
700 CERROR("error set quota fs limit! (rc:%d)\n", err);
702 QDATA_DEBUG(qdata, "%s completion\n",
703 opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
706 } else if (rc == -EDQUOT) {
707 QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
708 } else if (rc == -EBUSY) {
709 QDATA_DEBUG(qdata, "it's is recovering, got EBUSY.\n");
711 CERROR("acquire qunit got error! (rc:%d)\n", rc);
714 /* remove the qunit from hash */
715 cfs_spin_lock(&qunit_hash_lock);
717 qunit = dqacq_in_flight(qctxt, qdata);
718 /* this qunit has been removed by qctxt_cleanup() */
720 cfs_spin_unlock(&qunit_hash_lock);
721 QDATA_DEBUG(qdata, "%s is discarded because qunit isn't found\n",
722 opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
726 LASSERT(opc == qunit->lq_opc);
727 /* remove this qunit from lq_hash so that new processes cannot be added
728 * to qunit->lq_waiters */
729 remove_qunit_nolock(qunit);
730 cfs_spin_unlock(&qunit_hash_lock);
732 compute_lqs_after_removing_qunit(qunit);
735 rc = QUOTA_REQ_RETURNED;
736 QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, rc);
737 /* wake up all waiters */
738 cfs_waitq_broadcast(&qunit->lq_waitq);
740 /* this is for dqacq_in_flight() */
742 if (rc < 0 && rc != -EDQUOT)
745 /* don't reschedule in the following cases:
746 * - acq/rel failure where the qunit isn't changed
747 * (except during quota recovery),
748 * - local dqacq/dqrel,
749 * - local disk I/O failure.
753 GOTO(out1, err = -ENOMEM);
754 qdata_to_oqaq(qdata, oqaq);
755 /* adjust the qunit size in slaves */
756 rc1 = quota_adjust_slave_lqs(oqaq, qctxt);
759 CERROR("adjust slave's qunit size failed!(rc:%d)\n", rc1);
760 GOTO(out1, err = rc1);
762 if (err || (rc < 0 && rc != -EBUSY && rc1 == 0) || is_master(qctxt))
765 if (opc == QUOTA_DQREL && qdata->qd_count >= 5242880 &&
766 OBD_FAIL_CHECK(OBD_FAIL_QUOTA_DELAY_REL))
769 /* reschedule another dqacq/dqrel if needed */
771 qdata->qd_flags &= LQUOTA_QUNIT_FLAGS;
772 rc1 = check_cur_qunit(obd, qctxt, qdata);
775 opc = rc1 == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
776 rc1 = schedule_dqacq(obd, qctxt, qdata, opc, 0, NULL);
777 QDATA_DEBUG(qdata, "reschedudle opc(%d) rc(%d)\n", opc, rc1);
780 /* this is for alloc_qunit() */
785 struct dqacq_async_args {
786 struct lustre_quota_ctxt *aa_ctxt;
787 struct lustre_qunit *aa_qunit;
790 static int dqacq_interpret(const struct lu_env *env,
791 struct ptlrpc_request *req, void *data, int rc)
793 struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
794 struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
795 struct obd_device_target *obt = qctxt->lqc_obt;
796 struct lustre_qunit *qunit = aa->aa_qunit;
797 struct obd_device *obd = req->rq_import->imp_obd;
798 struct qunit_data *qdata = NULL;
802 LASSERT(req->rq_import);
804 cfs_down_read(&obt->obt_rwsem);
805 /* if a quota req times out or is dropped, we should update quota
806 * statistics which will be handled in dqacq_completion. And in
807 * this situation we should get qdata from request instead of
809 qdata = quota_get_qdata(req, (rc != 0) ? QUOTA_REQUEST : QUOTA_REPLY,
813 DEBUG_REQ(D_ERROR, req,
814 "error unpacking qunit_data(rc: %ld)\n",
816 qdata = &qunit->lq_data;
819 QDATA_DEBUG(qdata, "qdata: interpret rc(%d).\n", rc);
820 QDATA_DEBUG((&qunit->lq_data), "lq_data: \n");
822 if (qdata->qd_id != qunit->lq_data.qd_id ||
823 OBD_FAIL_CHECK(OBD_FAIL_QUOTA_RET_QDATA)) {
824 CERROR("the returned qd_id isn't expected!"
825 "(qdata: %u, lq_data: %u)\n", qdata->qd_id,
826 qunit->lq_data.qd_id);
827 qdata->qd_id = qunit->lq_data.qd_id;
830 if (QDATA_IS_GRP(qdata) != QDATA_IS_GRP(&qunit->lq_data)) {
831 CERROR("the returned grp/usr isn't expected!"
832 "(qdata: %u, lq_data: %u)\n", qdata->qd_flags,
833 qunit->lq_data.qd_flags);
834 if (QDATA_IS_GRP(&qunit->lq_data))
835 QDATA_SET_GRP(qdata);
837 QDATA_CLR_GRP(qdata);
840 if (qdata->qd_count > qunit->lq_data.qd_count) {
841 CERROR("the returned qd_count isn't expected!"
842 "(qdata: "LPU64", lq_data: "LPU64")\n", qdata->qd_count,
843 qunit->lq_data.qd_count);
847 if (unlikely(rc == -ESRCH))
848 CERROR("quota for %s has been enabled by master, but disabled "
849 "by slave.\n", QDATA_IS_GRP(qdata) ? "group" : "user");
851 rc = dqacq_completion(obd, qctxt, qdata, rc,
852 lustre_msg_get_opc(req->rq_reqmsg));
854 cfs_up_read(&obt->obt_rwsem);
859 * check if quota master is online
861 int check_qm(struct lustre_quota_ctxt *qctxt)
866 cfs_spin_lock(&qctxt->lqc_lock);
867 /* quit waiting when the MDS is back or the qctxt is cleaned up */
868 rc = qctxt->lqc_import || !qctxt->lqc_valid;
869 cfs_spin_unlock(&qctxt->lqc_lock);
874 /* wake up all waiting threads when lqc_import is NULL */
875 void dqacq_interrupt(struct lustre_quota_ctxt *qctxt)
877 struct lustre_qunit *qunit, *tmp;
881 cfs_spin_lock(&qunit_hash_lock);
882 for (i = 0; i < NR_DQHASH; i++) {
883 cfs_list_for_each_entry_safe(qunit, tmp, &qunit_hash[i],
885 if (qunit->lq_ctxt != qctxt)
888 /* Wake up all waiters. Do not change lq_state.
889 * The waiters will check lq_rc, which stays 0 unless
890 * someone else changes it; they will then return
891 * -EAGAIN to the caller, who can perform the related
892 * quota acq/rel if necessary. */
893 cfs_waitq_broadcast(&qunit->lq_waitq);
896 cfs_spin_unlock(&qunit_hash_lock);
900 static int got_qunit(struct lustre_qunit *qunit, int is_master)
902 struct lustre_quota_ctxt *qctxt = qunit->lq_ctxt;
906 cfs_spin_lock(&qunit->lq_lock);
907 switch (qunit->lq_state) {
909 case QUNIT_RM_FROM_HASH:
915 CERROR("invalid qunit state %d\n", qunit->lq_state);
917 cfs_spin_unlock(&qunit->lq_lock);
920 cfs_spin_lock(&qctxt->lqc_lock);
921 rc = !qctxt->lqc_valid;
923 rc |= !qctxt->lqc_import;
924 cfs_spin_unlock(&qctxt->lqc_lock);
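/* a non-zero result lets the waiters in schedule_dqacq() and
 * qctxt_wait_pending_dqacq() stop sleeping: either the qunit reached a
 * final state or the quota context/import became invalid */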
931 revoke_lqs_rec(struct lustre_qunit_size *lqs, struct qunit_data *qdata, int opc)
933 /* revoke lqs_xxx_rec which is computed in check_cur_qunit
935 cfs_spin_lock(&lqs->lqs_lock);
936 quota_compute_lqs(qdata, lqs, 0, (opc == QUOTA_DQACQ) ? 1 : 0);
937 cfs_spin_unlock(&lqs->lqs_lock);
941 schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
942 struct qunit_data *qdata, int opc, int wait,
943 struct obd_trans_info *oti)
945 struct lustre_qunit *qunit, *empty;
946 struct l_wait_info lwi = { 0 };
947 struct ptlrpc_request *req;
948 struct dqacq_async_args *aa;
949 struct obd_import *imp = NULL;
950 struct lustre_qunit_size *lqs = NULL;
951 struct timeval work_start;
952 struct timeval work_end;
957 LASSERT(opc == QUOTA_DQACQ || opc == QUOTA_DQREL);
958 cfs_gettimeofday(&work_start);
960 lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(qdata), qdata->qd_id),
962 if (lqs == NULL || IS_ERR(lqs)) {
963 CERROR("Can't find the lustre qunit size!\n");
967 if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL) {
968 revoke_lqs_rec(lqs, qdata, opc);
969 /* this is for quota_search_lqs */
974 OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_SD, 5);
976 cfs_spin_lock(&qunit_hash_lock);
977 qunit = dqacq_in_flight(qctxt, qdata);
979 cfs_spin_unlock(&qunit_hash_lock);
982 revoke_lqs_rec(lqs, qdata, opc);
983 /* this is for quota_search_lqs */
985 goto wait_completion;
989 insert_qunit_nolock(qctxt, qunit);
990 cfs_spin_unlock(&qunit_hash_lock);
992 /* From here, the quota request will be sent anyway.
993 * When this qdata request returns or is cancelled,
994 * lqs_putref will be called at that time */
996 /* this is for quota_search_lqs */
999 QDATA_DEBUG(qdata, "obd(%s): send %s quota req\n",
1000 obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");
1001 /* master is going to dqacq/dqrel from itself */
1002 if (is_master(qctxt)) {
1004 QDATA_DEBUG(qdata, "local %s.\n",
1005 opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
1006 QDATA_SET_CHANGE_QS(qdata);
1007 rc = qctxt->lqc_handler(obd, qdata, opc);
1008 rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
1009 /* this is for qunit_get() */
1012 cfs_gettimeofday(&work_end);
1013 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
1014 if (opc == QUOTA_DQACQ)
1015 lprocfs_counter_add(qctxt->lqc_stats,
1016 wait ? LQUOTA_SYNC_ACQ : LQUOTA_ASYNC_ACQ,
1019 lprocfs_counter_add(qctxt->lqc_stats,
1020 wait ? LQUOTA_SYNC_REL : LQUOTA_ASYNC_REL,
1022 RETURN(rc ? rc : rc2);
1025 cfs_spin_lock(&qctxt->lqc_lock);
1026 if (!qctxt->lqc_import) {
1027 cfs_spin_unlock(&qctxt->lqc_lock);
1028 QDATA_DEBUG(qdata, "lqc_import is invalid.\n");
1030 cfs_spin_lock(&qunit_hash_lock);
1031 remove_qunit_nolock(qunit);
1032 cfs_spin_unlock(&qunit_hash_lock);
1034 compute_lqs_after_removing_qunit(qunit);
1036 QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, -EAGAIN);
1037 cfs_waitq_broadcast(&qunit->lq_waitq);
1039 /* this is for qunit_get() */
1041 /* this for alloc_qunit() */
1043 cfs_spin_lock(&qctxt->lqc_lock);
1044 if (wait && !qctxt->lqc_import) {
1045 cfs_spin_unlock(&qctxt->lqc_lock);
1046 LASSERT(oti && oti->oti_thread);
1047 /* The recovery thread doesn't have a watchdog
1048 * attached. LU-369 */
1049 if (oti->oti_thread->t_watchdog)
1050 lc_watchdog_disable(oti->oti_thread->\
1052 CDEBUG(D_QUOTA, "sleep for quota master\n");
1053 l_wait_event(qctxt->lqc_wait_for_qmaster,
1054 check_qm(qctxt), &lwi);
1055 CDEBUG(D_QUOTA, "wake up when quota master is back\n");
1056 if (oti->oti_thread->t_watchdog)
1057 lc_watchdog_touch(oti->oti_thread->t_watchdog,
1058 CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
1060 cfs_spin_unlock(&qctxt->lqc_lock);
1065 imp = class_import_get(qctxt->lqc_import);
1066 cfs_spin_unlock(&qctxt->lqc_lock);
1068 /* build dqacq/dqrel request */
1071 req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_QUOTA_DQACQ,
1072 LUSTRE_MDS_VERSION, opc);
1073 class_import_put(imp);
1075 CERROR("Can't alloc request\n");
1076 dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
1077 /* this is for qunit_get() */
1082 ptlrpc_request_set_replen(req);
1083 req->rq_no_resend = req->rq_no_delay = 1;
1084 rc = quota_copy_qdata(req, qdata, QUOTA_REQUEST, QUOTA_IMPORT);
1086 CERROR("Can't pack qunit_data(rc: %d)\n", rc);
1087 ptlrpc_req_finished(req);
1088 dqacq_completion(obd, qctxt, qdata, -EPROTO, opc);
1089 /* this is for qunit_get() */
1094 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1095 aa = ptlrpc_req_async_args(req);
1096 aa->aa_ctxt = qctxt;
1097 aa->aa_qunit = qunit;
1099 req->rq_interpret_reply = dqacq_interpret;
1100 ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
1102 QDATA_DEBUG(qdata, "%s scheduled.\n",
1103 opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
1105 if (wait && qunit) {
1106 struct qunit_data *p = &qunit->lq_data;
1108 QDATA_DEBUG(p, "qunit(%p) is waiting for dqacq.\n", qunit);
1109 l_wait_event(qunit->lq_waitq, got_qunit(qunit, is_master(qctxt)),
1111 /* rc = -EAGAIN: the quota master isn't ready yet;
1112 * rc = QUOTA_REQ_RETURNED: the quota req has finished;
1113 * rc = -EDQUOT: out of quota;
1114 * rc = -EBUSY: recovery is in progress;
1115 * other rc < 0: real errors; callers of schedule_dqacq
1116 * should take care of them */
1117 cfs_spin_lock(&qunit->lq_lock);
1119 cfs_spin_unlock(&qunit->lq_lock);
1120 CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: id(%u) flag(%u) "
1121 "rc(%d) owner(%d)\n", qunit, qunit->lq_data.qd_id,
1122 qunit->lq_data.qd_flags, rc, qunit->lq_owner);
1126 cfs_gettimeofday(&work_end);
1127 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
1128 if (opc == QUOTA_DQACQ)
1129 lprocfs_counter_add(qctxt->lqc_stats,
1130 wait ? LQUOTA_SYNC_ACQ : LQUOTA_ASYNC_ACQ,
1133 lprocfs_counter_add(qctxt->lqc_stats,
1134 wait ? LQUOTA_SYNC_REL : LQUOTA_ASYNC_REL,
1141 qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
1142 const unsigned int id[], __u32 isblk, int wait,
1143 struct obd_trans_info *oti)
1145 int rc = 0, i = USRQUOTA;
1146 struct qunit_data qdata[MAXQUOTAS];
1149 if (quota_is_set(obd, id, isblk ? QB_SET : QI_SET) == 0)
1152 for (i = 0; i < MAXQUOTAS; i++) {
1153 qdata[i].qd_id = id[i];
1154 qdata[i].qd_flags = i;
1156 QDATA_SET_BLK(&qdata[i]);
1157 qdata[i].qd_count = 0;
1159 rc = check_cur_qunit(obd, qctxt, &qdata[i]);
1162 /* need acquire or release */
1163 opc = rc == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
1164 rc = schedule_dqacq(obd, qctxt, &qdata[i], opc,
1168 } else if (wait == 1) {
1169 /* when wait equals 1, it means mds_quota_acquire
1170 * or filter_quota_acquire is calling it. */
1171 rc = qctxt_wait_pending_dqacq(qctxt, id[i], i, isblk);
1181 qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
1182 unsigned short type, int isblk)
1184 struct lustre_qunit *qunit = NULL;
1185 struct qunit_data qdata;
1186 struct timeval work_start;
1187 struct timeval work_end;
1189 struct l_wait_info lwi = { 0 };
1193 cfs_gettimeofday(&work_start);
1195 qdata.qd_flags = type;
1197 QDATA_SET_BLK(&qdata);
1200 cfs_spin_lock(&qunit_hash_lock);
1201 qunit = dqacq_in_flight(qctxt, &qdata);
1202 cfs_spin_unlock(&qunit_hash_lock);
1205 struct qunit_data *p = &qunit->lq_data;
1207 QDATA_DEBUG(p, "qunit(%p) is waiting for dqacq.\n", qunit);
1208 l_wait_event(qunit->lq_waitq, got_qunit(qunit, is_master(qctxt)),
1210 CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: rc(%d) "
1211 "owner(%d)\n", qunit, qunit->lq_rc, qunit->lq_owner);
1212 /* keep same as schedule_dqacq() b=17030 */
1213 cfs_spin_lock(&qunit->lq_lock);
1215 cfs_spin_unlock(&qunit->lq_lock);
1216 /* this is for dqacq_in_flight() */
1218 cfs_gettimeofday(&work_end);
1219 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
1220 lprocfs_counter_add(qctxt->lqc_stats,
1221 isblk ? LQUOTA_WAIT_PENDING_BLK_QUOTA :
1222 LQUOTA_WAIT_PENDING_INO_QUOTA,
1225 cfs_gettimeofday(&work_end);
1226 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
1227 lprocfs_counter_add(qctxt->lqc_stats,
1228 isblk ? LQUOTA_NOWAIT_PENDING_BLK_QUOTA :
1229 LQUOTA_NOWAIT_PENDING_INO_QUOTA,
1237 qctxt_init(struct obd_device *obd, dqacq_handler_t handler)
1239 struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
1240 struct obd_device_target *obt = &obd->u.obt;
1241 struct super_block *sb = obt->obt_sb;
1247 rc = ptlrpcd_addref();
1251 cfs_waitq_init(&qctxt->lqc_wait_for_qmaster);
1252 cfs_waitq_init(&qctxt->lqc_lqs_waitq);
1253 cfs_atomic_set(&qctxt->lqc_lqs, 0);
1254 cfs_spin_lock_init(&qctxt->lqc_lock);
1255 cfs_spin_lock(&qctxt->lqc_lock);
1256 qctxt->lqc_handler = handler;
1258 qctxt->lqc_obt = obt;
1259 qctxt->lqc_import = NULL;
1260 qctxt->lqc_recovery = 0;
1261 qctxt->lqc_switch_qs = 1; /* Change qunit size in default setting */
1262 qctxt->lqc_valid = 1;
1263 qctxt->lqc_cqs_boundary_factor = 4;
1264 qctxt->lqc_cqs_least_bunit = PTLRPC_MAX_BRW_SIZE;
1265 qctxt->lqc_cqs_least_iunit = 2;
1266 qctxt->lqc_cqs_qs_factor = 2;
1267 qctxt->lqc_flags = 0;
1268 QUOTA_MASTER_UNREADY(qctxt);
1269 qctxt->lqc_bunit_sz = default_bunit_sz;
1270 qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
1271 qctxt->lqc_iunit_sz = default_iunit_sz;
1272 qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
1273 qctxt->lqc_switch_seconds = 300; /* enlarging will wait 5 minutes
1274 * after the last shrinking */
1275 qctxt->lqc_sync_blk = 0;
1276 cfs_spin_unlock(&qctxt->lqc_lock);
1278 qctxt->lqc_lqs_hash = cfs_hash_create("LQS_HASH",
1281 min(hash_lqs_cur_bits,
1283 0, CFS_HASH_MIN_THETA,
1285 &lqs_hash_ops, CFS_HASH_DEFAULT);
1286 if (!qctxt->lqc_lqs_hash) {
1287 CERROR("initialize hash lqs for %s error!\n", obd->obd_name);
1292 rc = lquota_proc_setup(obd, is_master(qctxt));
1294 CERROR("initialize proc for %s error!\n", obd->obd_name);
1300 static int check_lqs(struct lustre_quota_ctxt *qctxt)
1305 rc = !cfs_atomic_read(&qctxt->lqc_lqs);
1310 int qctxt_del_lqs(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1311 cfs_hlist_node_t *hnode, void *data)
1313 /* remove from the hash and drop one refcount */
1314 cfs_hash_bd_del_locked(hs, bd, hnode);
1318 void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
1320 struct lustre_qunit *qunit, *tmp;
1321 cfs_list_t tmp_list;
1322 struct l_wait_info lwi = { 0 };
1323 struct obd_device_target *obt = qctxt->lqc_obt;
1327 CFS_INIT_LIST_HEAD(&tmp_list);
1329 cfs_spin_lock(&qctxt->lqc_lock);
1330 qctxt->lqc_valid = 0;
1331 cfs_spin_unlock(&qctxt->lqc_lock);
1333 cfs_spin_lock(&qunit_hash_lock);
1334 for (i = 0; i < NR_DQHASH; i++) {
1335 cfs_list_for_each_entry_safe(qunit, tmp, &qunit_hash[i],
1337 if (qunit->lq_ctxt != qctxt)
1339 remove_qunit_nolock(qunit);
1340 cfs_list_add(&qunit->lq_hash, &tmp_list);
1343 cfs_spin_unlock(&qunit_hash_lock);
1345 cfs_list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
1346 cfs_list_del_init(&qunit->lq_hash);
1347 compute_lqs_after_removing_qunit(qunit);
1349 /* wake up all waiters */
1350 QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, 0);
1351 cfs_waitq_broadcast(&qunit->lq_waitq);
1355 /* after qctxt_cleanup, qctxt might be freed and check_qm() would be
1356 * unpredictable, so we must wait until lqc_wait_for_qmaster is empty */
1357 while (cfs_waitq_active(&qctxt->lqc_wait_for_qmaster)) {
1358 cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
1359 cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
1360 cfs_time_seconds(1));
1363 /* release the refcount on lustre_qunit_size held by lqs_hash */
1364 cfs_hash_for_each_safe(qctxt->lqc_lqs_hash, qctxt_del_lqs, NULL);
1366 l_wait_event(qctxt->lqc_lqs_waitq, check_lqs(qctxt), &lwi);
1367 cfs_down_write(&obt->obt_rwsem);
1368 cfs_hash_putref(qctxt->lqc_lqs_hash);
1369 qctxt->lqc_lqs_hash = NULL;
1370 cfs_up_write(&obt->obt_rwsem);
1375 if (lquota_proc_cleanup(qctxt))
1376 CERROR("cleanup proc error!\n");
1382 struct qslave_recov_thread_data {
1383 struct obd_device *obd;
1384 struct lustre_quota_ctxt *qctxt;
1385 cfs_completion_t comp;
1388 /* FIXME: only block quota is recovered for now */
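/* The recovery thread walks every id found in the slave's operational
 * quota files and re-runs check_cur_qunit()/schedule_dqacq() for each
 * of them, so the amounts acquired by the slave are brought back in
 * sync with the quota master after a restart. */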
1389 static int qslave_recovery_main(void *arg)
1391 struct qslave_recov_thread_data *data = arg;
1392 struct obd_device *obd = data->obd;
1393 struct lustre_quota_ctxt *qctxt = data->qctxt;
1398 cfs_daemonize_ctxt("qslave_recovd");
1401 class_incref(obd, "qslave_recovd_filter", obd);
1403 cfs_complete(&data->comp);
1405 cfs_spin_lock(&qctxt->lqc_lock);
1406 if (qctxt->lqc_recovery) {
1407 cfs_spin_unlock(&qctxt->lqc_lock);
1408 class_decref(obd, "qslave_recovd_filter", obd);
1411 qctxt->lqc_recovery = 1;
1412 cfs_spin_unlock(&qctxt->lqc_lock);
1415 for (type = USRQUOTA; type < MAXQUOTAS; type++) {
1416 struct qunit_data qdata;
1417 struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
1419 struct dquot_id *dqid, *tmp;
1422 LOCK_DQONOFF_MUTEX(dqopt);
1423 if (!ll_sb_has_quota_active(qctxt->lqc_sb, type)) {
1424 UNLOCK_DQONOFF_MUTEX(dqopt);
1428 LASSERT(dqopt->files[type] != NULL);
1429 CFS_INIT_LIST_HEAD(&id_list);
1430 #ifndef KERNEL_SUPPORTS_QUOTA_READ
1431 rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
1433 rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
1435 UNLOCK_DQONOFF_MUTEX(dqopt);
1437 CERROR("Get ids from quota file failed. (rc:%d)\n", rc);
1439 cfs_list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
1440 cfs_list_del_init(&dqid->di_link);
1441 /* skip slave recovery on itself */
1442 if (is_master(qctxt))
1444 if (rc && rc != -EBUSY)
1447 qdata.qd_id = dqid->di_id;
1448 qdata.qd_flags = type;
1449 QDATA_SET_BLK(&qdata);
1452 ret = check_cur_qunit(obd, qctxt, &qdata);
1455 opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
1456 rc = schedule_dqacq(obd, qctxt, &qdata, opc,
1464 if (rc && rc != -EBUSY)
1465 CERROR("qslave recovery failed! (id:%d type:%d "
1466 " rc:%d)\n", dqid->di_id, type, rc);
1472 cfs_spin_lock(&qctxt->lqc_lock);
1473 qctxt->lqc_recovery = 0;
1474 cfs_spin_unlock(&qctxt->lqc_lock);
1475 class_decref(obd, "qslave_recovd_filter", obd);
1480 qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
1482 struct qslave_recov_thread_data data;
1486 if (!ll_sb_any_quota_active(qctxt->lqc_sb))
1491 cfs_init_completion(&data.comp);
1493 rc = cfs_create_thread(qslave_recovery_main, &data,
1496 CERROR("Cannot start quota recovery thread: rc %d\n", rc);
1499 cfs_wait_for_completion(&data.comp);
1504 inline int quota_is_on(struct lustre_quota_ctxt *qctxt,
1505 struct obd_quotactl *oqctl)
1507 return ((qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type)) ==
1508 UGQUOTA2LQC(oqctl->qc_type));
1511 inline int quota_is_off(struct lustre_quota_ctxt *qctxt,
1512 struct obd_quotactl *oqctl)
1514 return !(qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type));
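/* note: quota_is_on() requires every quota type requested in qc_type
 * to be active in lqc_flags, while quota_is_off() requires that none
 * of them is active */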
1518 * When quota is turned on, build an lqs for every uid/gid that has a
1519 * quota limit set. After quota_search_lqs, one ref of the lqs is held.
1520 * It will be released when qctxt_cleanup() is executed b=18574
1522 * Should be called with obt->obt_quotachecking held. b=20152
1524 void build_lqs(struct obd_device *obd)
1526 struct obd_device_target *obt = &obd->u.obt;
1527 struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
1531 LASSERT_SEM_LOCKED(&obt->obt_quotachecking);
1532 CFS_INIT_LIST_HEAD(&id_list);
1533 for (i = 0; i < MAXQUOTAS; i++) {
1534 struct dquot_id *dqid, *tmp;
1536 if (sb_dqopt(qctxt->lqc_sb)->files[i] == NULL)
1539 #ifndef KERNEL_SUPPORTS_QUOTA_READ
1540 rc = fsfilt_qids(obd, sb_dqopt(qctxt->lqc_sb)->files[i], NULL,
1543 rc = fsfilt_qids(obd, NULL, sb_dqopt(qctxt->lqc_sb)->files[i],
1547 CERROR("%s: failed to get %s qids!\n", obd->obd_name,
1548 i ? "group" : "user");
1552 cfs_list_for_each_entry_safe(dqid, tmp, &id_list,
1554 struct lustre_qunit_size *lqs;
1556 cfs_list_del_init(&dqid->di_link);
1557 lqs = quota_search_lqs(LQS_KEY(i, dqid->di_id),
1559 if (lqs && !IS_ERR(lqs)) {
1560 lqs->lqs_flags |= dqid->di_flag;
1563 CERROR("%s: failed to create a lqs for %sid %u"
1564 "\n", obd->obd_name, i ? "g" : "u",
1574 * lqs<->qctxt hash operations
1578 * id hashing based on the djb2 string-hash algorithm
1581 lqs_hash(cfs_hash_t *hs, const void *key, unsigned mask)
1583 unsigned long long id;
1588 id = *((unsigned long long *)key);
1589 hash = (LQS_KEY_GRP(id) ? 5381 : 5387) * (unsigned)LQS_KEY_ID(id);
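/* 5381 is the classic djb2 seed; a different odd constant (5387) is
 * used for group keys so user and group ids with the same value are
 * less likely to share a bucket */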
1591 RETURN(hash & mask);
1595 lqs_key(cfs_hlist_node_t *hnode)
1597 struct lustre_qunit_size *lqs;
1600 lqs = cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
1601 RETURN(&lqs->lqs_key);
1605 lqs_keycmp(const void *key, cfs_hlist_node_t *hnode)
1607 struct lustre_qunit_size *q =
1608 cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
1610 RETURN(q->lqs_key == *((unsigned long long *)key));
1614 lqs_object(cfs_hlist_node_t *hnode)
1616 return cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
1620 lqs_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
1622 struct lustre_qunit_size *q =
1623 cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
1629 lqs_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
1631 struct lustre_qunit_size *q =
1632 cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
1638 lqs_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
1640 CERROR("It should not have any item left to be handled by this!");
1643 static cfs_hash_ops_t lqs_hash_ops = {
1644 .hs_hash = lqs_hash,
1646 .hs_keycmp = lqs_keycmp,
1647 .hs_object = lqs_object,
1649 .hs_put_locked = lqs_put_locked,
1652 #endif /* HAVE_QUOTA_SUPPORT */