/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/quota/quota_context.c
 *
 * Lustre Quota Context
 *
 * Author: Niu YaWei <niu@clusterfs.com>
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_MDS
#include <linux/version.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/init.h>

#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
#include "quota_internal.h"
unsigned long default_bunit_sz = 100 * 1024 * 1024; /* 100 MB */
unsigned long default_btune_ratio = 50;             /* 50 percent */
unsigned long default_iunit_sz = 5000;              /* 5000 inodes */
unsigned long default_itune_ratio = 50;             /* 50 percent */
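
/*
 * With these defaults, qctxt_init() below computes lqc_btune_sz = 50 MB
 * and lqc_itune_sz = 2500 inodes, i.e. a slave starts acquiring another
 * qunit once its locally granted limit comes within half a unit of the
 * current usage.
 */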
cfs_mem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;
struct lustre_qunit {
        struct list_head lq_hash;          /* Hash list in memory */
        atomic_t lq_refcnt;                /* Use count */
        struct lustre_quota_ctxt *lq_ctxt; /* Quota context this applies to */
        struct qunit_data lq_data;         /* See qunit_data */
        unsigned int lq_opc;               /* QUOTA_DQACQ, QUOTA_DQREL */
        struct list_head lq_waiters;       /* All write threads waiting for this qunit */
};
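
/*
 * Lifecycle: alloc_qunit() creates a qunit with lq_refcnt == 1,
 * schedule_dqacq() hashes it while its DQACQ/DQREL request is in
 * flight, and dqacq_completion() unhashes it, wakes lq_waiters and
 * drops the final reference via qunit_put().
 */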

int should_translate_quota(struct obd_import *imp)
{
        if ((imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64) &&
            !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))
                RETURN(0);
        else
                RETURN(1);
}

void qunit_cache_cleanup(void)
{
        int i;
        ENTRY;

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                LASSERT(list_empty(qunit_hash + i));
        spin_unlock(&qunit_hash_lock);

        if (qunit_cachep) {
                int rc;
                rc = cfs_mem_cache_destroy(qunit_cachep);
                LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
                qunit_cachep = NULL;
        }
        EXIT;
}

int qunit_cache_init(void)
{
        int i;
        ENTRY;

        LASSERT(qunit_cachep == NULL);
        qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
                                            sizeof(struct lustre_qunit),
                                            0, 0);
        if (!qunit_cachep)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                CFS_INIT_LIST_HEAD(qunit_hash + i);
        spin_unlock(&qunit_hash_lock);

        RETURN(0);
}

static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
             __attribute__((__const__));

static inline int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        unsigned int id = qdata->qd_id;
        unsigned int type = qdata->qd_flags & QUOTA_IS_GRP;

        unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
        tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
        return tmp;
}
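
/*
 * The hash mixes the context pointer (shifted right to discard the
 * always-zero cache-alignment bits) with the quota id, then scales by
 * MAXQUOTAS - type, so user and group entries for the same id tend to
 * land in different buckets of qunit_hash[NR_DQHASH].
 */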

/* compute the remaining quota for certain gid or uid b=11693 */
int compute_remquota(struct obd_device *obd,
                     struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        struct super_block *sb = qctxt->lqc_sb;
        __u64 usage, limit;
        struct obd_quotactl *qctl;
        int ret = QUOTA_RET_OK;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
        ENTRY;

        if (!sb_any_quota_enabled(sb))
                RETURN(QUOTA_RET_NOQUOTA);

        /* ignore root user */
        if (qdata->qd_id == 0 && qdata_type == USRQUOTA)
                RETURN(QUOTA_RET_NOLIMIT);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = qdata_type;
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH) /* no limit */
                        ret = QUOTA_RET_NOLIMIT;
                else
                        CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)\n",
                               ret);
                GOTO(out, ret);
        }

        usage = qctl->qc_dqblk.dqb_curspace;
        limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
        if (!limit) { /* no limit */
                ret = QUOTA_RET_NOLIMIT;
                GOTO(out, ret);
        }

        qdata->qd_count = limit - usage;

out:
        OBD_FREE_PTR(qctl);
        RETURN(ret);
}
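
/*
 * Note on units in compute_remquota(): dqb_bhardlimit is kept in quota
 * blocks of 1 << QUOTABLOCK_BITS (1024) bytes, while dqb_curspace is
 * already in bytes, hence the shift before the subtraction.
 */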

/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
                                              struct lustre_quota_ctxt *qctxt,
                                              struct qunit_data *qdata)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_data *tmp;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                tmp = &qunit->lq_data;
                if (qunit->lq_ctxt == qctxt &&
                    qdata->qd_id == tmp->qd_id &&
                    qdata->qd_flags == tmp->qd_flags)
                        return qunit;
        }
        return NULL;
}

/* check_cur_qunit - check the current usage of qunit.
 * @qctxt: quota context
 * @qdata: the type of quota unit to be checked
 *
 * return: 1 - need to acquire a qunit;
 *         2 - need to release a qunit;
 *         0 - nothing to do.
 */
static int
check_cur_qunit(struct obd_device *obd,
                struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz, tune_sz;
        __u64 usage, limit;
        struct obd_quotactl *qctl;
        int ret = 0;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
        __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;
        ENTRY;

        if (!sb_any_quota_enabled(sb))
                RETURN(0);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = qdata_type;
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH) /* no limit */
                        ret = 0;
                else
                        CERROR("can't get fs quota usage! (rc:%d)\n", ret);
                GOTO(out, ret);
        }

        if (is_blk) {
                usage = qctl->qc_dqblk.dqb_curspace;
                limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
                qunit_sz = qctxt->lqc_bunit_sz;
                tune_sz = qctxt->lqc_btune_sz;

                LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
        } else {
                usage = qctl->qc_dqblk.dqb_curinodes;
                limit = qctl->qc_dqblk.dqb_ihardlimit;
                qunit_sz = qctxt->lqc_iunit_sz;
                tune_sz = qctxt->lqc_itune_sz;
        }

        /* ignore the no quota limit case */
        if (!limit)
                GOTO(out, ret = 0);

        /* we don't count the MIN_QLIMIT */
        if ((limit == MIN_QLIMIT && !is_blk) ||
            (toqb(limit) == MIN_QLIMIT && is_blk))
                limit = 0;

        LASSERT(qdata->qd_count == 0);
        if (limit <= usage + tune_sz) {
                while (qdata->qd_count + limit <= usage + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 1;
        } else if (limit > usage + qunit_sz + tune_sz) {
                while (limit - qdata->qd_count > usage + qunit_sz + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 2;
        }
        LASSERT(ret == 0 || qdata->qd_count);
out:
        OBD_FREE_PTR(qctl);
        RETURN(ret);
}
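
/*
 * A worked example of the logic above, using the default tunables
 * (qunit = 100 MB, tune = 50 MB; illustrative numbers only): with a
 * local hard limit of 120 MB and 100 MB in use, limit <= usage + tune_sz
 * holds, so the first loop bumps qd_count to 100 MB (one qunit to
 * acquire, ret = 1).  Conversely, with 400 MB granted and only 100 MB
 * used, limit > usage + qunit_sz + tune_sz, and the second loop sets
 * qd_count to 200 MB (two qunits to release, ret = 2).
 */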

/* caller must hold qunit_hash_lock */
static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
                                            struct qunit_data *qdata)
{
        unsigned int hashent = qunit_hashfn(qctxt, qdata);
        struct lustre_qunit *qunit;
        ENTRY;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        qunit = find_qunit(hashent, qctxt, qdata);
        RETURN(qunit);
}

static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc)
{
        struct lustre_qunit *qunit = NULL;
        ENTRY;

        OBD_SLAB_ALLOC(qunit, qunit_cachep, CFS_ALLOC_IO, sizeof(*qunit));
        if (qunit == NULL)
                RETURN(NULL);

        CFS_INIT_LIST_HEAD(&qunit->lq_hash);
        CFS_INIT_LIST_HEAD(&qunit->lq_waiters);
        atomic_set(&qunit->lq_refcnt, 1);

        qunit->lq_ctxt = qctxt;
        memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
        qunit->lq_opc = opc;

        RETURN(qunit);
}

static inline void free_qunit(struct lustre_qunit *qunit)
{
        OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));
}

static inline void qunit_get(struct lustre_qunit *qunit)
{
        atomic_inc(&qunit->lq_refcnt);
}

static void qunit_put(struct lustre_qunit *qunit)
{
        LASSERT(atomic_read(&qunit->lq_refcnt));
        if (atomic_dec_and_test(&qunit->lq_refcnt))
                free_qunit(qunit);
}

static void
insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
{
        struct list_head *head;

        LASSERT(list_empty(&qunit->lq_hash));
        head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
        list_add(&qunit->lq_hash, head);
}

static void remove_qunit_nolock(struct lustre_qunit *qunit)
{
        LASSERT(!list_empty(&qunit->lq_hash));
        list_del_init(&qunit->lq_hash);
}

struct qunit_waiter {
        struct list_head qw_entry;
        cfs_waitq_t qw_waitq;
        int qw_rc;
};

#define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
                                 (limit = count) : (limit += count)
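
/*
 * MIN_QLIMIT appears to be the placeholder limit a slave's operational
 * quota file starts from: the first successful acquire replaces it
 * outright instead of adding to it, hence the conditional above rather
 * than a plain "limit += count".
 */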

/* FIXME check if this mds is the master of specified id */
static int
is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
          unsigned int id, int type)
{
        return qctxt->lqc_handler ? 1 : 0;
}

static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait);

static int
split_before_schedule_dqacq(struct obd_device *obd,
                            struct lustre_quota_ctxt *qctxt,
                            struct qunit_data *qdata, int opc, int wait)
{
        int rc = 0;
        unsigned long factor;
        struct qunit_data tmp_qdata;
        ENTRY;

        LASSERT(qdata && qdata->qd_count);
        QDATA_DEBUG(qdata, "%s quota split.\n",
                    (qdata->qd_flags & QUOTA_IS_BLOCK) ? "block" : "inode");
        if (qdata->qd_flags & QUOTA_IS_BLOCK)
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
                         qctxt->lqc_bunit_sz;
        else
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
                         qctxt->lqc_iunit_sz;

        if (qctxt->lqc_import && should_translate_quota(qctxt->lqc_import) &&
            qdata->qd_count > factor) {
                tmp_qdata = *qdata;
                tmp_qdata.qd_count = factor;
                qdata->qd_count -= tmp_qdata.qd_count;
                QDATA_DEBUG((&tmp_qdata), "will be split.\n");
                rc = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);
        } else {
                QDATA_DEBUG(qdata, "will not be split.\n");
                rc = schedule_dqacq(obd, qctxt, qdata, opc, wait);
        }
        RETURN(rc);
}
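
/*
 * Rough arithmetic for the split threshold: assuming MAX_QUOTA_COUNT32
 * is 2^32 - 1 and the default 100 MB bunit, factor =
 * (2^32 - 1) / 104857600 * 104857600 = 40 * 104857600 bytes, i.e. the
 * largest multiple of the unit size that still fits in the 32-bit
 * qd_count of the old wire format.  Larger requests are sent as several
 * 32-bit-safe chunks.
 */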

static int
dqacq_completion(struct obd_device *obd,
                 struct lustre_quota_ctxt *qctxt,
                 struct qunit_data *qdata, int rc, int opc)
{
        struct lustre_qunit *qunit = NULL;
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz;
        struct qunit_waiter *qw, *tmp;
        int err = 0;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
        __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;
        __u64 qd_tmp = qdata->qd_count;
        unsigned long div_r;
        ENTRY;

        LASSERT(qdata);
        qunit_sz = is_blk ? qctxt->lqc_bunit_sz : qctxt->lqc_iunit_sz;
        div_r = do_div(qd_tmp, qunit_sz);
        LASSERTF(!div_r, "qunit_sz: %lu, return qunit_sz: "LPU64"\n",
                 qunit_sz, qd_tmp);

        /* update local operational quota file */
        if (rc == 0 && (opc == QUOTA_DQACQ || opc == QUOTA_DQREL)) {
                __u32 count = QUSG(qdata->qd_count, is_blk);
                struct obd_quotactl *qctl;
                __u64 *hardlimit;

                OBD_ALLOC_PTR(qctl);
                if (qctl == NULL)
                        GOTO(out, err = -ENOMEM);
                /* acq/rel qunit for specified uid/gid is serialized,
                 * so there is no race between getting and setting
                 * the fs quota limit */
                qctl->qc_cmd = Q_GETQUOTA;
                qctl->qc_id = qdata->qd_id;
                qctl->qc_type = qdata_type;
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err) {
                        CERROR("failed to get fs quota limit! (rc:%d)\n", err);
                        GOTO(out_mem, err);
                }

                if (is_blk) {
                        qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;
                } else {
                        qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;
                }
                switch (opc) {
                case QUOTA_DQACQ:
                        CDEBUG(D_QUOTA, "%s(acq): count: %d, hardlimit: "LPU64
                               ", type: %s.\n", obd->obd_name, count,
                               *hardlimit, qdata_type ? "grp" : "usr");
                        INC_QLIMIT(*hardlimit, count);
                        break;
                case QUOTA_DQREL:
                        CDEBUG(D_QUOTA, "%s(rel): count: %d, hardlimit: "LPU64
                               ", type: %s.\n", obd->obd_name, count,
                               *hardlimit, qdata_type ? "grp" : "usr");
                        LASSERTF(count < *hardlimit,
                                 "count: %d, hardlimit: "LPU64".\n",
                                 count, *hardlimit);
                        *hardlimit -= count;
                        break;
                default:
                        LBUG();
                }

                /* clear quota limit */
                if (count == 0)
                        *hardlimit = 0;

                qctl->qc_cmd = Q_SETQUOTA;
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err)
                        CERROR("failed to set fs quota limit! (rc:%d)\n", err);
out_mem:
                OBD_FREE_PTR(qctl);
        }
        if (rc == 0) {
                QDATA_DEBUG(qdata, "%s completion\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
        } else if (rc == -EDQUOT) {
                QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
        } else if (rc == -EBUSY) {
                QDATA_DEBUG(qdata, "it is recovering, got EBUSY.\n");
        } else {
                CERROR("acquire qunit got error! (rc:%d)\n", rc);
        }
out:
        /* remove the qunit from hash */
        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        /* this qunit has been removed by qctxt_cleanup() */
        if (qunit == NULL) {
                spin_unlock(&qunit_hash_lock);
                RETURN(err);
        }

        LASSERT(opc == qunit->lq_opc);
        remove_qunit_nolock(qunit);

        /* wake up all waiters */
        list_for_each_entry_safe(qw, tmp, &qunit->lq_waiters, qw_entry) {
                list_del_init(&qw->qw_entry);
                qw->qw_rc = rc;
                wake_up(&qw->qw_waitq);
        }

        spin_unlock(&qunit_hash_lock);

        qunit_put(qunit);

        /* don't reschedule in such cases:
         * - acq/rel failure, but not for quota recovery.
         * - local dqacq/dqrel.
         * - local disk I/O failure.
         */
        if (err || (rc && rc != -EBUSY) ||
            is_master(obd, qctxt, qdata->qd_id, qdata_type))
                RETURN(err);

        /* reschedule another dqacq/dqrel if needed */
        qdata->qd_count = 0;
        rc = check_cur_qunit(obd, qctxt, qdata);
        if (rc > 0) {
                int opc;
                opc = rc == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                rc = split_before_schedule_dqacq(obd, qctxt, qdata, opc, 0);
                QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc);
        }
        RETURN(err);
}

struct dqacq_async_args {
        struct lustre_quota_ctxt *aa_ctxt;
        struct lustre_qunit *aa_qunit;
};

static int dqacq_interpret(struct ptlrpc_request *req, void *data, int rc)
{
        struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
        struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
        struct lustre_qunit *qunit = aa->aa_qunit;
        struct obd_device *obd = req->rq_import->imp_obd;
        struct qunit_data *qdata = NULL;
        struct qunit_data_old *qdata_old = NULL;
        ENTRY;

        LASSERT(req->rq_import);

        if ((req->rq_import->imp_connect_data.ocd_connect_flags &
             OBD_CONNECT_QUOTA64) &&
            !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT)) {
                CDEBUG(D_QUOTA, "qd_count is 64bit!\n");
                qdata = req_capsule_server_swab_get(&req->rq_pill,
                                                    &RMF_QUNIT_DATA,
                                                    (void *)lustre_swab_qdata);
        } else {
                CDEBUG(D_QUOTA, "qd_count is 32bit!\n");
                qdata_old = req_capsule_server_swab_get(&req->rq_pill,
                                                        &RMF_QUNIT_DATA,
                                                (void *)lustre_swab_qdata_old);
                qdata = lustre_quota_old_to_new(qdata_old);
        }
        if (qdata == NULL) {
                DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data");
                RETURN(-EPROTO);
        }

        LASSERT(qdata->qd_id == qunit->lq_data.qd_id &&
                (qdata->qd_flags & QUOTA_IS_GRP) ==
                (qunit->lq_data.qd_flags & QUOTA_IS_GRP) &&
                (qdata->qd_count == qunit->lq_data.qd_count ||
                 qdata->qd_count == 0));

        QDATA_DEBUG(qdata, "%s interpret rc(%d).\n",
                    lustre_msg_get_opc(req->rq_reqmsg) == QUOTA_DQACQ ?
                    "DQACQ" : "DQREL", rc);

        rc = dqacq_completion(obd, qctxt, qdata, rc,
                              lustre_msg_get_opc(req->rq_reqmsg));

        RETURN(rc);
}

static int got_qunit(struct qunit_waiter *waiter)
{
        int rc = 0;
        ENTRY;

        spin_lock(&qunit_hash_lock);
        rc = list_empty(&waiter->qw_entry);
        spin_unlock(&qunit_hash_lock);
        RETURN(rc);
}
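
/*
 * dqacq_completion() (and qctxt_cleanup()) unlink a waiter from
 * lq_waiters before waking it, so an empty qw_entry is the reliable
 * "request finished" condition used with l_wait_event() below.
 */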

static int
schedule_dqacq(struct obd_device *obd,
               struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait)
{
        struct lustre_qunit *qunit, *empty;
        struct qunit_waiter qw;
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_request *req;
        struct qunit_data *reqdata;
        struct dqacq_async_args *aa;
        unsigned long factor;
        int rc = 0, rc2 = 0;
        ENTRY;

        CFS_INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);
        qw.qw_rc = 0;

        if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)
                RETURN(-ENOMEM);

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        if (qunit) {
                if (wait)
                        list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
                spin_unlock(&qunit_hash_lock);

                free_qunit(empty);
                goto wait_completion;
        }
        qunit = empty;
        insert_qunit_nolock(qctxt, qunit);
        if (wait)
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
        spin_unlock(&qunit_hash_lock);

        /* master is going to dqacq/dqrel from itself */
        if (is_master(obd, qctxt, qdata->qd_id,
                      qdata->qd_flags & QUOTA_IS_GRP)) {
                QDATA_DEBUG(qdata, "local %s.\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                rc = qctxt->lqc_handler(obd, qdata, opc);
                rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
                RETURN((rc && rc != -EDQUOT) ? rc : rc2);
        }

        /* build dqacq/dqrel request */
        LASSERT(qctxt->lqc_import);

        req = ptlrpc_request_alloc_pack(qctxt->lqc_import, &RQF_MDS_QUOTA_DQACQ,
                                        LUSTRE_MDS_VERSION, opc);
        if (req == NULL) {
                dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
                RETURN(-ENOMEM);
        }

        if (qdata->qd_flags & QUOTA_IS_BLOCK)
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
                         qctxt->lqc_bunit_sz;
        else
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *
                         qctxt->lqc_iunit_sz;

        LASSERT(!should_translate_quota(qctxt->lqc_import) ||
                qdata->qd_count <= factor);
        if (should_translate_quota(qctxt->lqc_import)) {
                struct qunit_data_old *reqdata_old, *tmp;

                reqdata_old = req_capsule_client_get(&req->rq_pill,
                                                     &RMF_QUNIT_DATA);

                tmp = lustre_quota_new_to_old(qdata);
                *reqdata_old = *tmp;
                req_capsule_set_size(&req->rq_pill, &RMF_QUNIT_DATA, RCL_SERVER,
                                     sizeof(*reqdata_old));
                CDEBUG(D_QUOTA, "qd_count is 32bit!\n");
        } else {
                reqdata = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_QUNIT_DATA);

                *reqdata = *qdata;
                req_capsule_set_size(&req->rq_pill, &RMF_QUNIT_DATA, RCL_SERVER,
                                     sizeof(*reqdata));
                CDEBUG(D_QUOTA, "qd_count is 64bit!\n");
        }
        ptlrpc_request_set_replen(req);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct dqacq_async_args *)&req->rq_async_args;
        aa->aa_ctxt = qctxt;
        aa->aa_qunit = qunit;

        req->rq_interpret_reply = dqacq_interpret;
        ptlrpcd_add_req(req);

        QDATA_DEBUG(qdata, "%s scheduled.\n",
                    opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
wait_completion:
        if (wait && qunit) {
                struct qunit_data *p = &qunit->lq_data;
                QDATA_DEBUG(p, "wait for dqacq.\n");

                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);

                CDEBUG(D_QUOTA, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
        }
        RETURN(rc);
}

int
qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                   uid_t uid, gid_t gid, __u32 isblk, int wait)
{
        int ret, rc = 0, i = USRQUOTA;
        __u32 id[MAXQUOTAS] = { uid, gid };
        struct qunit_data qdata[MAXQUOTAS];
        ENTRY;

        CLASSERT(MAXQUOTAS < 4);
        if (!sb_any_quota_enabled(qctxt->lqc_sb))
                RETURN(0);

        for (i = 0; i < MAXQUOTAS; i++) {
                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = 0;
                qdata[i].qd_flags |= i;
                qdata[i].qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;
                qdata[i].qd_count = 0;

                ret = check_cur_qunit(obd, qctxt, &qdata[i]);
                if (ret > 0) {
                        int opc;
                        /* need acquire or release */
                        opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                        ret = split_before_schedule_dqacq(obd, qctxt, &qdata[i],
                                                          opc, wait);
                        if (!rc)
                                rc = ret;
                }
        }
        RETURN(rc);
}
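
/*
 * qd_flags encoding, as inferred from the masks used in this file:
 * bit 0 is the quota type (0 = user, matching the USRQUOTA loop index;
 * 1 = group, QUOTA_IS_GRP), and bit 1 marks block vs. inode quota
 * (QUOTA_IS_BLOCK).  That is why "qd_flags |= i" works above and why
 * is_blk is recovered elsewhere with ">> 1".
 */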

int
qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                         unsigned short type, int isblk)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_waiter qw;
        struct qunit_data qdata;
        struct l_wait_info lwi = { 0 };
        ENTRY;

        CFS_INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);
        qw.qw_rc = 0;

        qdata.qd_id = id;
        qdata.qd_flags = 0;
        qdata.qd_flags |= type;
        qdata.qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;
        qdata.qd_count = 0;

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, &qdata);
        if (qunit)
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);

        spin_unlock(&qunit_hash_lock);

        if (qunit) {
                struct qunit_data *p = &qdata;
                QDATA_DEBUG(p, "wait for dqacq completion.\n");
                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                QDATA_DEBUG(p, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
        }
        RETURN(0);
}

int
qctxt_init(struct lustre_quota_ctxt *qctxt, struct super_block *sb,
           dqacq_handler_t handler)
{
        int rc = 0;
        ENTRY;

        rc = ptlrpcd_addref();
        if (rc)
                RETURN(rc);

        qctxt->lqc_handler = handler;
        qctxt->lqc_sb = sb;
        qctxt->lqc_import = NULL;
        qctxt->lqc_recovery = 0;
        qctxt->lqc_atype = 0;
        qctxt->lqc_status = 0;
        qctxt->lqc_bunit_sz = default_bunit_sz;
        qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
        qctxt->lqc_iunit_sz = default_iunit_sz;
        qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;

        RETURN(0);
}
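
/*
 * Note the operand order above: for the block tune size the division
 * happens first (default_bunit_sz / 100 * ratio), presumably so the
 * intermediate value cannot overflow a 32-bit unsigned long
 * (100 MB * 50 would), while the much smaller inode count multiplies
 * first and so loses no precision to integer division.
 */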

void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
{
        struct lustre_qunit *qunit, *tmp;
        struct qunit_waiter *qw, *tmp2;
        int i;
        ENTRY;

        spin_lock(&qunit_hash_lock);

        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)
                                continue;

                        remove_qunit_nolock(qunit);
                        /* wake up all waiters */
                        list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
                                                 qw_entry) {
                                list_del_init(&qw->qw_entry);
                                qw->qw_rc = 0;
                                wake_up(&qw->qw_waitq);
                        }
                        qunit_put(qunit);
                }
        }

        spin_unlock(&qunit_hash_lock);

        ptlrpcd_decref();
        EXIT;
}

struct qslave_recov_thread_data {
        struct obd_device *obd;
        struct lustre_quota_ctxt *qctxt;
        struct completion comp;
};

/* FIXME: only block quota is recovered for now */
static int qslave_recovery_main(void *arg)
{
        struct qslave_recov_thread_data *data = arg;
        struct obd_device *obd = data->obd;
        struct lustre_quota_ctxt *qctxt = data->qctxt;
        unsigned int type;
        int rc = 0;
        ENTRY;

        ptlrpc_daemonize("qslave_recovd");

        complete(&data->comp);

        if (qctxt->lqc_recovery)
                RETURN(0);
        qctxt->lqc_recovery = 1;

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qunit_data qdata;
                struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;
                int ret;

                LOCK_DQONOFF_MUTEX(dqopt);
                if (!sb_has_quota_enabled(qctxt->lqc_sb, type)) {
                        UNLOCK_DQONOFF_MUTEX(dqopt);
                        continue;
                }

                LASSERT(dqopt->files[type] != NULL);
                CFS_INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
#else
                rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
#endif
                UNLOCK_DQONOFF_MUTEX(dqopt);
                if (rc)
                        CERROR("failed to get ids from quota file! (rc:%d)\n",
                               rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);
                        /* skip slave recovery on itself */
                        if (is_master(obd, qctxt, dqid->di_id, type))
                                goto free;
                        if (rc && rc != -EBUSY)
                                goto free;

                        qdata.qd_id = dqid->di_id;
                        qdata.qd_flags = 0;
                        qdata.qd_flags |= type;
                        qdata.qd_flags |= QUOTA_IS_BLOCK;
                        qdata.qd_count = 0;

                        ret = check_cur_qunit(obd, qctxt, &qdata);
                        if (ret > 0) {
                                int opc;
                                opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                                rc = split_before_schedule_dqacq(obd, qctxt,
                                                                 &qdata, opc,
                                                                 0);
                        } else {
                                rc = 0;
                        }

                        if (rc)
                                CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
                                       "qslave recovery failed! (id:%d type:%d"
                                       " rc:%d)\n", dqid->di_id, type, rc);
free:
                        kfree(dqid);
                }
        }

        qctxt->lqc_recovery = 0;
        RETURN(rc);
}

void
qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
{
        struct qslave_recov_thread_data data;
        int rc;
        ENTRY;

        if (!sb_any_quota_enabled(qctxt->lqc_sb))
                goto exit;

        data.obd = obd;
        data.qctxt = qctxt;
        init_completion(&data.comp);

        rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
        if (rc < 0) {
                CERROR("Cannot start quota recovery thread: rc %d\n", rc);
                goto exit;
        }

        wait_for_completion(&data.comp);
exit:
        EXIT;
}
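
/*
 * The completion handshake above matters: "data" lives on this stack
 * frame, so the parent must not return before qslave_recovery_main()
 * has copied obd/qctxt out of it; the thread signals data->comp right
 * after daemonizing for exactly that reason.
 */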