/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/quota/quota_context.c
 *
 * Copyright (c) 2001-2005 Cluster File Systems, Inc.
 * Author: Niu YaWei <niu@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * No redistribution or use is permitted outside of Cluster File Systems, Inc.
 */

# define EXPORT_SYMTAB

#define DEBUG_SUBSYSTEM S_MDS

#include <linux/version.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/init.h>

#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
#include "quota_internal.h"

unsigned long default_bunit_sz = 100 * 1024 * 1024; /* 100M bytes */
unsigned long default_btune_ratio = 50;             /* 50 percent */
unsigned long default_iunit_sz = 5000;              /* 5000 inodes */
unsigned long default_itune_ratio = 50;             /* 50 percent */
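
/*
 * Added annotation: the "tune" values act as low-water marks derived from
 * the unit sizes in qctxt_init() below.  With the defaults, a slave asks the
 * master for another 100MB block qunit once less than 50MB of granted limit
 * remains, and for another 5000-inode qunit once fewer than 2500 inodes of
 * headroom remain (see check_cur_qunit()).
 */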

kmem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;

struct lustre_qunit {
        struct list_head lq_hash;          /* Hash list in memory */
        atomic_t lq_refcnt;                /* Use count */
        struct lustre_quota_ctxt *lq_ctxt; /* Quota context this applies to */
        struct qunit_data lq_data;         /* See qunit_data */
        unsigned int lq_opc;               /* QUOTA_DQACQ, QUOTA_DQREL */
        struct list_head lq_waiters;       /* All write threads waiting for this qunit */
};
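
/*
 * Added annotation: peers that do not advertise OBD_CONNECT_QUOTA64 only
 * understand 32-bit qd_count values on the wire, so callers use this check
 * to decide whether a request must be clamped or split (see
 * split_before_schedule_dqacq() below).
 */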

int should_translate_quota(struct obd_import *imp)
        if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)

void qunit_cache_cleanup(void)
        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                LASSERT(list_empty(qunit_hash + i));
        spin_unlock(&qunit_hash_lock);

#ifdef HAVE_KMEM_CACHE_DESTROY_INT
        rc = kmem_cache_destroy(qunit_cachep);
        LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
#else
        kmem_cache_destroy(qunit_cachep);
#endif

int qunit_cache_init(void)
        LASSERT(qunit_cachep == NULL);
        qunit_cachep = kmem_cache_create("ll_qunit_cache",
                                         sizeof(struct lustre_qunit),

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                INIT_LIST_HEAD(qunit_hash + i);
        spin_unlock(&qunit_hash_lock);

qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
             __attribute__((__const__));

qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
        unsigned int id = qdata->qd_id;
        unsigned int type = qdata->qd_flags & QUOTA_IS_GRP;

        unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
        tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
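
/*
 * Added annotation: the bucket is derived from the quota context pointer,
 * the uid/gid and the user-vs-group flag, so the same id tracked as a user
 * quota and as a group quota normally lands on different hash chains.
 */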

/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
                                              struct lustre_quota_ctxt *qctxt,
                                              struct qunit_data *qdata)
        struct lustre_qunit *qunit = NULL;
        struct qunit_data *tmp;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                tmp = &qunit->lq_data;
                if (qunit->lq_ctxt == qctxt &&
                    qdata->qd_id == tmp->qd_id && qdata->qd_flags == tmp->qd_flags)

/* check_cur_qunit - check the current usage of qunit.
 * @qctxt: quota context
 * @qdata: quota id/type to check; on return qd_count holds the amount to
 *         acquire or release
 *
 * return: 1 - need to acquire qunit;
 *         2 - need to release qunit;
 *         0 - nothing to do.
 */
check_cur_qunit(struct obd_device *obd,
                struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz, tune_sz;
        struct obd_quotactl *qctl;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
        __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;

        if (!sb_any_quota_enabled(sb))

        /* ignore root user */
        if (qdata->qd_id == 0 && qdata_type == USRQUOTA)

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = qdata_type;
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret == -ESRCH) /* no limit */

        CERROR("can't get fs quota usage! (rc:%d)\n", ret);

        if (is_blk) {
                usage = qctl->qc_dqblk.dqb_curspace;
                limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
                qunit_sz = qctxt->lqc_bunit_sz;
                tune_sz = qctxt->lqc_btune_sz;

                LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
        } else {
                usage = qctl->qc_dqblk.dqb_curinodes;
                limit = qctl->qc_dqblk.dqb_ihardlimit;
                qunit_sz = qctxt->lqc_iunit_sz;
                tune_sz = qctxt->lqc_itune_sz;
        }

        /* ignore the no quota limit case */

        /* we don't count the MIN_QLIMIT */
        if ((limit == MIN_QLIMIT && !is_blk) ||
            (toqb(limit) == MIN_QLIMIT && is_blk))

        LASSERT(qdata->qd_count == 0);
        if (limit <= usage + tune_sz) {
                while (qdata->qd_count + limit <= usage + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 1;
        } else if (limit > usage + qunit_sz + tune_sz) {
                while (limit - qdata->qd_count > usage + qunit_sz + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 2;
        }
        LASSERT(ret == 0 || qdata->qd_count);
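
/*
 * Added annotation, worked example with the default tunables (bunit 100MB,
 * btune 50MB): with a local limit of 200MB and usage of 180MB the 20MB of
 * headroom is below btune, so qd_count becomes 100MB and the caller must
 * acquire one more qunit; with usage of 20MB the spare 180MB exceeds
 * bunit + btune, so qd_count becomes 100MB and one qunit is released instead.
 */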

/* compute the remaining quota for certain gid or uid b=11693 */
int compute_remquota(struct obd_device *obd,
                     struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
        struct super_block *sb = qctxt->lqc_sb;
        struct obd_quotactl *qctl;
        int ret = QUOTA_RET_OK;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;

        if (!sb_any_quota_enabled(sb))
                RETURN(QUOTA_RET_NOQUOTA);

        /* ignore root user */
        if (qdata->qd_id == 0 && qdata_type == USRQUOTA)
                RETURN(QUOTA_RET_NOLIMIT);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = qdata_type;
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret == -ESRCH) /* no limit */
                ret = QUOTA_RET_NOLIMIT;
        CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)",

        usage = qctl->qc_dqblk.dqb_curspace;
        limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
        if (!limit) { /* no limit */
                ret = QUOTA_RET_NOLIMIT;

        qdata->qd_count = limit - usage;
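
/*
 * Added annotation: on success qd_count is left holding the remaining block
 * quota in bytes for the given uid/gid, i.e. the hard limit minus current
 * usage.
 */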

/* caller must hold qunit_hash_lock */
static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
                                            struct qunit_data *qdata)
        unsigned int hashent = qunit_hashfn(qctxt, qdata);
        struct lustre_qunit *qunit;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        qunit = find_qunit(hashent, qctxt, qdata);

static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc)
        struct lustre_qunit *qunit = NULL;

        OBD_SLAB_ALLOC(qunit, qunit_cachep, SLAB_NOFS, sizeof(*qunit));

        INIT_LIST_HEAD(&qunit->lq_hash);
        INIT_LIST_HEAD(&qunit->lq_waiters);
        atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        memcpy(&qunit->lq_data, qdata, sizeof(*qdata));

static inline void free_qunit(struct lustre_qunit *qunit)
        OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));

static inline void qunit_get(struct lustre_qunit *qunit)
        atomic_inc(&qunit->lq_refcnt);

static void qunit_put(struct lustre_qunit *qunit)
        LASSERT(atomic_read(&qunit->lq_refcnt));
        if (atomic_dec_and_test(&qunit->lq_refcnt))
                free_qunit(qunit);

insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
        struct list_head *head;

        LASSERT(list_empty(&qunit->lq_hash));
        head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
        list_add(&qunit->lq_hash, head);

static void remove_qunit_nolock(struct lustre_qunit *qunit)
        LASSERT(!list_empty(&qunit->lq_hash));
        list_del_init(&qunit->lq_hash);

struct qunit_waiter {
        struct list_head qw_entry;
        cfs_waitq_t qw_waitq;
        int qw_rc;
};

#define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
                                 (limit = count) : (limit += count)
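
/*
 * Added annotation: MIN_QLIMIT seems to serve as the placeholder hard limit
 * a slave starts from before any qunit has been granted, which is why the
 * first acquisition replaces the limit outright while later ones are added
 * on top.
 */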

/* FIXME check if this mds is the master of specified id */
is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
          unsigned int id, int type)
        return qctxt->lqc_handler ? 1 : 0;
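
/*
 * Added annotation: only the quota master installs an lqc_handler via
 * qctxt_init(), so this test currently just means "handle dqacq/dqrel
 * locally"; the id/type arguments are unused pending the FIXME above.
 */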

schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait);

static int split_before_schedule_dqacq(struct obd_device *obd,
                                        struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc, int wait)
        struct qunit_data tmp_qdata;

        if (qctxt->lqc_import)
                while (should_translate_quota(qctxt->lqc_import) &&
                       qdata->qd_count > MAX_QUOTA_COUNT32) {

                        tmp_qdata.qd_count = MAX_QUOTA_COUNT32;
                        qdata->qd_count -= tmp_qdata.qd_count;
                        ret = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);

        if (qdata->qd_count) {
                ret = schedule_dqacq(obd, qctxt, qdata, opc, wait);
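
/*
 * Added annotation: when the peer cannot handle 64-bit counts (no
 * OBD_CONNECT_QUOTA64 in its connect flags), a large request is carved into
 * MAX_QUOTA_COUNT32-sized chunks and scheduled one at a time, with any
 * remainder sent last.
 */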

dqacq_completion(struct obd_device *obd,
                 struct lustre_quota_ctxt *qctxt,
                 struct qunit_data *qdata, int rc, int opc)
        struct lustre_qunit *qunit = NULL;
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz;
        struct qunit_waiter *qw, *tmp;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
        __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;
        __u64 qd_tmp = qdata->qd_count;

        qunit_sz = is_blk ? qctxt->lqc_bunit_sz : qctxt->lqc_iunit_sz;
        div_r = do_div(qd_tmp, qunit_sz);

        /* update local operational quota file */
                __u32 count = QUSG(qdata->qd_count, is_blk);
                struct obd_quotactl *qctl;

                        GOTO(out, err = -ENOMEM);

                /* acq/rel qunit for specified uid/gid is serialized,
                 * so there is no race between get fs quota limit and
                 * set fs quota limit */
                qctl->qc_cmd = Q_GETQUOTA;
                qctl->qc_id = qdata->qd_id;
                qctl->qc_type = qdata_type;
                err = fsfilt_quotactl(obd, sb, qctl);
                        CERROR("failed to get fs quota limit! (rc:%d)\n", err);

                qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
                hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;

                qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
                hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;

                INC_QLIMIT(*hardlimit, count);

                LASSERT(count < *hardlimit);

                /* clear quota limit */

                qctl->qc_cmd = Q_SETQUOTA;
                err = fsfilt_quotactl(obd, sb, qctl);
                        CERROR("failed to set fs quota limit! (rc:%d)\n", err);

                QDATA_DEBUG(qdata, "%s completion\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
        } else if (rc == -EDQUOT) {
                QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
        } else if (rc == -EBUSY) {
                QDATA_DEBUG(qdata, "it is recovering, got EBUSY.\n");

                CERROR("acquire qunit got error! (rc:%d)\n", rc);

        /* remove the qunit from hash */
        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        /* this qunit has been removed by qctxt_cleanup() */
                spin_unlock(&qunit_hash_lock);

        LASSERT(opc == qunit->lq_opc);
        remove_qunit_nolock(qunit);

        /* wake up all waiters */
        list_for_each_entry_safe(qw, tmp, &qunit->lq_waiters, qw_entry) {
                list_del_init(&qw->qw_entry);
                wake_up(&qw->qw_waitq);
        }

        spin_unlock(&qunit_hash_lock);

        /* don't reschedule in such cases:
         * - acq/rel failure, but not for quota recovery.
         * - local dqacq/dqrel.
         * - local disk io failure.
         */
        if (err || (rc && rc != -EBUSY) ||
            is_master(obd, qctxt, qdata->qd_id, qdata_type))

        /* reschedule another dqacq/dqrel if needed */
        rc = check_cur_qunit(obd, qctxt, qdata);
                opc = rc == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                rc = split_before_schedule_dqacq(obd, qctxt, qdata, opc, 0);
                QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc);
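
/*
 * Added annotation: after updating the local operational quota file the
 * slave re-evaluates its usage via check_cur_qunit(); if the granted limit
 * has again fallen within the tune mark of usage (or sits more than a spare
 * qunit above it), another acquire/release is scheduled immediately so the
 * local limit tracks actual usage.
 */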

struct dqacq_async_args {
        struct lustre_quota_ctxt *aa_ctxt;
        struct lustre_qunit *aa_qunit;
};

static int dqacq_interpret(struct ptlrpc_request *req, void *data, int rc)
        struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
        struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
        struct lustre_qunit *qunit = aa->aa_qunit;
        struct obd_device *obd = req->rq_import->imp_obd;
        struct qunit_data *qdata = NULL;
        struct qunit_data_old *qdata_old = NULL;

        LASSERT(req->rq_import);
        if ((req->rq_import->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64) &&
            !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT)) {
                CDEBUG(D_QUOTA, "qd_count is 64bit!\n");
                qdata = lustre_swab_reqbuf(req, REPLY_REC_OFF, sizeof(*qdata),
                                           lustre_swab_qdata);

                CDEBUG(D_QUOTA, "qd_count is 32bit!\n");
                qdata_old = lustre_swab_reqbuf(req, REPLY_REC_OFF,
                                               sizeof(struct qunit_data_old),
                                               lustre_swab_qdata_old);
                qdata = lustre_quota_old_to_new(qdata_old);

                DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data\n");

        LASSERT(qdata->qd_id == qunit->lq_data.qd_id &&
                (qdata->qd_flags & QUOTA_IS_GRP) == (qunit->lq_data.qd_flags & QUOTA_IS_GRP) &&
                (qdata->qd_count == qunit->lq_data.qd_count ||
                 qdata->qd_count == 0));

        QDATA_DEBUG(qdata, "%s interpret rc(%d).\n",
                    lustre_msg_get_opc(req->rq_reqmsg) == QUOTA_DQACQ ?
                    "DQACQ" : "DQREL", rc);

        rc = dqacq_completion(obd, qctxt, qdata, rc,
                              lustre_msg_get_opc(req->rq_reqmsg));

static int got_qunit(struct qunit_waiter *waiter)
        spin_lock(&qunit_hash_lock);
        rc = list_empty(&waiter->qw_entry);
        spin_unlock(&qunit_hash_lock);
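
/*
 * Added annotation: dqacq_completion() does list_del_init() on every waiter
 * before waking it, so an empty qw_entry is the sign that the in-flight
 * request this waiter queued behind has finished.
 */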

schedule_dqacq(struct obd_device *obd,
               struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait)
        struct lustre_qunit *qunit, *empty;
        struct qunit_waiter qw;
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_request *req;
        struct qunit_data *reqdata;
        struct dqacq_async_args *aa;
        int size[2] = { sizeof(struct ptlrpc_body), sizeof(*reqdata) };

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);

        if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
                spin_unlock(&qunit_hash_lock);

                goto wait_completion;

        insert_qunit_nolock(qctxt, qunit);
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
        spin_unlock(&qunit_hash_lock);

        /* master is going to dqacq/dqrel from itself */
        if (is_master(obd, qctxt, qdata->qd_id, qdata->qd_flags & QUOTA_IS_GRP)) {
                QDATA_DEBUG(qdata, "local %s.\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                rc = qctxt->lqc_handler(obd, qdata, opc);
                rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
                RETURN((rc && rc != -EDQUOT) ? rc : rc2);

        /* build dqacq/dqrel request */
        LASSERT(qctxt->lqc_import);
        req = ptlrpc_prep_req(qctxt->lqc_import, LUSTRE_MDS_VERSION, opc, 2,

                dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);

        LASSERT(!should_translate_quota(qctxt->lqc_import) ||
                qdata->qd_count <= MAX_QUOTA_COUNT32);
        if (should_translate_quota(qctxt->lqc_import) ||
            OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))
                struct qunit_data_old *reqdata_old, *tmp;

                reqdata_old = lustre_msg_buf(req->rq_reqmsg, REPLY_REC_OFF,
                                             sizeof(*reqdata_old));
                tmp = lustre_quota_new_to_old(qdata);

                size[1] = sizeof(*reqdata_old);
                CDEBUG(D_QUOTA, "qd_count is 32bit!\n");

                reqdata = lustre_msg_buf(req->rq_reqmsg, REPLY_REC_OFF,

                size[1] = sizeof(*reqdata);
                CDEBUG(D_QUOTA, "qd_count is 64bit!\n");

        ptlrpc_req_set_repsize(req, 2, size);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct dqacq_async_args *)&req->rq_async_args;

        aa->aa_qunit = qunit;

        req->rq_interpret_reply = dqacq_interpret;
        ptlrpcd_add_req(req);

        QDATA_DEBUG(qdata, "%s scheduled.\n",
                    opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");

                struct qunit_data *p = &qunit->lq_data;
                QDATA_DEBUG(p, "wait for dqacq.\n");

                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);

        CDEBUG(D_QUOTA, "wait dqacq done. (rc:%d)\n", qw.qw_rc);

qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                   uid_t uid, gid_t gid, __u32 isblk, int wait)
        int ret, rc = 0, i = USRQUOTA;
        __u32 id[MAXQUOTAS] = { uid, gid };
        struct qunit_data qdata[MAXQUOTAS];

        CLASSERT(MAXQUOTAS < 4);
        if (!sb_any_quota_enabled(qctxt->lqc_sb))

        for (i = 0; i < MAXQUOTAS; i++) {
                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = 0;
                qdata[i].qd_flags |= i;
                qdata[i].qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;
                qdata[i].qd_count = 0;

                ret = check_cur_qunit(obd, qctxt, &qdata[i]);

                        /* need to acquire or release */
                        opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                        ret = split_before_schedule_dqacq(obd, qctxt, &qdata[i],

qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                         unsigned short type, int isblk)
        struct lustre_qunit *qunit = NULL;
        struct qunit_waiter qw;
        struct qunit_data qdata;
        struct l_wait_info lwi = { 0 };

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);

        qdata.qd_flags |= type;
        qdata.qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, &qdata);
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);

        spin_unlock(&qunit_hash_lock);

                struct qunit_data *p = &qdata;
                QDATA_DEBUG(p, "wait for dqacq completion.\n");
                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                QDATA_DEBUG(p, "wait dqacq done. (rc:%d)\n", qw.qw_rc);

qctxt_init(struct lustre_quota_ctxt *qctxt, struct super_block *sb,
           dqacq_handler_t handler)
        rc = ptlrpcd_addref();

        qctxt->lqc_handler = handler;
        qctxt->lqc_import = NULL;
        qctxt->lqc_recovery = 0;
        qctxt->lqc_atype = 0;
        qctxt->lqc_status = 0;
        qctxt->lqc_bunit_sz = default_bunit_sz;
        qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
        qctxt->lqc_iunit_sz = default_iunit_sz;
        qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
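
/*
 * Added annotation: note the operand order above; dividing default_bunit_sz
 * by 100 before multiplying keeps the intermediate value from overflowing a
 * 32-bit unsigned long for large byte counts, while the inode side can
 * safely multiply first.  Both expressions come out to 50% of their unit
 * with the default ratios.
 */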

void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
        struct lustre_qunit *qunit, *tmp;
        struct qunit_waiter *qw, *tmp2;

        spin_lock(&qunit_hash_lock);

        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)

                        remove_qunit_nolock(qunit);
                        /* wake up all waiters */
                        list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
                                                 qw_entry) {
                                list_del_init(&qw->qw_entry);
                                wake_up(&qw->qw_waitq);

        spin_unlock(&qunit_hash_lock);

struct qslave_recov_thread_data {
        struct obd_device *obd;
        struct lustre_quota_ctxt *qctxt;
        struct completion comp;
};

/* FIXME only recover block quota for now */
static int qslave_recovery_main(void *arg)
        struct qslave_recov_thread_data *data = arg;
        struct obd_device *obd = data->obd;
        struct lustre_quota_ctxt *qctxt = data->qctxt;

        ptlrpc_daemonize("qslave_recovd");

        complete(&data->comp);

        if (qctxt->lqc_recovery)

        qctxt->lqc_recovery = 1;

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qunit_data qdata;
                struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;

                LOCK_DQONOFF_MUTEX(dqopt);
                if (!sb_has_quota_enabled(qctxt->lqc_sb, type)) {
                        UNLOCK_DQONOFF_MUTEX(dqopt);

                LASSERT(dqopt->files[type] != NULL);
                INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
#else
                rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
#endif
                UNLOCK_DQONOFF_MUTEX(dqopt);
                        CERROR("failed to get ids from quota file. (rc:%d)\n", rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);
                        /* skip slave recovery on itself */
                        if (is_master(obd, qctxt, dqid->di_id, type))

                        if (rc && rc != -EBUSY)

                        qdata.qd_id = dqid->di_id;
                        qdata.qd_flags |= type;
                        qdata.qd_flags |= QUOTA_IS_BLOCK;

                        ret = check_cur_qunit(obd, qctxt, &qdata);

                                opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                                rc = split_before_schedule_dqacq(obd, qctxt, &qdata, opc, 0);

                                CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
                                       "qslave recovery failed! (id:%d type:%d "
                                       "rc:%d)\n", dqid->di_id, type, rc);

        qctxt->lqc_recovery = 0;

qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
        struct qslave_recov_thread_data data;

        if (!sb_any_quota_enabled(qctxt->lqc_sb))

        init_completion(&data.comp);

        rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
                CERROR("Cannot start quota recovery thread: rc %d\n", rc);

        wait_for_completion(&data.comp);