/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/quota/quota_context.c
 *
 * Copyright (c) 2001-2005 Cluster File Systems, Inc.
 * Author: Niu YaWei <niu@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * No redistribution or use is permitted outside of Cluster File Systems, Inc.
 */
# define EXPORT_SYMTAB

#define DEBUG_SUBSYSTEM S_MDS

#include <linux/version.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/init.h>

#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
#include "quota_internal.h"
unsigned long default_bunit_sz = 100 * 1024 * 1024; /* 100M bytes */
unsigned long default_btune_ratio = 50;             /* 50 percent */
unsigned long default_iunit_sz = 5000;              /* 5000 inodes */
unsigned long default_itune_ratio = 50;             /* 50 percent */
unsigned long default_limit_sz = 20 * 1024 * 1024;
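
/*
 * With the defaults above, qctxt_init() below derives the tuning thresholds
 * as a percentage of the unit sizes: btune_sz = 100MB * 50 / 100 = 50MB and
 * itune_sz = 5000 * 50 / 100 = 2500 inodes.  As check_cur_qunit() shows, a
 * new unit is requested once less than a "tune" worth of headroom remains
 * below the hard limit.
 */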
cfs_mem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;

struct lustre_qunit {
        struct list_head lq_hash;          /* Hash list in memory */
        atomic_t lq_refcnt;                /* Use count */
        struct lustre_quota_ctxt *lq_ctxt; /* Quota context this applies to */
        struct qunit_data lq_data;         /* See qunit_data */
        unsigned int lq_opc;               /* QUOTA_DQACQ, QUOTA_DQREL */
        struct list_head lq_waiters;       /* All write threads waiting for this qunit */
};
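
/*
 * A lustre_qunit tracks one in-flight DQACQ/DQREL for a given quota context,
 * id and type.  Threads that need the same unit while a request is
 * outstanding find it via qunit_hash and park on lq_waiters instead of
 * issuing another RPC; dqacq_completion() wakes them once the reply (or a
 * local handler) has updated the limits.
 */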
int should_translate_quota (struct obd_import *imp)
        if ((imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64) &&
            !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT))

void qunit_cache_cleanup(void)
        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                LASSERT(list_empty(qunit_hash + i));
        spin_unlock(&qunit_hash_lock);

        rc = cfs_mem_cache_destroy(qunit_cachep);
        LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
int qunit_cache_init(void)
        LASSERT(qunit_cachep == NULL);
        qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
                                            sizeof(struct lustre_qunit),

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                INIT_LIST_HEAD(qunit_hash + i);
        spin_unlock(&qunit_hash_lock);

qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
             __attribute__((__const__));

qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
        unsigned int id = qdata->qd_id;
        unsigned int type = qdata->qd_flags & QUOTA_IS_GRP;

        unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
        tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
                                              struct lustre_quota_ctxt *qctxt,
                                              struct qunit_data *qdata)
        struct lustre_qunit *qunit = NULL;
        struct qunit_data *tmp;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                tmp = &qunit->lq_data;
                if (qunit->lq_ctxt == qctxt &&
                    qdata->qd_id == tmp->qd_id && qdata->qd_flags == tmp->qd_flags)
/* check_cur_qunit - check the current usage of qunit.
 * @qctxt: quota context
 * @qdata: the type of quota unit to be checked
 *
 * return: 1 - need to acquire a qunit;
 *         2 - need to release a qunit;
 *         0 - nothing to do.
 */
check_cur_qunit(struct obd_device *obd,
                struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz, tune_sz;
        struct obd_quotactl *qctl;

        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
        __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;
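        /* qd_flags packs both dimensions of a quota request: bit 0
         * (QUOTA_IS_GRP) selects group vs. user quota and bit 1
         * (QUOTA_IS_BLOCK) selects block vs. inode quota, which is why
         * is_blk shifts the masked value down by one. */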
        if (!sb_any_quota_enabled(sb))

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = qdata_type;
        ret = fsfilt_quotactl(obd, sb, qctl);

        if (ret == -ESRCH) /* no limit */
        CERROR("can't get fs quota usage! (rc:%d)\n", ret);

        usage = qctl->qc_dqblk.dqb_curspace;
        limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
        qunit_sz = qctxt->lqc_bunit_sz;
        tune_sz = qctxt->lqc_btune_sz;

        LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));

        usage = qctl->qc_dqblk.dqb_curinodes;
        limit = qctl->qc_dqblk.dqb_ihardlimit;
        qunit_sz = qctxt->lqc_iunit_sz;
        tune_sz = qctxt->lqc_itune_sz;
        /* ignore the no quota limit case */

        /* we don't count the MIN_QLIMIT */
        if ((limit == MIN_QLIMIT && !is_blk) ||
            (toqb(limit) == MIN_QLIMIT && is_blk))

        LASSERT(qdata->qd_count == 0);
        if (limit <= usage + tune_sz) {
                while (qdata->qd_count + limit <= usage + tune_sz)
                        qdata->qd_count += qunit_sz;
        } else if (limit > usage + qunit_sz + tune_sz) {
                while (limit - qdata->qd_count > usage + qunit_sz + tune_sz)
                        qdata->qd_count += qunit_sz;

        LASSERT(ret == 0 || qdata->qd_count);
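        /* Worked example with the default tunables (illustrative numbers
         * only): qunit_sz = 100MB, tune_sz = 50MB.  With limit = 200MB and
         * usage = 180MB, limit <= usage + tune_sz, so qd_count is stepped up
         * in 100MB units until qd_count + limit > usage + tune_sz and one
         * qunit is acquired (ret = 1).  With limit = 500MB and usage = 100MB,
         * limit > usage + qunit_sz + tune_sz, so qd_count ends up at 300MB
         * worth of units to release (ret = 2). */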
/* compute the remaining quota for certain gid or uid b=11693 */
int compute_remquota(struct obd_device *obd,
                     struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
        struct super_block *sb = qctxt->lqc_sb;
        struct obd_quotactl *qctl;
        int ret = QUOTA_RET_OK;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;

        if (!sb_any_quota_enabled(sb))
                RETURN(QUOTA_RET_NOQUOTA);

        /* ignore root user */
        if (qdata->qd_id == 0 && qdata_type == USRQUOTA)
                RETURN(QUOTA_RET_NOLIMIT);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = qdata_type;
        ret = fsfilt_quotactl(obd, sb, qctl);

        if (ret == -ESRCH) /* no limit */
                ret = QUOTA_RET_NOLIMIT;
        CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)",

        usage = qctl->qc_dqblk.dqb_curspace;
        limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
        if (!limit) { /* no limit */
                ret = QUOTA_RET_NOLIMIT;

        qdata->qd_count = limit - usage;
/* caller must hold qunit_hash_lock */
static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
                                            struct qunit_data *qdata)
        unsigned int hashent = qunit_hashfn(qctxt, qdata);
        struct lustre_qunit *qunit;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        qunit = find_qunit(hashent, qctxt, qdata);

static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc)
        struct lustre_qunit *qunit = NULL;

        OBD_SLAB_ALLOC(qunit, qunit_cachep, CFS_ALLOC_IO, sizeof(*qunit));

        INIT_LIST_HEAD(&qunit->lq_hash);
        INIT_LIST_HEAD(&qunit->lq_waiters);
        atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        memcpy(&qunit->lq_data, qdata, sizeof(*qdata));

static inline void free_qunit(struct lustre_qunit *qunit)
        OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));

static inline void qunit_get(struct lustre_qunit *qunit)
        atomic_inc(&qunit->lq_refcnt);

static void qunit_put(struct lustre_qunit *qunit)
        LASSERT(atomic_read(&qunit->lq_refcnt));
        if (atomic_dec_and_test(&qunit->lq_refcnt))

insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
        struct list_head *head;

        LASSERT(list_empty(&qunit->lq_hash));
        head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
        list_add(&qunit->lq_hash, head);

static void remove_qunit_nolock(struct lustre_qunit *qunit)
        LASSERT(!list_empty(&qunit->lq_hash));
        list_del_init(&qunit->lq_hash);
struct qunit_waiter {
        struct list_head qw_entry;
        cfs_waitq_t qw_waitq;

#define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
                                 (limit = count) : (limit += count)
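
/*
 * INC_QLIMIT avoids adding on top of the MIN_QLIMIT placeholder: if the
 * stored hard limit is still MIN_QLIMIT it is replaced by count outright,
 * otherwise count is added.  E.g. (illustrative numbers) with *hardlimit ==
 * MIN_QLIMIT and count == 100, the limit becomes 100, not MIN_QLIMIT + 100.
 */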
/* FIXME check if this mds is the master of specified id */
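/* Note: lqc_handler is only set when qctxt_init() is passed a dqacq handler,
 * presumably on the quota master only, so for now "master" simply means
 * "this node can handle the request locally". */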
is_master(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
          unsigned int id, int type)
        return qctxt->lqc_handler ? 1 : 0;

schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait);

static int split_before_schedule_dqacq(struct obd_device *obd,
                                       struct lustre_quota_ctxt *qctxt,
                                       struct qunit_data *qdata, int opc,
                                       int wait)
        unsigned long factor;
        struct qunit_data tmp_qdata;

        LASSERT(qdata && qdata->qd_count);
        QDATA_DEBUG(qdata, "%s quota split.\n",
                    (qdata->qd_flags & QUOTA_IS_BLOCK) ? "block" : "inode");
        if (qdata->qd_flags & QUOTA_IS_BLOCK)
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *

        if (qctxt->lqc_import && should_translate_quota(qctxt->lqc_import) &&
            qdata->qd_count > factor) {
                tmp_qdata.qd_count = factor;
                qdata->qd_count -= tmp_qdata.qd_count;
                QDATA_DEBUG((&tmp_qdata), "split.\n");
                rc = schedule_dqacq(obd, qctxt, &tmp_qdata, opc, wait);
                QDATA_DEBUG(qdata, "not split.\n");
                rc = schedule_dqacq(obd, qctxt, qdata, opc, wait);
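        /* Illustrative arithmetic: factor rounds the largest count that fits
         * the old 32-bit wire format down to a whole number of qunits (if
         * MAX_QUOTA_COUNT32 is ~2^32, that is roughly 3.9GB with the default
         * 100MB bunit).  When the peer cannot handle 64-bit counts and
         * qd_count exceeds factor, only factor's worth is scheduled here and
         * the remainder stays in qdata->qd_count. */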
dqacq_completion(struct obd_device *obd,
                 struct lustre_quota_ctxt *qctxt,
                 struct qunit_data *qdata, int rc, int opc)
        struct lustre_qunit *qunit = NULL;
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz;
        struct qunit_waiter *qw, *tmp;
        __u32 qdata_type = qdata->qd_flags & QUOTA_IS_GRP;
        __u32 is_blk = (qdata->qd_flags & QUOTA_IS_BLOCK) >> 1;
        __u64 qd_tmp = qdata->qd_count;

        qunit_sz = is_blk ? qctxt->lqc_bunit_sz : qctxt->lqc_iunit_sz;
        div_r = do_div(qd_tmp, qunit_sz);
        LASSERTF(!div_r, "qunit_sz: %lu, return qunit_sz: "LPU64"\n",

        /* update local operational quota file */
                __u32 count = QUSG(qdata->qd_count, is_blk);
                struct obd_quotactl *qctl;

                GOTO(out, err = -ENOMEM);
                /* acq/rel qunit for specified uid/gid is serialized,
                 * so there is no race between get fs quota limit and
                 * set fs quota limit */
                qctl->qc_cmd = Q_GETQUOTA;
                qctl->qc_id = qdata->qd_id;
                qctl->qc_type = qdata_type;
                err = fsfilt_quotactl(obd, sb, qctl);
                        CERROR("error getting fs quota limit! (rc:%d)\n", err);

                        qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;

                        qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;

                        CDEBUG(D_QUOTA, "%s(acq): count: %d, hardlimit: "LPU64
                               ", type: %s.\n", obd->obd_name, count, *hardlimit,
                               qdata_type ? "grp" : "usr");
                        INC_QLIMIT(*hardlimit, count);

                        CDEBUG(D_QUOTA, "%s(rel): count: %d, hardlimit: "LPU64
                               ", type: %s.\n", obd->obd_name, count, *hardlimit,
                               qdata_type ? "grp" : "usr");
                        LASSERTF(count < *hardlimit,
                                 "count: %d, hardlimit: "LPU64".\n",

                /* clear quota limit */
                qctl->qc_cmd = Q_SETQUOTA;
                err = fsfilt_quotactl(obd, sb, qctl);
                        CERROR("error setting fs quota limit! (rc:%d)\n", err);

                QDATA_DEBUG(qdata, "%s completion\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
        } else if (rc == -EDQUOT) {
                QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
        } else if (rc == -EBUSY) {
                QDATA_DEBUG(qdata, "it is recovering, got EBUSY.\n");
                CERROR("acquire qunit got error! (rc:%d)\n", rc);

        /* remove the qunit from hash */
        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        /* this qunit has been removed by qctxt_cleanup() */
                spin_unlock(&qunit_hash_lock);

        LASSERT(opc == qunit->lq_opc);
        remove_qunit_nolock(qunit);

        /* wake up all waiters */
        list_for_each_entry_safe(qw, tmp, &qunit->lq_waiters, qw_entry) {
                list_del_init(&qw->qw_entry);
                wake_up(&qw->qw_waitq);

        spin_unlock(&qunit_hash_lock);
        /* don't reschedule in such cases:
         *   - acq/rel failure, but not for quota recovery.
         *   - local dqacq/dqrel.
         *   - local disk io failure.
         */
        if (err || (rc && rc != -EBUSY) ||
            is_master(obd, qctxt, qdata->qd_id, qdata_type))

        /* reschedule another dqacq/dqrel if needed */
        rc = check_cur_qunit(obd, qctxt, qdata);
                opc = rc == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                rc = split_before_schedule_dqacq(obd, qctxt, qdata, opc, 0);
                QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc);
struct dqacq_async_args {
        struct lustre_quota_ctxt *aa_ctxt;
        struct lustre_qunit *aa_qunit;

static int dqacq_interpret(struct ptlrpc_request *req, void *data, int rc)
        struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
        struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
        struct lustre_qunit *qunit = aa->aa_qunit;
        struct obd_device *obd = req->rq_import->imp_obd;
        struct qunit_data *qdata = NULL;
        struct qunit_data_old *qdata_old = NULL;

        LASSERT(req->rq_import);
        if ((req->rq_import->imp_connect_data.ocd_connect_flags &
             OBD_CONNECT_QUOTA64) &&
            !OBD_FAIL_CHECK(OBD_FAIL_QUOTA_QD_COUNT_32BIT)) {
                CDEBUG(D_QUOTA, "qd_count is 64bit!\n");
                qdata = lustre_swab_reqbuf(req, REPLY_REC_OFF, sizeof(*qdata),
                                           lustre_swab_qdata);
                CDEBUG(D_QUOTA, "qd_count is 32bit!\n");
                qdata_old = lustre_swab_reqbuf(req, REPLY_REC_OFF,
                                               sizeof(struct qunit_data_old),
                                               lustre_swab_qdata_old);
                qdata = lustre_quota_old_to_new(qdata_old);

                DEBUG_REQ(D_ERROR, req, "error unpacking qunit_data");

        LASSERT(qdata->qd_id == qunit->lq_data.qd_id &&
                (qdata->qd_flags & QUOTA_IS_GRP) ==
                (qunit->lq_data.qd_flags & QUOTA_IS_GRP) &&
                (qdata->qd_count == qunit->lq_data.qd_count ||
                 qdata->qd_count == 0));

        QDATA_DEBUG(qdata, "%s interpret rc(%d).\n",
                    lustre_msg_get_opc(req->rq_reqmsg) == QUOTA_DQACQ ?
                    "DQACQ" : "DQREL", rc);

        rc = dqacq_completion(obd, qctxt, qdata, rc,
                              lustre_msg_get_opc(req->rq_reqmsg));
static int got_qunit(struct qunit_waiter *waiter)
        spin_lock(&qunit_hash_lock);
        rc = list_empty(&waiter->qw_entry);
        spin_unlock(&qunit_hash_lock);
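        /* A waiter is queued on lq_waiters by schedule_dqacq();
         * dqacq_completion() (and qctxt_cleanup()) does list_del_init() on
         * each entry before waking it, so an empty qw_entry means the
         * acquire/release this thread was waiting for has finished. */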
schedule_dqacq(struct obd_device *obd,
               struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait)
        struct lustre_qunit *qunit, *empty;
        struct qunit_waiter qw;
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_request *req;
        struct qunit_data *reqdata;
        struct dqacq_async_args *aa;
        int size[2] = { sizeof(struct ptlrpc_body), sizeof(*reqdata) };
        struct obd_import *imp = NULL;
        unsigned long factor;

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);

        if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
                spin_unlock(&qunit_hash_lock);

                goto wait_completion;

        insert_qunit_nolock(qctxt, qunit);
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);
        spin_unlock(&qunit_hash_lock);

        /* master is going to dqacq/dqrel from itself */
        if (is_master(obd, qctxt, qdata->qd_id, qdata->qd_flags & QUOTA_IS_GRP)) {
                QDATA_DEBUG(qdata, "local %s.\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                rc = qctxt->lqc_handler(obd, qdata, opc);
                rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
                RETURN((rc && rc != -EDQUOT) ? rc : rc2);

        spin_lock(&qctxt->lqc_lock);
        if (!qctxt->lqc_import) {
                spin_unlock(&qctxt->lqc_lock);
                QDATA_DEBUG(qdata, "lqc_import is invalid.\n");

                spin_lock(&qunit_hash_lock);
                        list_del_init(&qw.qw_entry);
                remove_qunit_nolock(qunit);

                spin_unlock(&qunit_hash_lock);

        imp = class_import_get(qctxt->lqc_import);

        spin_unlock(&qctxt->lqc_lock);

        /* build dqacq/dqrel request */
        req = ptlrpc_prep_req(imp, LUSTRE_MDS_VERSION, opc, 2,
                dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
                class_import_put(imp);

        if (qdata->qd_flags & QUOTA_IS_BLOCK)
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_bunit_sz *
                factor = MAX_QUOTA_COUNT32 / qctxt->lqc_iunit_sz *

        LASSERTF(!should_translate_quota(imp) ||
                 qdata->qd_count <= factor,
                 "qd_count: "LPU64"; should_translate_quota: %d.\n",
                 qdata->qd_count, should_translate_quota(imp));
        if (should_translate_quota(imp))
                struct qunit_data_old *reqdata_old, *tmp;

                reqdata_old = lustre_msg_buf(req->rq_reqmsg, REPLY_REC_OFF,
                                             sizeof(*reqdata_old));
                tmp = lustre_quota_new_to_old(qdata);

                size[1] = sizeof(*reqdata_old);
                CDEBUG(D_QUOTA, "qd_count is 32bit!\n");

                reqdata = lustre_msg_buf(req->rq_reqmsg, REPLY_REC_OFF,
                size[1] = sizeof(*reqdata);
                CDEBUG(D_QUOTA, "qd_count is 64bit!\n");

        ptlrpc_req_set_repsize(req, 2, size);
        class_import_put(imp);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = (struct dqacq_async_args *)&req->rq_async_args;
        aa->aa_qunit = qunit;

        req->rq_interpret_reply = dqacq_interpret;
        ptlrpcd_add_req(req);

        QDATA_DEBUG(qdata, "%s scheduled.\n",
                    opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");

                struct qunit_data *p = &qunit->lq_data;
                QDATA_DEBUG(p, "wait for dqacq.\n");

                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                CDEBUG(D_QUOTA, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                   uid_t uid, gid_t gid, __u32 isblk, int wait)
        int ret, rc = 0, i = USRQUOTA;
        __u32 id[MAXQUOTAS] = { uid, gid };
        struct qunit_data qdata[MAXQUOTAS];

        CLASSERT(MAXQUOTAS < 4);
        if (!sb_any_quota_enabled(qctxt->lqc_sb))

        for (i = 0; i < MAXQUOTAS; i++) {
                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = 0;
                qdata[i].qd_flags |= i;
                qdata[i].qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;
                qdata[i].qd_count = 0;

                ret = check_cur_qunit(obd, qctxt, &qdata[i]);
                        /* need to acquire or release */
                        opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                        ret = split_before_schedule_dqacq(obd, qctxt, &qdata[i],
qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                         unsigned short type, int isblk)
        struct lustre_qunit *qunit = NULL;
        struct qunit_waiter qw;
        struct qunit_data qdata;
        struct l_wait_info lwi = { 0 };

        INIT_LIST_HEAD(&qw.qw_entry);
        init_waitqueue_head(&qw.qw_waitq);

        qdata.qd_flags |= type;
        qdata.qd_flags |= isblk ? QUOTA_IS_BLOCK : 0;

        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, &qdata);
                list_add_tail(&qw.qw_entry, &qunit->lq_waiters);

        spin_unlock(&qunit_hash_lock);

                struct qunit_data *p = &qdata;
                QDATA_DEBUG(p, "wait for dqacq completion.\n");
                l_wait_event(qw.qw_waitq, got_qunit(&qw), &lwi);
                QDATA_DEBUG(p, "wait dqacq done. (rc:%d)\n", qw.qw_rc);
qctxt_init(struct lustre_quota_ctxt *qctxt, struct super_block *sb,
           dqacq_handler_t handler)
        rc = ptlrpcd_addref();

        spin_lock_init(&qctxt->lqc_lock);
        spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_handler = handler;
        qctxt->lqc_import = NULL;
        qctxt->lqc_recovery = 0;
        qctxt->lqc_atype = 0;
        qctxt->lqc_status = 0;
        qctxt->lqc_bunit_sz = default_bunit_sz;
        qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
        qctxt->lqc_iunit_sz = default_iunit_sz;
        qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
        qctxt->lqc_limit_sz = default_limit_sz;
        spin_unlock(&qctxt->lqc_lock);
void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
        struct lustre_qunit *qunit, *tmp;
        struct qunit_waiter *qw, *tmp2;

        spin_lock(&qunit_hash_lock);

        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)

                        remove_qunit_nolock(qunit);
                        /* wake up all waiters */
                        list_for_each_entry_safe(qw, tmp2, &qunit->lq_waiters,
                                list_del_init(&qw->qw_entry);
                                wake_up(&qw->qw_waitq);

        spin_unlock(&qunit_hash_lock);
struct qslave_recov_thread_data {
        struct obd_device *obd;
        struct lustre_quota_ctxt *qctxt;
        struct completion comp;

/* FIXME only recovers block quota for now */
static int qslave_recovery_main(void *arg)
        struct qslave_recov_thread_data *data = arg;
        struct obd_device *obd = data->obd;
        struct lustre_quota_ctxt *qctxt = data->qctxt;

        ptlrpc_daemonize("qslave_recovd");

        complete(&data->comp);

        if (qctxt->lqc_recovery)
        qctxt->lqc_recovery = 1;

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qunit_data qdata;
                struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;

                LOCK_DQONOFF_MUTEX(dqopt);
                if (!sb_has_quota_enabled(qctxt->lqc_sb, type)) {
                        UNLOCK_DQONOFF_MUTEX(dqopt);

                LASSERT(dqopt->files[type] != NULL);
                INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);
#else
                rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
#endif
                UNLOCK_DQONOFF_MUTEX(dqopt);
                        CERROR("Get ids from quota file failed. (rc:%d)\n", rc);

                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);
                        /* skip slave recovery on itself */
                        if (is_master(obd, qctxt, dqid->di_id, type))
                        if (rc && rc != -EBUSY)

                        qdata.qd_id = dqid->di_id;
                        qdata.qd_flags |= type;
                        qdata.qd_flags |= QUOTA_IS_BLOCK;

                        ret = check_cur_qunit(obd, qctxt, &qdata);
                                opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                                rc = split_before_schedule_dqacq(obd, qctxt,
                                                                 &qdata, opc, 0);

                        CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
                               "qslave recovery failed! (id:%d type:%d "
                               "rc:%d)\n", dqid->di_id, type, rc);

        qctxt->lqc_recovery = 0;
qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
        struct qslave_recov_thread_data data;

        if (!sb_any_quota_enabled(qctxt->lqc_sb))

        init_completion(&data.comp);

        rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
                CERROR("Cannot start quota recovery thread: rc %d\n", rc);

        wait_for_completion(&data.comp);