/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/quota/quota_context.c
 *
 * Lustre Quota Context
 *
 * Author: Niu YaWei <niu@clusterfs.com>
 */
# define EXPORT_SYMTAB

#define DEBUG_SUBSYSTEM S_LQUOTA

#include <linux/version.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/init.h>

#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
#include <class_hash.h>
#include <lprocfs_status.h>
#include "quota_internal.h"

#ifdef HAVE_QUOTA_SUPPORT

static lustre_hash_ops_t lqs_hash_ops;
unsigned long default_bunit_sz = 128 * 1024 * 1024; /* 128M bytes */
unsigned long default_btune_ratio = 50;             /* 50 percent */
unsigned long default_iunit_sz = 5120;              /* 5120 inodes */
unsigned long default_itune_ratio = 50;             /* 50 percent */

cfs_mem_cache_t *qunit_cachep = NULL;
struct list_head qunit_hash[NR_DQHASH];
spinlock_t qunit_hash_lock = SPIN_LOCK_UNLOCKED;
/* please sync qunit_state with qunit_state_names */
enum qunit_state {
        /** a qunit is newly created */
        QUNIT_CREATED      = 0,
        /** a qunit is added into the qunit hash, which means
         *  a quota req will be sent or is in flight */
        QUNIT_IN_HASH      = 1,
        /** a qunit is removed from the qunit hash, which means
         *  a quota req has been handled and has come back */
        QUNIT_RM_FROM_HASH = 2,
        /** the qunit can wake up all threads waiting for it */
        QUNIT_FINISHED     = 3,
};

static const char *qunit_state_names[] = {
        [QUNIT_CREATED]      = "CREATED",
        [QUNIT_IN_HASH]      = "IN_HASH",
        [QUNIT_RM_FROM_HASH] = "RM_FROM_HASH",
        [QUNIT_FINISHED]     = "FINISHED",
};
struct lustre_qunit {
        struct list_head lq_hash;          /** Hash list in memory */
        atomic_t lq_refcnt;                /** Use count */
        struct lustre_quota_ctxt *lq_ctxt; /** Quota context this applies to */
        struct qunit_data lq_data;         /** See qunit_data */
        unsigned int lq_opc;               /** QUOTA_DQACQ, QUOTA_DQREL */
        cfs_waitq_t lq_waitq;              /** Threads waiting for this qunit */
        spinlock_t lq_lock;                /** Protect the whole structure */
        enum qunit_state lq_state;         /** Present the status of qunit */
        int lq_rc;                         /** The rc of lq_data */
        pid_t lq_owner;                    /** Pid of the creating process */
};
#define QUNIT_SET_STATE(qunit, state)                                   \
do {                                                                    \
        spin_lock(&qunit->lq_lock);                                     \
        QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), "   \
                    "lq_rc(%d), lq_owner(%d)\n",                        \
                    qunit, qunit_state_names[qunit->lq_state],          \
                    qunit_state_names[state], qunit->lq_rc,             \
                    qunit->lq_owner);                                   \
        qunit->lq_state = state;                                        \
        spin_unlock(&qunit->lq_lock);                                   \
} while(0)

#define QUNIT_SET_STATE_AND_RC(qunit, state, rc)                        \
do {                                                                    \
        spin_lock(&qunit->lq_lock);                                     \
        qunit->lq_rc = rc;                                              \
        QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), "   \
                    "lq_rc(%d), lq_owner(%d)\n",                        \
                    qunit, qunit_state_names[qunit->lq_state],          \
                    qunit_state_names[state], qunit->lq_rc,             \
                    qunit->lq_owner);                                   \
        qunit->lq_state = state;                                        \
        spin_unlock(&qunit->lq_lock);                                   \
} while(0)
int should_translate_quota (struct obd_import *imp)

        if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)

void qunit_cache_cleanup(void)

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                LASSERT(list_empty(qunit_hash + i));
        spin_unlock(&qunit_hash_lock);

        rc = cfs_mem_cache_destroy(qunit_cachep);
        LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
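
/* Create the "ll_qunit_cache" slab for struct lustre_qunit allocations and
 * initialize every bucket of the qunit hash table. */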
int qunit_cache_init(void)

        LASSERT(qunit_cachep == NULL);
        qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
                                            sizeof(struct lustre_qunit),

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                CFS_INIT_LIST_HEAD(qunit_hash + i);
        spin_unlock(&qunit_hash_lock);
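
/* Hash a (quota context, id, user/group type) tuple into one of the
 * NR_DQHASH qunit hash buckets. */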
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
             __attribute__((__const__));

qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
        unsigned int id = qdata->qd_id;
        unsigned int type = QDATA_IS_GRP(qdata);

        unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
        tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;

/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
                                              struct lustre_quota_ctxt *qctxt,
                                              struct qunit_data *qdata)
        struct lustre_qunit *qunit = NULL;
        struct qunit_data *tmp;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                tmp = &qunit->lq_data;
                if (qunit->lq_ctxt == qctxt &&
                    qdata->qd_id == tmp->qd_id &&
                    (qdata->qd_flags & LQUOTA_QUNIT_FLAGS) ==
                    (tmp->qd_flags & LQUOTA_QUNIT_FLAGS))
/* check_cur_qunit - check the current usage of qunit.
 * @qctxt: quota context
 * @qdata: the type of quota unit to be checked
 *
 * return: 1 - need to acquire a qunit;
 *         2 - need to release a qunit;
 *         0 - nothing needs to be done.
 */
check_cur_qunit(struct obd_device *obd,
                struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz, tune_sz;
        __u64 usage, limit, limit_org, pending_write = 0;
        long long record = 0;
        struct obd_quotactl *qctl;
        struct lustre_qunit_size *lqs = NULL;

        if (!sb_any_quota_enabled(sb))

        spin_lock(&qctxt->lqc_lock);
        if (!qctxt->lqc_valid) {
                spin_unlock(&qctxt->lqc_lock);
        spin_unlock(&qctxt->lqc_lock);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);

        if (ret == -ESRCH) /* no limit */

                CERROR("can't get fs quota usage! (rc:%d)\n", ret);

        if (QDATA_IS_BLK(qdata)) {
                usage = qctl->qc_dqblk.dqb_curspace;
                limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;

                usage = qctl->qc_dqblk.dqb_curinodes;
                limit = qctl->qc_dqblk.dqb_ihardlimit;
        /* ignore the case of no quota limit; this also avoids creating
         * unnecessary lqs entries for the uid/gid */

        quota_search_lqs(qdata, NULL, qctxt, &lqs);
                CDEBUG(D_QUOTA, "Can't find the lustre qunit size!\n");
                ret = quota_create_lqs(qdata, NULL, qctxt, &lqs);
                if (ret == -EALREADY) {

        spin_lock(&lqs->lqs_lock);

        if (QDATA_IS_BLK(qdata)) {
                qunit_sz = lqs->lqs_bunit_sz;
                tune_sz = lqs->lqs_btune_sz;
                pending_write = lqs->lqs_bwrite_pending;
                record = lqs->lqs_blk_rec;
                LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));

                /* we don't need to change the inode qunit size for now */
                qunit_sz = lqs->lqs_iunit_sz;
                tune_sz = lqs->lqs_itune_sz;
                pending_write = lqs->lqs_iwrite_pending;
                record = lqs->lqs_ino_rec;

        /* we don't count the MIN_QLIMIT */
        if ((limit == MIN_QLIMIT && !QDATA_IS_BLK(qdata)) ||
            (toqb(limit) == MIN_QLIMIT && QDATA_IS_BLK(qdata)))

        usage += pending_write;

        /* while a quota release req is in flight, limit is temporarily
         * assigned a small value before the reply returns, so
         * limit + record may go negative */
        if (limit + record < 0)
        LASSERT(qdata->qd_count == 0);
        if (limit <= usage + tune_sz) {
                while (qdata->qd_count + limit <=
                        qdata->qd_count += qunit_sz;
        } else if (limit > usage + qunit_sz + tune_sz &&
                   limit_org > qdata->qd_count + qunit_sz) {
                while (limit - qdata->qd_count > usage + qunit_sz + tune_sz &&
                       limit_org > qdata->qd_count + qunit_sz)
                        qdata->qd_count += qunit_sz;

        /* if there are other pending writes for this uid/gid, releasing
         * quota is put off until the last pending write b=16645 */
        if (ret == 2 && pending_write) {
                CDEBUG(D_QUOTA, "delay quota release\n");

        CDEBUG(D_QUOTA, "type: %c, limit: "LPU64", usage: "LPU64
               ", pending_write: "LPU64", record: "LPD64
               ", qunit_sz: %lu, tune_sz: %lu, ret: %d.\n",
               QDATA_IS_BLK(qdata) ? 'b' : 'i', limit, usage, pending_write,
               record, qunit_sz, tune_sz, ret);
        LASSERT(ret == 0 || qdata->qd_count);

        spin_unlock(&lqs->lqs_lock);
/*
 * Compute the remaining quota for a certain gid or uid b=11693
 */
int compute_remquota(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                     struct qunit_data *qdata, int isblk)
        struct super_block *sb = qctxt->lqc_sb;
        struct obd_quotactl *qctl;
        int ret = QUOTA_RET_OK;

        if (!sb_any_quota_enabled(sb))
                RETURN(QUOTA_RET_NOQUOTA);

        /* ignore root user */
        if (qdata->qd_id == 0 && QDATA_IS_GRP(qdata) == USRQUOTA)
                RETURN(QUOTA_RET_NOLIMIT);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);

        if (ret == -ESRCH) /* no limit */
                ret = QUOTA_RET_NOLIMIT;

                CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)",

        usage = isblk ? qctl->qc_dqblk.dqb_curspace :
                        qctl->qc_dqblk.dqb_curinodes;
        limit = isblk ? qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS :
                        qctl->qc_dqblk.dqb_ihardlimit;
        if (!limit) { /* no limit */
                ret = QUOTA_RET_NOLIMIT;

        qdata->qd_count = limit - usage;
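
/* Allocate a qunit from the slab cache and initialize it; the returned
 * qunit starts in QUNIT_CREATED state with a single reference held by
 * the caller. */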
static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc)
        struct lustre_qunit *qunit = NULL;

        OBD_SLAB_ALLOC_PTR_GFP(qunit, qunit_cachep, CFS_ALLOC_IO);

        CFS_INIT_LIST_HEAD(&qunit->lq_hash);
        init_waitqueue_head(&qunit->lq_waitq);
        atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
        qunit->lq_lock = SPIN_LOCK_UNLOCKED;
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
        qunit->lq_owner = cfs_curproc_pid();

static inline void free_qunit(struct lustre_qunit *qunit)
        OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));

static inline void qunit_get(struct lustre_qunit *qunit)
        atomic_inc(&qunit->lq_refcnt);

static void qunit_put(struct lustre_qunit *qunit)
        LASSERT(atomic_read(&qunit->lq_refcnt));
        if (atomic_dec_and_test(&qunit->lq_refcnt))

/* caller must hold qunit_hash_lock and release ref of qunit after using it */
static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
                                            struct qunit_data *qdata)
        unsigned int hashent = qunit_hashfn(qctxt, qdata);
        struct lustre_qunit *qunit;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        qunit = find_qunit(hashent, qctxt, qdata);
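
/* Link a freshly allocated qunit into the qunit hash; the caller must
 * hold qunit_hash_lock. */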
insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
        struct list_head *head;

        LASSERT(list_empty(&qunit->lq_hash));
        head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
        list_add(&qunit->lq_hash, head);
        QUNIT_SET_STATE(qunit, QUNIT_IN_HASH);
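
/* Update the per-ID lqs accounting for a qunit that has just been
 * removed from the hash. */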
static void compute_lqs_after_removing_qunit(struct lustre_qunit *qunit)
        struct lustre_qunit_size *lqs = NULL;

        quota_search_lqs(&qunit->lq_data, NULL, qunit->lq_ctxt, &lqs);
                spin_lock(&lqs->lqs_lock);
                if (qunit->lq_opc == QUOTA_DQACQ)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 1);
                if (qunit->lq_opc == QUOTA_DQREL)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 0);
                spin_unlock(&lqs->lqs_lock);
                /* this is for quota_search_lqs */

        /* this is for schedule_dqacq */

static void remove_qunit_nolock(struct lustre_qunit *qunit)
        LASSERT(!list_empty(&qunit->lq_hash));
        LASSERT_SPIN_LOCKED(&qunit_hash_lock);

        list_del_init(&qunit->lq_hash);
        QUNIT_SET_STATE(qunit, QUNIT_RM_FROM_HASH);

#define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
                                 (limit = count) : (limit += count)
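
/* A quota context with a dqacq handler attached belongs to the quota
 * master; slaves have no local handler and must send RPCs instead. */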
static inline int is_master(struct lustre_quota_ctxt *qctxt)
        return qctxt->lqc_handler ? 1 : 0;

schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait,
               struct obd_trans_info *oti);
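
/* Handle completion of a dqacq/dqrel request: update the local operational
 * quota file to reflect the acquired or released amount, drop the qunit
 * from the hash, wake all waiters, and reschedule a further acquire or
 * release if the current usage still calls for one. */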
dqacq_completion(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                 struct qunit_data *qdata, int rc, int opc)
        struct lustre_qunit *qunit = NULL;
        struct super_block *sb = qctxt->lqc_sb;
        struct quota_adjust_qunit *oqaq = NULL;

        QDATA_DEBUG(qdata, "obd(%s): complete %s quota req\n",
                    obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");

        /* only delay when a release request covers more than 5MB b=18491 */
        if (opc == QUOTA_DQREL && qdata->qd_count >= 5242880)
                OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);
        /* update local operational quota file */
                __u64 count = QUSG(qdata->qd_count, QDATA_IS_BLK(qdata));
                struct obd_quotactl *qctl;

                        GOTO(out, err = -ENOMEM);

                /* acq/rel of a qunit for a given uid/gid is serialized,
                 * so there is no race between getting and setting the
                 * fs quota limit */
                qctl->qc_cmd = Q_GETQUOTA;
                qctl->qc_id = qdata->qd_id;
                qctl->qc_type = QDATA_IS_GRP(qdata);
                err = fsfilt_quotactl(obd, sb, qctl);
                        CERROR("error getting fs quota limit! (rc:%d)\n", err);

                if (QDATA_IS_BLK(qdata)) {
                        qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;

                        qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;

                CDEBUG(D_QUOTA, "hardlimit: "LPU64"\n", *hardlimit);

                        INC_QLIMIT(*hardlimit, count);

                        LASSERTF(count < *hardlimit,
                                 "id(%u) flag(%u) type(%c) isblk(%c) "
                                 "count("LPU64") qd_qunit("LPU64") "
                                 "hardlimit("LPU64").\n",
                                 qdata->qd_id, qdata->qd_flags,
                                 QDATA_IS_GRP(qdata) ? 'g' : 'u',
                                 QDATA_IS_BLK(qdata) ? 'b' : 'i',
                                 qdata->qd_count, qdata->qd_qunit, *hardlimit);
                /* clear quota limit */

                qctl->qc_cmd = Q_SETQUOTA;
                err = fsfilt_quotactl(obd, sb, qctl);
                        CERROR("error setting fs quota limit! (rc:%d)\n", err);

                QDATA_DEBUG(qdata, "%s completion\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
        } else if (rc == -EDQUOT) {
                QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
        } else if (rc == -EBUSY) {
                QDATA_DEBUG(qdata, "it is recovering, got EBUSY.\n");

                CERROR("acquire qunit got error! (rc:%d)\n", rc);
        /* remove the qunit from hash */
        spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        /* this qunit has been removed by qctxt_cleanup() */

                spin_unlock(&qunit_hash_lock);
                QDATA_DEBUG(qdata, "%s is discarded because qunit isn't found\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");

        LASSERT(opc == qunit->lq_opc);
        /* remove this qunit from lq_hash so that new processes cannot be added
         * to qunit->lq_waiters */
        remove_qunit_nolock(qunit);
        spin_unlock(&qunit_hash_lock);

        compute_lqs_after_removing_qunit(qunit);

                rc = QUOTA_REQ_RETURNED;
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, rc);
        /* wake up all waiters */
        wake_up_all(&qunit->lq_waitq);

        /* this is for dqacq_in_flight() */
        /* this is for alloc_qunit() */
        if (rc < 0 && rc != -EDQUOT)
        /* don't reschedule when:
         * - the acq/rel failed and the qunit isn't changed
         *   (except during quota recovery),
         * - it was a local dqacq/dqrel, or
         * - local disk io failed. */

        qdata_to_oqaq(qdata, oqaq);
        /* adjust the qunit size in slaves */
        rc1 = quota_adjust_slave_lqs(oqaq, qctxt);
                CERROR("adjust slave's qunit size failed! (rc:%d)\n", rc1);

        if (err || (rc < 0 && rc != -EBUSY && rc1 == 0) || is_master(qctxt))

        if (opc == QUOTA_DQREL && qdata->qd_count >= 5242880 &&
            OBD_FAIL_CHECK(OBD_FAIL_QUOTA_DELAY_REL))

        /* reschedule another dqacq/dqrel if needed */
        qdata->qd_flags &= LQUOTA_QUNIT_FLAGS;
        rc1 = check_cur_qunit(obd, qctxt, qdata);

                opc = rc1 == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                rc1 = schedule_dqacq(obd, qctxt, qdata, opc, 0, NULL);
                QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc1);
struct dqacq_async_args {
        struct lustre_quota_ctxt *aa_ctxt;
        struct lustre_qunit *aa_qunit;
};

static int dqacq_interpret(const struct lu_env *env,
                           struct ptlrpc_request *req, void *data, int rc)
        struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
        struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
        struct obd_device_target *obt = qctxt->lqc_obt;
        struct lustre_qunit *qunit = aa->aa_qunit;
        struct obd_device *obd = req->rq_import->imp_obd;
        struct qunit_data *qdata = NULL;

        LASSERT(req->rq_import);
        down_read(&obt->obt_rwsem);
        /* if a quota req times out or is dropped, we should still update the
         * quota statistics, which is handled in dqacq_completion. In that
         * situation we have to take qdata from the request instead of the
         * reply */
        qdata = quota_get_qdata(req, (rc != 0) ? QUOTA_REQUEST : QUOTA_REPLY,

                DEBUG_REQ(D_ERROR, req,
                          "error unpacking qunit_data(rc: %ld)\n",
                RETURN(PTR_ERR(qdata));

        QDATA_DEBUG(qdata, "qdata: interpret rc(%d).\n", rc);
        QDATA_DEBUG((&qunit->lq_data), "lq_data: \n");

        if (qdata->qd_id != qunit->lq_data.qd_id ||
            OBD_FAIL_CHECK(OBD_FAIL_QUOTA_RET_QDATA)) {
                CDEBUG(D_ERROR, "the returned qd_id isn't expected!"
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_id,
                       qunit->lq_data.qd_id);
                qdata->qd_id = qunit->lq_data.qd_id;

        if (QDATA_IS_GRP(qdata) != QDATA_IS_GRP(&qunit->lq_data)) {
                CDEBUG(D_ERROR, "the returned grp/usr isn't expected!"
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_flags,
                       qunit->lq_data.qd_flags);
                if (QDATA_IS_GRP(&qunit->lq_data))
                        QDATA_SET_GRP(qdata);

                        QDATA_CLR_GRP(qdata);

        if (qdata->qd_count > qunit->lq_data.qd_count) {
                CDEBUG(D_ERROR, "the returned qd_count isn't expected!"
                       "(qdata: "LPU64", lq_data: "LPU64")\n", qdata->qd_count,
                       qunit->lq_data.qd_count);

        rc = dqacq_completion(obd, qctxt, qdata, rc,
                              lustre_msg_get_opc(req->rq_reqmsg));

        up_read(&obt->obt_rwsem);
/*
 * check if quota master is online
 */
int check_qm(struct lustre_quota_ctxt *qctxt)

        spin_lock(&qctxt->lqc_lock);
        /* quit waiting when mds is back or qctxt is cleaned up */
        rc = qctxt->lqc_import || !qctxt->lqc_valid;
        spin_unlock(&qctxt->lqc_lock);

/* wake up all waiting threads when lqc_import is NULL */
void dqacq_interrupt(struct lustre_quota_ctxt *qctxt)
        struct lustre_qunit *qunit, *tmp;

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)

                        /* Wake up all waiters. Do not change lq_state.
                         * The waiters will check lq_rc, which stays 0 unless
                         * something else changes it, and then return -EAGAIN
                         * to the caller, who can perform the related quota
                         * acq/rel if necessary. */
                        wake_up_all(&qunit->lq_waitq);
        spin_unlock(&qunit_hash_lock);
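
/* l_wait_event() condition used while waiting on a qunit: true once the
 * qunit has left the hash (the request finished), or when the quota
 * context becomes invalid (or a slave loses its import to the master). */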
static int got_qunit(struct lustre_qunit *qunit, int is_master)
        struct lustre_quota_ctxt *qctxt = qunit->lq_ctxt;

        spin_lock(&qunit->lq_lock);
        switch (qunit->lq_state) {
        case QUNIT_RM_FROM_HASH:

                CERROR("invalid qunit state %d\n", qunit->lq_state);
        spin_unlock(&qunit->lq_lock);

        spin_lock(&qctxt->lqc_lock);
        rc = !qctxt->lqc_valid;
                rc |= !qctxt->lqc_import;
        spin_unlock(&qctxt->lqc_lock);
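
/* Core of the acquire/release path: reuse an in-flight qunit when one
 * already covers this ID, otherwise insert a new qunit and either call
 * the local handler (on the master) or send a dqacq/dqrel RPC to the
 * quota master; with wait set, block until the qunit completes. */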
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait,
               struct obd_trans_info *oti)
        struct lustre_qunit *qunit, *empty;
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_request *req;
        struct dqacq_async_args *aa;
        struct obd_import *imp = NULL;
        struct lustre_qunit_size *lqs = NULL;
        struct timeval work_start;
        struct timeval work_end;

        LASSERT(opc == QUOTA_DQACQ || opc == QUOTA_DQREL);
        do_gettimeofday(&work_start);
        if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL)

        spin_lock(&qunit_hash_lock);
        qunit = dqacq_in_flight(qctxt, qdata);

                spin_unlock(&qunit_hash_lock);

                goto wait_completion;

        insert_qunit_nolock(qctxt, qunit);
        spin_unlock(&qunit_hash_lock);

        quota_search_lqs(qdata, NULL, qctxt, &lqs);
                spin_lock(&lqs->lqs_lock);
                quota_compute_lqs(qdata, lqs, 1, (opc == QUOTA_DQACQ) ? 1 : 0);
                /* when this qdata returns from the MDS, lqs_putref will be
                 * called */
                spin_unlock(&lqs->lqs_lock);
                /* this is for quota_search_lqs */

                CDEBUG(D_ERROR, "Can't find the lustre qunit size!\n");
884 QDATA_DEBUG(qdata, "obd(%s): send %s quota req\n",
885 obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");
886 /* master is going to dqacq/dqrel from itself */
887 if (is_master(qctxt)) {
889 QDATA_DEBUG(qdata, "local %s.\n",
890 opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
891 QDATA_SET_CHANGE_QS(qdata);
892 rc = qctxt->lqc_handler(obd, qdata, opc);
893 rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
894 /* this is for qunit_get() */
897 do_gettimeofday(&work_end);
898 timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
899 if (opc == QUOTA_DQACQ)
900 lprocfs_counter_add(qctxt->lqc_stats,
901 wait ? LQUOTA_SYNC_ACQ : LQUOTA_ASYNC_ACQ,
904 lprocfs_counter_add(qctxt->lqc_stats,
905 wait ? LQUOTA_SYNC_REL : LQUOTA_ASYNC_REL,
907 RETURN(rc ? rc : rc2);
910 spin_lock(&qctxt->lqc_lock);
911 if (!qctxt->lqc_import) {
912 spin_unlock(&qctxt->lqc_lock);
913 QDATA_DEBUG(qdata, "lqc_import is invalid.\n");
915 spin_lock(&qunit_hash_lock);
916 remove_qunit_nolock(qunit);
917 spin_unlock(&qunit_hash_lock);
919 compute_lqs_after_removing_qunit(qunit);
921 QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, -EAGAIN);
922 wake_up_all(&qunit->lq_waitq);
924 /* this is for qunit_get() */
926 /* this for alloc_qunit() */
                spin_lock(&qctxt->lqc_lock);
                if (wait && !qctxt->lqc_import) {
                        spin_unlock(&qctxt->lqc_lock);

                        LASSERT(oti && oti->oti_thread &&
                                oti->oti_thread->t_watchdog);

                        lc_watchdog_disable(oti->oti_thread->t_watchdog);
                        CDEBUG(D_QUOTA, "sleep for quota master\n");
                        l_wait_event(qctxt->lqc_wait_for_qmaster,
                                     check_qm(qctxt), &lwi);
                        CDEBUG(D_QUOTA, "wake up when quota master is back\n");
                        lc_watchdog_touch(oti->oti_thread->t_watchdog,
                                          GET_TIMEOUT(oti->oti_thread->t_svc));

                        spin_unlock(&qctxt->lqc_lock);

        imp = class_import_get(qctxt->lqc_import);
        spin_unlock(&qctxt->lqc_lock);

        /* build dqacq/dqrel request */

        req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_QUOTA_DQACQ,
                                        LUSTRE_MDS_VERSION, opc);
        class_import_put(imp);

                CDEBUG(D_ERROR, "Can't alloc request\n");
                dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
                /* this is for qunit_get() */

        ptlrpc_request_set_replen(req);
        req->rq_no_resend = req->rq_no_delay = 1;
        rc = quota_copy_qdata(req, qdata, QUOTA_REQUEST, QUOTA_IMPORT);
                CDEBUG(D_ERROR, "Can't pack qunit_data(rc: %d)\n", rc);
                ptlrpc_req_finished(req);
                dqacq_completion(obd, qctxt, qdata, -EPROTO, opc);
                /* this is for qunit_get() */

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);

        aa->aa_qunit = qunit;

        req->rq_interpret_reply = dqacq_interpret;
        ptlrpcd_add_req(req, PSCOPE_OTHER);

        QDATA_DEBUG(qdata, "%s scheduled.\n",
                    opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                struct qunit_data *p = &qunit->lq_data;

                QDATA_DEBUG(p, "qunit(%p) is waiting for dqacq.\n", qunit);
                l_wait_event(qunit->lq_waitq, got_qunit(qunit, is_master(qctxt)),

                /* rc = -EAGAIN: the quota master isn't ready yet
                 * rc = QUOTA_REQ_RETURNED: a quota req has finished
                 * rc = -EDQUOT: out of quota
                 * rc = -EBUSY: recovery is in progress
                 * other rc < 0: real errors; callers of schedule_dqacq
                 * should take care of them */
                spin_lock(&qunit->lq_lock);

                spin_unlock(&qunit->lq_lock);
                CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: id(%u) flag(%u) "
                       "rc(%d) owner(%d)\n", qunit, qunit->lq_data.qd_id,
                       qunit->lq_data.qd_flags, rc, qunit->lq_owner);

        do_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        if (opc == QUOTA_DQACQ)
                lprocfs_counter_add(qctxt->lqc_stats,
                                    wait ? LQUOTA_SYNC_ACQ : LQUOTA_ASYNC_ACQ,

                lprocfs_counter_add(qctxt->lqc_stats,
                                    wait ? LQUOTA_SYNC_REL : LQUOTA_ASYNC_REL,
qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                   const unsigned int id[], __u32 isblk, int wait,
                   struct obd_trans_info *oti)
        int rc = 0, i = USRQUOTA;
        struct qunit_data qdata[MAXQUOTAS];

        CLASSERT(MAXQUOTAS < 4);
        if (!sb_any_quota_enabled(qctxt->lqc_sb))

        for (i = 0; i < MAXQUOTAS; i++) {
                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = i;
                        QDATA_SET_BLK(&qdata[i]);
                qdata[i].qd_count = 0;

                rc = check_cur_qunit(obd, qctxt, &qdata[i]);

                        /* need to acquire or release */
                        opc = rc == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                        rc = schedule_dqacq(obd, qctxt, &qdata[i], opc,

                } else if (wait == 1) {
                        /* when wait is 1, mds_quota_acquire or
                         * filter_quota_acquire is the caller */
                        rc = qctxt_wait_pending_dqacq(qctxt, id[i], i, isblk);
qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                         unsigned short type, int isblk)
        struct lustre_qunit *qunit = NULL;
        struct qunit_data qdata;
        struct timeval work_start;
        struct timeval work_end;
        struct l_wait_info lwi = { 0 };

        do_gettimeofday(&work_start);

        qdata.qd_flags = type;
                QDATA_SET_BLK(&qdata);

        spin_lock(&qunit_hash_lock);
        qunit = dqacq_in_flight(qctxt, &qdata);
        spin_unlock(&qunit_hash_lock);

                struct qunit_data *p = &qunit->lq_data;

                QDATA_DEBUG(p, "qunit(%p) is waiting for dqacq.\n", qunit);
                l_wait_event(qunit->lq_waitq, got_qunit(qunit, is_master(qctxt)),
                CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: rc(%d) "
                       "owner(%d)\n", qunit, qunit->lq_rc, qunit->lq_owner);
                /* keep same as schedule_dqacq() b=17030 */
                spin_lock(&qunit->lq_lock);

                spin_unlock(&qunit->lq_lock);
                /* this is for dqacq_in_flight() */

                do_gettimeofday(&work_end);
                timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                lprocfs_counter_add(qctxt->lqc_stats,
                                    isblk ? LQUOTA_WAIT_PENDING_BLK_QUOTA :
                                            LQUOTA_WAIT_PENDING_INO_QUOTA,

                do_gettimeofday(&work_end);
                timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                lprocfs_counter_add(qctxt->lqc_stats,
                                    isblk ? LQUOTA_NOWAIT_PENDING_BLK_QUOTA :
                                            LQUOTA_NOWAIT_PENDING_INO_QUOTA,
qctxt_init(struct obd_device *obd, dqacq_handler_t handler)
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct obd_device_target *obt = &obd->u.obt;
        struct super_block *sb = obt->obt_sb;

        rc = ptlrpcd_addref();

        cfs_waitq_init(&qctxt->lqc_wait_for_qmaster);
        spin_lock_init(&qctxt->lqc_lock);
        spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_handler = handler;

        qctxt->lqc_obt = obt;
        qctxt->lqc_import = NULL;
        qctxt->lqc_recovery = 0;
        qctxt->lqc_switch_qs = 1; /* change the qunit size by default */
        qctxt->lqc_valid = 1;
        qctxt->lqc_cqs_boundary_factor = 4;
        qctxt->lqc_cqs_least_bunit = PTLRPC_MAX_BRW_SIZE;
        qctxt->lqc_cqs_least_iunit = 2;
        qctxt->lqc_cqs_qs_factor = 2;
        qctxt->lqc_flags = 0;
        QUOTA_MASTER_UNREADY(qctxt);
        qctxt->lqc_bunit_sz = default_bunit_sz;
        qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
        qctxt->lqc_iunit_sz = default_iunit_sz;
        qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
        qctxt->lqc_switch_seconds = 300; /* enlarging will wait 5 minutes
                                          * after the last shrinking */
        qctxt->lqc_sync_blk = 0;
        spin_unlock(&qctxt->lqc_lock);

        qctxt->lqc_lqs_hash = lustre_hash_init("LQS_HASH",

        if (!qctxt->lqc_lqs_hash) {
                CERROR("failed to initialize lqs hash for %s!\n", obd->obd_name);

        rc = lquota_proc_setup(obd, is_master(qctxt));
                CERROR("failed to initialize proc for %s!\n", obd->obd_name);
void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
        struct lustre_qunit *qunit, *tmp;
        struct list_head tmp_list;
        struct obd_device_target *obt = qctxt->lqc_obt;

        CFS_INIT_LIST_HEAD(&tmp_list);

        spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_valid = 0;
        spin_unlock(&qctxt->lqc_lock);

        spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++) {
                list_for_each_entry_safe(qunit, tmp, &qunit_hash[i], lq_hash) {
                        if (qunit->lq_ctxt != qctxt)

                        remove_qunit_nolock(qunit);
                        list_add(&qunit->lq_hash, &tmp_list);

        spin_unlock(&qunit_hash_lock);

        list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
                list_del_init(&qunit->lq_hash);
                compute_lqs_after_removing_qunit(qunit);

                /* wake up all waiters */
                QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, 0);
                wake_up_all(&qunit->lq_waitq);

        down_write(&obt->obt_rwsem);
        lustre_hash_exit(qctxt->lqc_lqs_hash);
        qctxt->lqc_lqs_hash = NULL;
        up_write(&obt->obt_rwsem);

        /* after qctxt_cleanup, qctxt might be freed, and then check_qm()
         * becomes unpredictable. So we must wait until lqc_wait_for_qmaster
         * is empty */
        while (cfs_waitq_active(&qctxt->lqc_wait_for_qmaster)) {
                cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1));

        if (lquota_proc_cleanup(qctxt))
                CERROR("cleanup proc error!\n");
struct qslave_recov_thread_data {
        struct obd_device *obd;
        struct lustre_quota_ctxt *qctxt;
        struct completion comp;
};

/* FIXME: only block quota is recovered for now */
static int qslave_recovery_main(void *arg)
        struct qslave_recov_thread_data *data = arg;
        struct obd_device *obd = data->obd;
        struct lustre_quota_ctxt *qctxt = data->qctxt;

        ptlrpc_daemonize("qslave_recovd");

        class_incref(obd, "qslave_recovd_filter", obd);

        complete(&data->comp);

        spin_lock(&qctxt->lqc_lock);
        if (qctxt->lqc_recovery) {
                spin_unlock(&qctxt->lqc_lock);
                class_decref(obd, "qslave_recovd_filter", obd);

        qctxt->lqc_recovery = 1;
        spin_unlock(&qctxt->lqc_lock);

        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qunit_data qdata;
                struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
                struct list_head id_list;
                struct dquot_id *dqid, *tmp;

                LOCK_DQONOFF_MUTEX(dqopt);
                if (!sb_has_quota_enabled(qctxt->lqc_sb, type)) {
                        UNLOCK_DQONOFF_MUTEX(dqopt);

                LASSERT(dqopt->files[type] != NULL);
                CFS_INIT_LIST_HEAD(&id_list);
#ifndef KERNEL_SUPPORTS_QUOTA_READ
                rc = fsfilt_qids(obd, dqopt->files[type], NULL, type, &id_list);

                rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);

                UNLOCK_DQONOFF_MUTEX(dqopt);
                        CERROR("failed to get ids from quota file! (rc:%d)\n", rc);
                list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        list_del_init(&dqid->di_link);
                        /* skip slave recovery on itself */
                        if (is_master(qctxt))
                        if (rc && rc != -EBUSY)

                        qdata.qd_id = dqid->di_id;
                        qdata.qd_flags = type;
                        QDATA_SET_BLK(&qdata);

                        ret = check_cur_qunit(obd, qctxt, &qdata);

                                opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                                rc = schedule_dqacq(obd, qctxt, &qdata, opc,

                                CDEBUG(rc == -EBUSY ? D_QUOTA : D_ERROR,
                                       "qslave recovery failed! (id:%d type:%d "
                                       " rc:%d)\n", dqid->di_id, type, rc);

        spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_recovery = 0;
        spin_unlock(&qctxt->lqc_lock);
        class_decref(obd, "qslave_recovd_filter", obd);

qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
        struct qslave_recov_thread_data data;

        if (!sb_any_quota_enabled(qctxt->lqc_sb))

        init_completion(&data.comp);

        rc = kernel_thread(qslave_recovery_main, &data, CLONE_VM|CLONE_FILES);
                CERROR("Cannot start quota recovery thread: rc %d\n", rc);

        wait_for_completion(&data.comp);
/*
 * lqs<->qctxt hash operations
 */

/*
 * string hashing using djb2 hash algorithm
 */
lqs_hash(lustre_hash_t *lh, void *key, unsigned mask)
        struct quota_adjust_qunit *lqs_key;

        lqs_key = (struct quota_adjust_qunit *)key;
        hash = (QAQ_IS_GRP(lqs_key) ? 5381 : 5387) * lqs_key->qaq_id;

        RETURN(hash & mask);

lqs_compare(void *key, struct hlist_node *hnode)
        struct quota_adjust_qunit *lqs_key;
        struct lustre_qunit_size *q;

        lqs_key = (struct quota_adjust_qunit *)key;
        q = hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);

        spin_lock(&q->lqs_lock);
        rc = ((lqs_key->qaq_id == q->lqs_id) &&
              (QAQ_IS_GRP(lqs_key) == LQS_IS_GRP(q)));
        spin_unlock(&q->lqs_lock);

lqs_get(struct hlist_node *hnode)
        struct lustre_qunit_size *q =
                hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);

        atomic_inc(&q->lqs_refcount);
        CDEBUG(D_QUOTA, "lqs=%p refcount %d\n",
               q, atomic_read(&q->lqs_refcount));

lqs_put(struct hlist_node *hnode)
        struct lustre_qunit_size *q =
                hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);

        LASSERT(atomic_read(&q->lqs_refcount) > 0);
        atomic_dec(&q->lqs_refcount);
        CDEBUG(D_QUOTA, "lqs=%p refcount %d\n",
               q, atomic_read(&q->lqs_refcount));
lqs_exit(struct hlist_node *hnode)
        struct lustre_qunit_size *q;

        q = hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
        /*
         * Nothing should be left. The lqs user has dropped its reference
         * and the lqs has already been removed from the table by this
         * time, so we should have 0 refs here.
         */
        LASSERTF(atomic_read(&q->lqs_refcount) == 0,
                 "Busy lqs %p with %d refs\n", q,
                 atomic_read(&q->lqs_refcount));

static lustre_hash_ops_t lqs_hash_ops = {
        .lh_hash    = lqs_hash,
        .lh_compare = lqs_compare,
        .lh_get     = lqs_get,
        .lh_put     = lqs_put,
        .lh_exit    = lqs_exit,
};

#endif /* HAVE_QUOTA_SUPPORT */