 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/quota/quota_context.c
 *
 * Lustre Quota Context
 *
 * Author: Niu YaWei <niu@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LQUOTA

#include <linux/version.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/quotaops.h>
#include <linux/module.h>
#include <linux/init.h>

#include <obd_class.h>
#include <lustre_quota.h>
#include <lustre_fsfilt.h>
#include <lprocfs_status.h>
#include "quota_internal.h"
static int hash_lqs_cur_bits = HASH_LQS_CUR_BITS;
CFS_MODULE_PARM(hash_lqs_cur_bits, "i", int, 0444,
                "the current bits of lqs hash");

static cfs_hash_ops_t lqs_hash_ops;
unsigned long default_bunit_sz = 128 * 1024 * 1024; /* 128M bytes */
unsigned long default_btune_ratio = 50;             /* 50 percent */
unsigned long default_iunit_sz = 5120;              /* 5120 inodes */
unsigned long default_itune_ratio = 50;             /* 50 percent */

cfs_mem_cache_t *qunit_cachep = NULL;
cfs_list_t qunit_hash[NR_DQHASH];
cfs_spinlock_t qunit_hash_lock = CFS_SPIN_LOCK_UNLOCKED;
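
/* All qunits live in a single global hash table of NR_DQHASH buckets,
 * protected by the one qunit_hash_lock spinlock; entries are keyed by
 * (quota context, id, user/group type) via qunit_hashfn() below. */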
/* please sync qunit_state with qunit_state_names */
enum qunit_state {
        /* a qunit is created */
        QUNIT_CREATED      = 0,
        /* a qunit is added into the qunit hash, which means
         * a quota req will be sent or is in flight */
        QUNIT_IN_HASH      = 1,
        /* a qunit is removed from the qunit hash, which
         * means a quota req has been handled and came back */
        QUNIT_RM_FROM_HASH = 2,
        /* the qunit can wake up all threads waiting for it */
        QUNIT_FINISHED     = 3,
};

static const char *qunit_state_names[] = {
        [QUNIT_CREATED]      = "CREATED",
        [QUNIT_IN_HASH]      = "IN_HASH",
        [QUNIT_RM_FROM_HASH] = "RM_FROM_HASH",
        [QUNIT_FINISHED]     = "FINISHED",
};
struct lustre_qunit {
        cfs_list_t                lq_hash;   /** Hash list in memory */
        cfs_atomic_t              lq_refcnt; /** Use count */
        struct lustre_quota_ctxt *lq_ctxt;   /** Quota context this applies to */
        struct qunit_data         lq_data;   /** See qunit_data */
        unsigned int              lq_opc;    /** QUOTA_DQACQ, QUOTA_DQREL */
        cfs_waitq_t               lq_waitq;  /** Threads waiting for this qunit */
        cfs_spinlock_t            lq_lock;   /** Protects the whole structure */
        enum qunit_state          lq_state;  /** Current state of the qunit */
        int                       lq_rc;     /** The rc of lq_data */
        pid_t                     lq_owner;  /** Pid of the owner process */
};
#define QUNIT_SET_STATE(qunit, state)                                   \
do {                                                                    \
        cfs_spin_lock(&qunit->lq_lock);                                 \
        QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), "   \
                    "lq_rc(%d), lq_owner(%d)\n",                        \
                    qunit, qunit_state_names[qunit->lq_state],          \
                    qunit_state_names[state], qunit->lq_rc,             \
                    qunit->lq_owner);                                   \
        qunit->lq_state = state;                                        \
        cfs_spin_unlock(&qunit->lq_lock);                               \
} while(0)
#define QUNIT_SET_STATE_AND_RC(qunit, state, rc)                        \
do {                                                                    \
        cfs_spin_lock(&qunit->lq_lock);                                 \
        qunit->lq_rc = rc;                                              \
        QDATA_DEBUG((&qunit->lq_data), "qunit(%p) lq_state(%s->%s), "   \
                    "lq_rc(%d), lq_owner(%d)\n",                        \
                    qunit, qunit_state_names[qunit->lq_state],          \
                    qunit_state_names[state], qunit->lq_rc,             \
                    qunit->lq_owner);                                   \
        qunit->lq_state = state;                                        \
        cfs_spin_unlock(&qunit->lq_lock);                               \
} while(0)
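
/* Both macros log the transition and update lq_state (plus lq_rc for the
 * _AND_RC variant) under lq_lock; readers of lq_state/lq_rc must therefore
 * take lq_lock as well, as got_qunit() and schedule_dqacq() do below. */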
int should_translate_quota(struct obd_import *imp)
{
        LASSERT(imp);
        if (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_QUOTA64)
                RETURN(0);
        else
                RETURN(1);
}
void qunit_cache_cleanup(void)
{
        int i, rc;

        cfs_spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                LASSERT(cfs_list_empty(qunit_hash + i));
        cfs_spin_unlock(&qunit_hash_lock);

        if (qunit_cachep) {
                rc = cfs_mem_cache_destroy(qunit_cachep);
                LASSERTF(rc == 0, "couldn't destroy qunit_cache slab\n");
                qunit_cachep = NULL;
        }
}
int qunit_cache_init(void)
{
        int i;

        LASSERT(qunit_cachep == NULL);
        qunit_cachep = cfs_mem_cache_create("ll_qunit_cache",
                                            sizeof(struct lustre_qunit),
                                            0, 0);
        if (!qunit_cachep)
                RETURN(-ENOMEM);

        cfs_spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++)
                CFS_INIT_LIST_HEAD(qunit_hash + i);
        cfs_spin_unlock(&qunit_hash_lock);

        RETURN(0);
}
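
/* Hash a (quota context, id, type) tuple into a qunit_hash bucket: the
 * context pointer (shifted past its cache-line alignment bits) is XORed
 * with the quota id, then scaled by the quota type and folded modulo
 * NR_DQHASH. */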
static inline unsigned int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
             __attribute__((__const__));

static inline unsigned int
qunit_hashfn(struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        unsigned int id = qdata->qd_id;
        unsigned int type = QDATA_IS_GRP(qdata);

        unsigned long tmp = ((unsigned long)qctxt >> L1_CACHE_SHIFT) ^ id;
        tmp = (tmp * (MAXQUOTAS - type)) % NR_DQHASH;
        return tmp;
}
/* caller must hold qunit_hash_lock */
static inline struct lustre_qunit *find_qunit(unsigned int hashent,
                                              struct lustre_quota_ctxt *qctxt,
                                              struct qunit_data *qdata)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_data *tmp;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        cfs_list_for_each_entry(qunit, qunit_hash + hashent, lq_hash) {
                tmp = &qunit->lq_data;
                if (qunit->lq_ctxt == qctxt &&
                    qdata->qd_id == tmp->qd_id &&
                    (qdata->qd_flags & LQUOTA_QUNIT_FLAGS) ==
                    (tmp->qd_flags & LQUOTA_QUNIT_FLAGS))
                        return qunit;
        }
        return NULL;
}
/* check_cur_qunit - check the current usage of qunit
 * @qctxt: quota context
 * @qdata: the type of quota unit to be checked
 *
 * return: 1 - need to acquire a qunit;
 *         2 - need to release a qunit;
 *         0 - nothing to do.
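 *
 * The verdict is derived from the fs usage/limit reported by
 * fsfilt_quotactl() combined with the per-id lqs tuning state
 * (qunit size, tune size, pending writes and records).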
 */
static int
check_cur_qunit(struct obd_device *obd,
                struct lustre_quota_ctxt *qctxt, struct qunit_data *qdata)
{
        struct super_block *sb = qctxt->lqc_sb;
        unsigned long qunit_sz, tune_sz;
        __u64 usage, limit, limit_org, pending_write = 0;
        long long record = 0;
        struct obd_quotactl *qctl;
        struct lustre_qunit_size *lqs = NULL;
        int ret = 0;

        if (!ll_sb_has_quota_active(sb, QDATA_IS_GRP(qdata)))
                RETURN(0);

        cfs_spin_lock(&qctxt->lqc_lock);
        if (!qctxt->lqc_valid) {
                cfs_spin_unlock(&qctxt->lqc_lock);
                RETURN(0);
        }
        cfs_spin_unlock(&qctxt->lqc_lock);
        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH) /* no limit */
                        ret = 0;
                else
                        CERROR("can't get fs quota usage! (rc:%d)\n", ret);
                GOTO(out, ret);
        }

        if (QDATA_IS_BLK(qdata)) {
                usage = qctl->qc_dqblk.dqb_curspace;
                limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
        } else {
                usage = qctl->qc_dqblk.dqb_curinodes;
                limit = qctl->qc_dqblk.dqb_ihardlimit;
        }

        /* ignore the case of no quota limit; this also avoids creating an
         * unnecessary lqs for the uid/gid */
        if (!limit)
                GOTO(out, ret = 0);

        lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(qdata), qdata->qd_id),
                               qctxt, 0);
        if (IS_ERR(lqs) || lqs == NULL) {
                CERROR("failed to find a lqs for %sid %u!\n",
                       QDATA_IS_GRP(qdata) ? "g" : "u", qdata->qd_id);
                GOTO(out, ret = 0);
        }
        cfs_spin_lock(&lqs->lqs_lock);

        if (QDATA_IS_BLK(qdata)) {
                qunit_sz = lqs->lqs_bunit_sz;
                tune_sz = lqs->lqs_btune_sz;
                pending_write = lqs->lqs_bwrite_pending;
                record = lqs->lqs_blk_rec;
                LASSERT(!(qunit_sz % QUOTABLOCK_SIZE));
        } else {
                /* no need to change the inode qunit size for now */
                qunit_sz = lqs->lqs_iunit_sz;
                tune_sz = lqs->lqs_itune_sz;
                pending_write = lqs->lqs_iwrite_pending;
                record = lqs->lqs_ino_rec;
        }

        /* we don't count the MIN_QLIMIT */
        if ((limit == MIN_QLIMIT && !QDATA_IS_BLK(qdata)) ||
            (toqb(limit) == MIN_QLIMIT && QDATA_IS_BLK(qdata)))
                limit = 0;

        usage += pending_write;
        limit_org = limit;
        /* while a quota release req is in flight, the limit is assigned a
         * small value before the req returns, so the limit could overflow */

        LASSERT(qdata->qd_count == 0);
        if (limit <= usage + tune_sz) {
                while (qdata->qd_count + limit <=
                       usage + tune_sz)
                        qdata->qd_count += qunit_sz;
                ret = 1;
        } else if (limit > usage + qunit_sz + tune_sz &&
                   limit_org > qdata->qd_count + qunit_sz) {
                while (limit - qdata->qd_count > usage + qunit_sz + tune_sz &&
                       limit_org > qdata->qd_count + qunit_sz)
                        qdata->qd_count += qunit_sz;
                ret = 2;
        }
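
        /* In short: acquire enough qunits to lift the limit above
         * usage + tune_sz, and release only while the limit stays at
         * least qunit_sz + tune_sz above usage; tune_sz provides the
         * hysteresis that keeps acquire/release from ping-ponging. */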
        /* if there are other pending writes for this uid/gid, releasing
         * quota is put off until the last pending write (b=16645) */
        /* if there is an ongoing quota request, a releasing request is
         * aborted. That ongoing quota request will call this function
         * again when it returns (b=18630) */
        if (pending_write || record) {
                CDEBUG(D_QUOTA, "delay quota release\n");
                ret = 0;
        }

        quota_compute_lqs(qdata, lqs, 1, (ret == 1) ? 1 : 0);

        CDEBUG(D_QUOTA, "type: %c, limit: "LPU64", usage: "LPU64
               ", pending_write: "LPU64", record: %lld"
               ", qunit_sz: %lu, tune_sz: %lu, ret: %d.\n",
               QDATA_IS_BLK(qdata) ? 'b' : 'i', limit, usage, pending_write,
               record, qunit_sz, tune_sz, ret);
        LASSERT(ret == 0 || qdata->qd_count);

        cfs_spin_unlock(&lqs->lqs_lock);

        /* this is for quota_search_lqs */
        lqs_putref(lqs);
out:
        OBD_FREE_PTR(qctl);
        RETURN(ret);
}
/*
 * Compute the remaining quota for a certain gid or uid (b=11693)
 */
int compute_remquota(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                     struct qunit_data *qdata, int isblk)
{
        struct super_block *sb = qctxt->lqc_sb;
        __u64 usage, limit;
        struct obd_quotactl *qctl;
        int ret = QUOTA_RET_OK;

        /* ignore root user */
        if (qdata->qd_id == 0 && QDATA_IS_GRP(qdata) == USRQUOTA)
                RETURN(QUOTA_RET_NOLIMIT);
        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL)
                RETURN(-ENOMEM);

        /* get fs quota usage & limit */
        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, sb, qctl);
        if (ret) {
                if (ret == -ESRCH) /* no limit */
                        ret = QUOTA_RET_NOLIMIT;
                else
                        CDEBUG(D_QUOTA, "can't get fs quota usage! (rc:%d)\n",
                               ret);
                GOTO(out, ret);
        }

        usage = isblk ? qctl->qc_dqblk.dqb_curspace :
                        qctl->qc_dqblk.dqb_curinodes;
        limit = isblk ? qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS :
                        qctl->qc_dqblk.dqb_ihardlimit;
        if (!limit) { /* no limit */
                ret = QUOTA_RET_NOLIMIT;
                GOTO(out, ret);
        }

        qdata->qd_count = limit - usage;

out:
        OBD_FREE_PTR(qctl);
        RETURN(ret);
}
static struct lustre_qunit *alloc_qunit(struct lustre_quota_ctxt *qctxt,
                                        struct qunit_data *qdata, int opc)
{
        struct lustre_qunit *qunit = NULL;

        OBD_SLAB_ALLOC_PTR_GFP(qunit, qunit_cachep, CFS_ALLOC_IO);
        if (qunit == NULL)
                RETURN(NULL);

        CFS_INIT_LIST_HEAD(&qunit->lq_hash);
        cfs_waitq_init(&qunit->lq_waitq);
        cfs_atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        memcpy(&qunit->lq_data, qdata, sizeof(*qdata));
        qunit->lq_opc = opc;
        qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
        qunit->lq_owner = cfs_curproc_pid();

        RETURN(qunit);
}
static inline void free_qunit(struct lustre_qunit *qunit)
{
        OBD_SLAB_FREE(qunit, qunit_cachep, sizeof(*qunit));
}

static inline void qunit_get(struct lustre_qunit *qunit)
{
        cfs_atomic_inc(&qunit->lq_refcnt);
}

static void qunit_put(struct lustre_qunit *qunit)
{
        LASSERT(cfs_atomic_read(&qunit->lq_refcnt));
        if (cfs_atomic_dec_and_test(&qunit->lq_refcnt))
                free_qunit(qunit);
}
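
/* qunit life cycle: alloc_qunit() returns with a refcount of 1; every
 * lookup through dqacq_in_flight() takes an extra reference via
 * qunit_get(), and the final qunit_put() releases the structure back
 * to the qunit slab cache. */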
/* caller must hold qunit_hash_lock and release ref of qunit after using it */
static struct lustre_qunit *dqacq_in_flight(struct lustre_quota_ctxt *qctxt,
                                            struct qunit_data *qdata)
{
        unsigned int hashent = qunit_hashfn(qctxt, qdata);
        struct lustre_qunit *qunit;

        LASSERT_SPIN_LOCKED(&qunit_hash_lock);
        qunit = find_qunit(hashent, qctxt, qdata);
        if (qunit)
                qunit_get(qunit);
        return qunit;
}
static void
insert_qunit_nolock(struct lustre_quota_ctxt *qctxt, struct lustre_qunit *qunit)
{
        cfs_list_t *head;

        LASSERT(cfs_list_empty(&qunit->lq_hash));
        head = qunit_hash + qunit_hashfn(qctxt, &qunit->lq_data);
        cfs_list_add(&qunit->lq_hash, head);
        QUNIT_SET_STATE(qunit, QUNIT_IN_HASH);
}
static void compute_lqs_after_removing_qunit(struct lustre_qunit *qunit)
{
        struct lustre_qunit_size *lqs;

        lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(&qunit->lq_data),
                                       qunit->lq_data.qd_id),
                               qunit->lq_ctxt, 0);
        if (lqs && !IS_ERR(lqs)) {
                cfs_spin_lock(&lqs->lqs_lock);
                if (qunit->lq_opc == QUOTA_DQACQ)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 1);
                if (qunit->lq_opc == QUOTA_DQREL)
                        quota_compute_lqs(&qunit->lq_data, lqs, 0, 0);
                cfs_spin_unlock(&lqs->lqs_lock);
                /* this is for quota_search_lqs */
                lqs_putref(lqs);
                /* this is for schedule_dqacq */
                lqs_putref(lqs);
        }
}
static void remove_qunit_nolock(struct lustre_qunit *qunit)
{
        LASSERT(!cfs_list_empty(&qunit->lq_hash));
        LASSERT_SPIN_LOCKED(&qunit_hash_lock);

        cfs_list_del_init(&qunit->lq_hash);
        QUNIT_SET_STATE(qunit, QUNIT_RM_FROM_HASH);
}
void *quota_barrier(struct lustre_quota_ctxt *qctxt,
                    struct obd_quotactl *oqctl, int isblk)
{
        struct lustre_qunit *qunit, *find_qunit;
        int cycle = 0;

        OBD_SLAB_ALLOC_PTR(qunit, qunit_cachep);
        if (qunit == NULL) {
                CERROR("locating %sunit failed for %sid %u\n",
                       isblk ? "b" : "i", oqctl->qc_type ? "g" : "u",
                       oqctl->qc_id);
                qctxt_wait_pending_dqacq(qctxt, oqctl->qc_id,
                                         oqctl->qc_type, isblk);
                RETURN(NULL);
        }

        CFS_INIT_LIST_HEAD(&qunit->lq_hash);
        qunit->lq_lock = CFS_SPIN_LOCK_UNLOCKED;
        cfs_waitq_init(&qunit->lq_waitq);
        cfs_atomic_set(&qunit->lq_refcnt, 1);
        qunit->lq_ctxt = qctxt;
        qunit->lq_data.qd_id = oqctl->qc_id;
        qunit->lq_data.qd_flags = oqctl->qc_type;
        if (isblk)
                QDATA_SET_BLK(&qunit->lq_data);
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_CREATED, 0);
        /* this marks the qunit as a placeholder used only as a barrier */
        qunit->lq_opc = QUOTA_LAST_OPC;

again:
        cfs_spin_lock(&qunit_hash_lock);
        find_qunit = dqacq_in_flight(qctxt, &qunit->lq_data);
        if (find_qunit) {
                cfs_spin_unlock(&qunit_hash_lock);
                qunit_put(find_qunit);
                qctxt_wait_pending_dqacq(qctxt, oqctl->qc_id,
                                         oqctl->qc_type, isblk);
                CDEBUG(D_QUOTA, "cycle=%d\n", cycle++);
                goto again;
        }

        insert_qunit_nolock(qctxt, qunit);
        cfs_spin_unlock(&qunit_hash_lock);
        RETURN(qunit);
}
void quota_unbarrier(void *handle)
{
        struct lustre_qunit *qunit = (struct lustre_qunit *)handle;

        if (qunit == NULL) {
                CERROR("handle is NULL\n");
                return;
        }

        LASSERT(qunit->lq_opc == QUOTA_LAST_OPC);
        cfs_spin_lock(&qunit_hash_lock);
        remove_qunit_nolock(qunit);
        cfs_spin_unlock(&qunit_hash_lock);
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, QUOTA_REQ_RETURNED);
        cfs_waitq_signal(&qunit->lq_waitq);
        qunit_put(qunit);
}
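
/* The barrier works by parking a placeholder qunit (lq_opc ==
 * QUOTA_LAST_OPC) in the hash: concurrent acq/rel for the same id see it
 * as an in-flight request and wait, until quota_unbarrier() removes it
 * and wakes the waiters with QUOTA_REQ_RETURNED. */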
#define INC_QLIMIT(limit, count) (limit == MIN_QLIMIT) ? \
                                 (limit = count) : (limit += count)
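
/* MIN_QLIMIT acts as a sentinel "almost zero" limit, so the first acquired
 * count replaces it outright instead of being added to it; check_cur_qunit()
 * likewise ignores a MIN_QLIMIT limit when computing usage. */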
static inline int is_master(struct lustre_quota_ctxt *qctxt)
{
        return qctxt->lqc_handler ? 1 : 0;
}
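
/* Only the quota master has a dqacq handler installed by qctxt_init();
 * slave contexts leave lqc_handler NULL and must send DQACQ/DQREL RPCs
 * to the master instead. */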
static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait,
               struct obd_trans_info *oti);

static inline void qdata_to_oqaq(struct qunit_data *qdata,
                                 struct quota_adjust_qunit *oqaq)
{
        LASSERT(qdata);
        LASSERT(oqaq);

        oqaq->qaq_flags = qdata->qd_flags;
        oqaq->qaq_id = qdata->qd_id;
        if (QDATA_IS_ADJBLK(qdata))
                oqaq->qaq_bunit_sz = qdata->qd_qunit;
        if (QDATA_IS_ADJINO(qdata))
                oqaq->qaq_iunit_sz = qdata->qd_qunit;
}
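
/* qdata_to_oqaq() repacks a qunit_data into the quota_adjust_qunit form
 * used to publish new block/inode qunit sizes to the slaves; see
 * quota_adjust_slave_lqs() in dqacq_completion() below. */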
static int
dqacq_completion(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                 struct qunit_data *qdata, int rc, int opc)
{
        struct lustre_qunit *qunit = NULL;
        struct super_block *sb = qctxt->lqc_sb;
        int err = 0;
        struct quota_adjust_qunit *oqaq = NULL;
        int rc1 = 0;

        LASSERT(qdata);
        QDATA_DEBUG(qdata, "obd(%s): complete %s quota req\n",
                    obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");

        /* inject a delay only for a release req of more than 5MB (b=18491) */
        if (opc == QUOTA_DQREL && qdata->qd_count >= 5242880)
                OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);

        /* update local operational quota file */
        if (rc == 0) {
                __u64 count = QUSG(qdata->qd_count, QDATA_IS_BLK(qdata));
                struct obd_quotactl *qctl;
                __u64 *hardlimit;

                OBD_ALLOC_PTR(qctl);
                if (qctl == NULL)
                        GOTO(out, err = -ENOMEM);
                /* acq/rel of a qunit for a given uid/gid is serialized,
                 * so there is no race between getting and setting the fs
                 * quota limit */
                qctl->qc_cmd = Q_GETQUOTA;
                qctl->qc_id = qdata->qd_id;
                qctl->qc_type = QDATA_IS_GRP(qdata);
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err) {
                        CERROR("error getting fs quota limit! (rc:%d)\n", err);
                        GOTO(out_mem, err);
                }

                if (QDATA_IS_BLK(qdata)) {
                        qctl->qc_dqblk.dqb_valid = QIF_BLIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_bhardlimit;
                } else {
                        qctl->qc_dqblk.dqb_valid = QIF_ILIMITS;
                        hardlimit = &qctl->qc_dqblk.dqb_ihardlimit;
                }

                CDEBUG(D_QUOTA, "hardlimit: "LPU64"\n", *hardlimit);

                switch (opc) {
                case QUOTA_DQACQ:
                        INC_QLIMIT(*hardlimit, count);
                        break;
                case QUOTA_DQREL:
                        LASSERTF(count < *hardlimit,
                                 "id(%u) flag(%u) type(%c) isblk(%c) "
                                 "count("LPU64") qd_qunit("LPU64") "
                                 "hardlimit("LPU64").\n",
                                 qdata->qd_id, qdata->qd_flags,
                                 QDATA_IS_GRP(qdata) ? 'g' : 'u',
                                 QDATA_IS_BLK(qdata) ? 'b' : 'i',
                                 qdata->qd_count, qdata->qd_qunit, *hardlimit);
                        *hardlimit -= count;
                        break;
                default:
                        LBUG();
                }
                /* clear quota limit */
                if (count == 0)
                        *hardlimit = 0;

                qctl->qc_cmd = Q_SETQUOTA;
                err = fsfilt_quotactl(obd, sb, qctl);
                if (err)
                        CERROR("error setting fs quota limit! (rc:%d)\n", err);

                QDATA_DEBUG(qdata, "%s completion\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
out_mem:
                OBD_FREE_PTR(qctl);
        } else if (rc == -EDQUOT) {
                QDATA_DEBUG(qdata, "acquire qunit got EDQUOT.\n");
        } else if (rc == -EBUSY) {
                QDATA_DEBUG(qdata, "it is recovering, got EBUSY.\n");
        } else {
                CERROR("acquire qunit got error! (rc:%d)\n", rc);
        }
        /* remove the qunit from hash */
        cfs_spin_lock(&qunit_hash_lock);

        qunit = dqacq_in_flight(qctxt, qdata);
        /* this qunit has been removed by qctxt_cleanup() */
        if (qunit == NULL) {
                cfs_spin_unlock(&qunit_hash_lock);
                QDATA_DEBUG(qdata, "%s is discarded because qunit isn't found\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                RETURN(err);
        }

        LASSERT(opc == qunit->lq_opc);
        /* remove this qunit from lq_hash so that new processes cannot be added
         * to qunit->lq_waiters */
        remove_qunit_nolock(qunit);
        cfs_spin_unlock(&qunit_hash_lock);

        compute_lqs_after_removing_qunit(qunit);

        if (rc == 0)
                rc = QUOTA_REQ_RETURNED;
        QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, rc);
        /* wake up all waiters */
        cfs_waitq_broadcast(&qunit->lq_waitq);
        /* this is for dqacq_in_flight() */
        qunit_put(qunit);
        if (rc < 0 && rc != -EDQUOT)
                GOTO(out, err);

        /* don't reschedule in such cases:
         * - acq/rel failure and the qunit isn't changed,
         *   but not during quota recovery.
         * - local dqacq/dqrel.
         * - local disk I/O failure.
         */
        OBD_ALLOC_PTR(oqaq);
        if (!oqaq)
                GOTO(out1, err = -ENOMEM);
        qdata_to_oqaq(qdata, oqaq);
        /* adjust the qunit size in slaves */
        rc1 = quota_adjust_slave_lqs(oqaq, qctxt);

        if (rc1 < 0) {
                CERROR("adjust slave's qunit size failed! (rc:%d)\n", rc1);
                GOTO(out1, err = rc1);
        }
        if (err || (rc < 0 && rc != -EBUSY && rc1 == 0) || is_master(qctxt))
                GOTO(out1, err);

        if (opc == QUOTA_DQREL && qdata->qd_count >= 5242880 &&
            OBD_FAIL_CHECK(OBD_FAIL_QUOTA_DELAY_REL))
                GOTO(out1, err);

        /* reschedule another dqacq/dqrel if needed */
        qdata->qd_count = 0;
        qdata->qd_flags &= LQUOTA_QUNIT_FLAGS;
        rc1 = check_cur_qunit(obd, qctxt, qdata);
        if (rc1 > 0) {
                int opc;
                opc = rc1 == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                rc1 = schedule_dqacq(obd, qctxt, qdata, opc, 0, NULL);
                QDATA_DEBUG(qdata, "reschedule opc(%d) rc(%d)\n", opc, rc1);
        }

out1:
        /* this is for alloc_qunit() */
        qunit_put(qunit);
out:
        RETURN(err);
}

struct dqacq_async_args {
        struct lustre_quota_ctxt *aa_ctxt;
        struct lustre_qunit *aa_qunit;
};
static int dqacq_interpret(const struct lu_env *env,
                           struct ptlrpc_request *req, void *data, int rc)
{
        struct dqacq_async_args *aa = (struct dqacq_async_args *)data;
        struct lustre_quota_ctxt *qctxt = aa->aa_ctxt;
        struct obd_device_target *obt = qctxt->lqc_obt;
        struct lustre_qunit *qunit = aa->aa_qunit;
        struct obd_device *obd = req->rq_import->imp_obd;
        struct qunit_data *qdata = NULL;

        LASSERT(req->rq_import);

        cfs_down_read(&obt->obt_rwsem);
        /* if a quota req times out or is dropped, we should still update the
         * quota statistics, which is handled in dqacq_completion(); in that
         * situation we should take qdata from the request instead of the
         * reply */
        qdata = quota_get_qdata(req, (rc != 0) ? QUOTA_REQUEST : QUOTA_REPLY,
                                QUOTA_IMPORT);
        if (IS_ERR(qdata)) {
                DEBUG_REQ(D_ERROR, req,
                          "error unpacking qunit_data(rc: %ld)\n",
                          PTR_ERR(qdata));
                qdata = &qunit->lq_data;
        }

        QDATA_DEBUG(qdata, "qdata: interpret rc(%d).\n", rc);
        QDATA_DEBUG((&qunit->lq_data), "lq_data:\n");
        if (qdata->qd_id != qunit->lq_data.qd_id ||
            OBD_FAIL_CHECK(OBD_FAIL_QUOTA_RET_QDATA)) {
                CERROR("the returned qd_id isn't expected! "
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_id,
                       qunit->lq_data.qd_id);
                qdata->qd_id = qunit->lq_data.qd_id;
        }
        if (QDATA_IS_GRP(qdata) != QDATA_IS_GRP(&qunit->lq_data)) {
                CERROR("the returned grp/usr isn't expected! "
                       "(qdata: %u, lq_data: %u)\n", qdata->qd_flags,
                       qunit->lq_data.qd_flags);
                if (QDATA_IS_GRP(&qunit->lq_data))
                        QDATA_SET_GRP(qdata);
                else
                        QDATA_CLR_GRP(qdata);
        }
        if (qdata->qd_count > qunit->lq_data.qd_count) {
                CERROR("the returned qd_count isn't expected! "
                       "(qdata: "LPU64", lq_data: "LPU64")\n", qdata->qd_count,
                       qunit->lq_data.qd_count);
        }

        if (unlikely(rc == -ESRCH))
                CERROR("quota for %s has been enabled by master, but disabled "
                       "by slave.\n", QDATA_IS_GRP(qdata) ? "group" : "user");

        rc = dqacq_completion(obd, qctxt, qdata, rc,
                              lustre_msg_get_opc(req->rq_reqmsg));

        cfs_up_read(&obt->obt_rwsem);
        RETURN(rc);
}
/*
 * check if the quota master is online
 */
int check_qm(struct lustre_quota_ctxt *qctxt)
{
        int rc;

        cfs_spin_lock(&qctxt->lqc_lock);
        /* quit waiting when mds is back or qctxt is cleaned up */
        rc = qctxt->lqc_import || !qctxt->lqc_valid;
        cfs_spin_unlock(&qctxt->lqc_lock);

        RETURN(rc);
}
/* wake up all waiting threads when lqc_import is NULL */
void dqacq_interrupt(struct lustre_quota_ctxt *qctxt)
{
        struct lustre_qunit *qunit, *tmp;
        int i;

        cfs_spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++) {
                cfs_list_for_each_entry_safe(qunit, tmp, &qunit_hash[i],
                                             lq_hash) {
                        if (qunit->lq_ctxt != qctxt)
                                continue;

                        /* Wake up all waiters. Do not change lq_state.
                         * The waiters will check lq_rc, which is kept as 0
                         * if nothing else changes it; they will then return
                         * -EAGAIN to the caller, who can perform the related
                         * quota acq/rel if necessary. */
                        cfs_waitq_broadcast(&qunit->lq_waitq);
                }
        }
        cfs_spin_unlock(&qunit_hash_lock);
}
static int got_qunit(struct lustre_qunit *qunit, int is_master)
{
        struct lustre_quota_ctxt *qctxt = qunit->lq_ctxt;
        int rc = 0;

        cfs_spin_lock(&qunit->lq_lock);
        switch (qunit->lq_state) {
        case QUNIT_IN_HASH:
        case QUNIT_RM_FROM_HASH:
                break;
        case QUNIT_FINISHED:
                rc = 1;
                break;
        default:
                CERROR("invalid qunit state %d\n", qunit->lq_state);
        }
        cfs_spin_unlock(&qunit->lq_lock);

        if (!rc) {
                cfs_spin_lock(&qctxt->lqc_lock);
                rc = !qctxt->lqc_valid;
                if (!is_master)
                        rc |= !qctxt->lqc_import;
                cfs_spin_unlock(&qctxt->lqc_lock);
        }

        RETURN(rc);
}
static void
revoke_lqs_rec(struct lustre_qunit_size *lqs, struct qunit_data *qdata, int opc)
{
        /* revoke the lqs_xxx_rec that was computed in check_cur_qunit() */
        cfs_spin_lock(&lqs->lqs_lock);
        quota_compute_lqs(qdata, lqs, 0, (opc == QUOTA_DQACQ) ? 1 : 0);
        cfs_spin_unlock(&lqs->lqs_lock);
}
static int verify_cur_qunit(struct obd_device *obd,
                            struct lustre_quota_ctxt *qctxt,
                            struct qunit_data *qdata, int opc)
{
        struct obd_quotactl *qctl;
        __u64 limit;
        int ret = 0;

        /* an extra quota acquire can be tolerated. */
        if (opc == QUOTA_DQACQ)
                RETURN(ret);

        OBD_ALLOC_PTR(qctl);
        if (qctl == NULL) {
                CERROR("Failed to allocate memory!\n");
                RETURN(-ENOMEM);
        }

        qctl->qc_cmd = Q_GETQUOTA;
        qctl->qc_id = qdata->qd_id;
        qctl->qc_type = QDATA_IS_GRP(qdata);
        ret = fsfilt_quotactl(obd, qctxt->lqc_sb, qctl);
        if (ret) {
                /* -ESRCH means no limit */
                CDEBUG(ret == -ESRCH ? D_QUOTA : D_ERROR,
                       "Can't get quota usage! rc:%d\n", ret);
                GOTO(out, ret = 0);
        }

        if (QDATA_IS_BLK(qdata))
                limit = qctl->qc_dqblk.dqb_bhardlimit << QUOTABLOCK_BITS;
        else
                limit = qctl->qc_dqblk.dqb_ihardlimit;

        if (limit <= qdata->qd_count) {
                CDEBUG(D_QUOTA, "drop extra release. id(%u), flag(%u), "
                       "type(%c), isblk(%c), count("LPU64"), "
                       "qd_qunit("LPU64"), hardlimit("LPU64").\n",
                       qdata->qd_id, qdata->qd_flags,
                       QDATA_IS_GRP(qdata) ? 'g' : 'u',
                       QDATA_IS_BLK(qdata) ? 'b' : 'i',
                       qdata->qd_count, qdata->qd_qunit, limit);
static int
schedule_dqacq(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
               struct qunit_data *qdata, int opc, int wait,
               struct obd_trans_info *oti)
{
        struct lustre_qunit *qunit, *empty;
        struct l_wait_info lwi = { 0 };
        struct ptlrpc_request *req;
        struct dqacq_async_args *aa;
        struct obd_import *imp = NULL;
        struct lustre_qunit_size *lqs = NULL;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        int rc = 0;

        LASSERT(opc == QUOTA_DQACQ || opc == QUOTA_DQREL);
        cfs_gettimeofday(&work_start);

        lqs = quota_search_lqs(LQS_KEY(QDATA_IS_GRP(qdata), qdata->qd_id),
                               qctxt, 0);
        if (lqs == NULL || IS_ERR(lqs)) {
                CERROR("Can't find the lustre qunit size!\n");
                RETURN(-EINVAL);
        }

        if ((empty = alloc_qunit(qctxt, qdata, opc)) == NULL) {
                revoke_lqs_rec(lqs, qdata, opc);
                /* this is for quota_search_lqs */
                lqs_putref(lqs);
                RETURN(-ENOMEM);
        }

        OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_SD, 5);

        cfs_spin_lock(&qunit_hash_lock);
        qunit = dqacq_in_flight(qctxt, qdata);
        if (qunit) {
                cfs_spin_unlock(&qunit_hash_lock);
                qunit_put(empty);

                revoke_lqs_rec(lqs, qdata, opc);
                /* this is for quota_search_lqs */
                lqs_putref(lqs);
                goto wait_completion;
        }
        qunit = empty;
        qunit_get(qunit);
        insert_qunit_nolock(qctxt, qunit);
        cfs_spin_unlock(&qunit_hash_lock);
        /* From here, the quota request will be sent anyway.
         * When this qdata request returns or is cancelled,
         * lqs_putref will be called at that time */
        lqs_getref(lqs);
        /* this is for quota_search_lqs */
        lqs_putref(lqs);

        /*
         * There is a race between check_cur_qunit() and dqacq_completion():
         * check_cur_qunit() reads the hardlimit and calculates how much
         * quota needs to be acquired/released based on it, but the
         * hardlimit can be changed by dqacq_completion() at any time,
         * which could result in an extra quota acquire/release while a
         * dqacq is in flight.
         *
         * In general, such an extra dqacq doesn't bring a fatal error,
         * unless an extra release is going to release more than the
         * 'hardlimit' quota.
         *
         * To minimize the code changes (anyway, it'll be totally
         * rewritten in the new quota design), we just do one more
         * check here to avoid the extra release which could bring a
         * fatal error. A better solution would be to calculate qd_count
         * here and remove the lqs_blk/ino_rec stuff.
         */
        rc = verify_cur_qunit(obd, qctxt, qdata, opc);
        if (rc != 0) {
                cfs_spin_lock(&qunit_hash_lock);
                remove_qunit_nolock(qunit);
                cfs_spin_unlock(&qunit_hash_lock);

                compute_lqs_after_removing_qunit(qunit);
                /* this is for qunit_get() */
                qunit_put(qunit);
                /* this is for alloc_qunit() */
                qunit_put(qunit);
                /* drop this extra release silently */
                RETURN(0);
        }
        QDATA_DEBUG(qdata, "obd(%s): send %s quota req\n",
                    obd->obd_name, (opc == QUOTA_DQACQ) ? "acq" : "rel");
        /* the master is going to dqacq/dqrel from itself */
        if (is_master(qctxt)) {
                int rc2;
                QDATA_DEBUG(qdata, "local %s.\n",
                            opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
                QDATA_SET_CHANGE_QS(qdata);
                rc = qctxt->lqc_handler(obd, qdata, opc);
                rc2 = dqacq_completion(obd, qctxt, qdata, rc, opc);
                /* this is for qunit_get() */
                qunit_put(qunit);

                cfs_gettimeofday(&work_end);
                timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                if (opc == QUOTA_DQACQ)
                        lprocfs_counter_add(qctxt->lqc_stats,
                                            wait ? LQUOTA_SYNC_ACQ : LQUOTA_ASYNC_ACQ,
                                            timediff);
                else
                        lprocfs_counter_add(qctxt->lqc_stats,
                                            wait ? LQUOTA_SYNC_REL : LQUOTA_ASYNC_REL,
                                            timediff);
                RETURN(rc ? rc : rc2);
        }
        cfs_spin_lock(&qctxt->lqc_lock);
        if (!qctxt->lqc_import) {
                cfs_spin_unlock(&qctxt->lqc_lock);
                QDATA_DEBUG(qdata, "lqc_import is invalid.\n");

                cfs_spin_lock(&qunit_hash_lock);
                remove_qunit_nolock(qunit);
                cfs_spin_unlock(&qunit_hash_lock);

                compute_lqs_after_removing_qunit(qunit);

                QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, -EAGAIN);
                cfs_waitq_broadcast(&qunit->lq_waitq);

                /* this is for qunit_get() */
                qunit_put(qunit);
                /* this is for alloc_qunit() */
                qunit_put(qunit);

                cfs_spin_lock(&qctxt->lqc_lock);
                if (wait && !qctxt->lqc_import) {
                        cfs_spin_unlock(&qctxt->lqc_lock);

                        LASSERT(oti && oti->oti_thread);
                        /* The recovery thread doesn't have a watchdog
                         * attached. LU-369 */
                        if (oti->oti_thread->t_watchdog)
                                lc_watchdog_disable(oti->oti_thread->t_watchdog);
                        CDEBUG(D_QUOTA, "sleep for quota master\n");
                        l_wait_event(qctxt->lqc_wait_for_qmaster,
                                     check_qm(qctxt), &lwi);
                        CDEBUG(D_QUOTA, "wake up when quota master is back\n");
                        if (oti->oti_thread->t_watchdog)
                                lc_watchdog_touch(oti->oti_thread->t_watchdog,
                                        ptlrpc_server_get_timeout(
                                                oti->oti_thread->t_svcpt));
                } else {
                        cfs_spin_unlock(&qctxt->lqc_lock);
                }

                RETURN(-EAGAIN);
        }
        imp = class_import_get(qctxt->lqc_import);
        cfs_spin_unlock(&qctxt->lqc_lock);
        /* build dqacq/dqrel request */

        req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_QUOTA_DQACQ,
                                        LUSTRE_MDS_VERSION, opc);
        class_import_put(imp);
        if (req == NULL) {
                CERROR("Can't alloc request\n");
                dqacq_completion(obd, qctxt, qdata, -ENOMEM, opc);
                /* this is for qunit_get() */
                qunit_put(qunit);
                RETURN(-ENOMEM);
        }

        ptlrpc_request_set_replen(req);
        req->rq_no_resend = req->rq_no_delay = 1;
        rc = quota_copy_qdata(req, qdata, QUOTA_REQUEST, QUOTA_IMPORT);
        if (rc < 0) {
                CERROR("Can't pack qunit_data(rc: %d)\n", rc);
                ptlrpc_req_finished(req);
                dqacq_completion(obd, qctxt, qdata, -EPROTO, opc);
                /* this is for qunit_get() */
                qunit_put(qunit);
                RETURN(rc);
        }

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_ctxt = qctxt;
        aa->aa_qunit = qunit;

        req->rq_interpret_reply = dqacq_interpret;
        ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);

        QDATA_DEBUG(qdata, "%s scheduled.\n",
                    opc == QUOTA_DQACQ ? "DQACQ" : "DQREL");
wait_completion:
        if (wait && qunit) {
                struct qunit_data *p = &qunit->lq_data;

                QDATA_DEBUG(p, "qunit(%p) is waiting for dqacq.\n", qunit);
                l_wait_event(qunit->lq_waitq, got_qunit(qunit, is_master(qctxt)),
                             &lwi);
                /* rc = -EAGAIN: the quota master isn't ready yet
                 * rc = QUOTA_REQ_RETURNED: a quota req has finished
                 * rc = -EDQUOT: out of quota
                 * rc = -EBUSY: recovery is happening
                 * other rc < 0: real errors; functions that call
                 * schedule_dqacq should take care of this */
                cfs_spin_lock(&qunit->lq_lock);
                rc = qunit->lq_rc;
                cfs_spin_unlock(&qunit->lq_lock);
                CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: id(%u) flag(%u) "
                       "rc(%d) owner(%d)\n", qunit, qunit->lq_data.qd_id,
                       qunit->lq_data.qd_flags, rc, qunit->lq_owner);
                qunit_put(qunit);
        }

        cfs_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        if (opc == QUOTA_DQACQ)
                lprocfs_counter_add(qctxt->lqc_stats,
                                    wait ? LQUOTA_SYNC_ACQ : LQUOTA_ASYNC_ACQ,
                                    timediff);
        else
                lprocfs_counter_add(qctxt->lqc_stats,
                                    wait ? LQUOTA_SYNC_REL : LQUOTA_ASYNC_REL,
                                    timediff);

        RETURN(rc);
}
int
qctxt_adjust_qunit(struct obd_device *obd, struct lustre_quota_ctxt *qctxt,
                   const unsigned int id[], __u32 isblk, int wait,
                   struct obd_trans_info *oti)
{
        int rc = 0, i = USRQUOTA;
        struct qunit_data qdata[MAXQUOTAS];

        /* XXX In quota_chk_acq_common(), we do something like:
         *
         *     while (quota_check_common() & QUOTA_RET_ACQUOTA) {
         *             rc = qctxt_adjust_qunit();
         *             ...
         *     }
         *
         * to make sure the slave acquired enough quota from the master.
         *
         * Unfortunately, qctxt_adjust_qunit() checks QB/QI_SET to
         * decide whether to do a real DQACQ or not, but
         * quota_check_common() doesn't check the QB/QI_SET flags. This
         * inconsistency could lead to an infinite loop.
         *
         * We can't fix it by simply adding QB/QI_SET checking in
         * quota_check_common(), since we must guarantee that the
         * paired quota_pending_commit() sees the same QB/QI_SET, but
         * the flags can actually be cleared at any time...
         *
         * A quick non-intrusive solution is to just skip the QB/QI_SET
         * checking here when @wait is non-zero. (If @wait is non-zero,
         * the caller must have already checked QB/QI_SET.) */
        if (!wait && quota_is_set(obd, id, isblk ? QB_SET : QI_SET) == 0)
                RETURN(0);

        for (i = 0; i < MAXQUOTAS; i++) {
                qdata[i].qd_id = id[i];
                qdata[i].qd_flags = i;
                if (isblk)
                        QDATA_SET_BLK(&qdata[i]);
                qdata[i].qd_count = 0;

                rc = check_cur_qunit(obd, qctxt, &qdata[i]);
                if (rc > 0) {
                        int opc;
                        /* need acquire or release */
                        opc = rc == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                        rc = schedule_dqacq(obd, qctxt, &qdata[i], opc,
                                            wait, oti);
                } else if (wait == 1) {
                        /* when wait equals 1, it is mds_quota_acquire or
                         * filter_quota_acquire that is calling us */
                        rc = qctxt_wait_pending_dqacq(qctxt, id[i], i, isblk);
                }
        }

        RETURN(rc);
}
int
qctxt_wait_pending_dqacq(struct lustre_quota_ctxt *qctxt, unsigned int id,
                         unsigned short type, int isblk)
{
        struct lustre_qunit *qunit = NULL;
        struct qunit_data qdata;
        struct timeval work_start;
        struct timeval work_end;
        long timediff;
        struct l_wait_info lwi = { 0 };
        int rc = 0;

        cfs_gettimeofday(&work_start);
        qdata.qd_id = id;
        qdata.qd_flags = type;
        if (isblk)
                QDATA_SET_BLK(&qdata);
        qdata.qd_count = 0;

        cfs_spin_lock(&qunit_hash_lock);
        qunit = dqacq_in_flight(qctxt, &qdata);
        cfs_spin_unlock(&qunit_hash_lock);

        if (qunit) {
                struct qunit_data *p = &qunit->lq_data;

                QDATA_DEBUG(p, "qunit(%p) is waiting for dqacq.\n", qunit);
                l_wait_event(qunit->lq_waitq, got_qunit(qunit, is_master(qctxt)),
                             &lwi);
                CDEBUG(D_QUOTA, "qunit(%p) finishes waiting: rc(%d) "
                       "owner(%d)\n", qunit, qunit->lq_rc, qunit->lq_owner);
                /* keep the same as schedule_dqacq() (b=17030) */
                cfs_spin_lock(&qunit->lq_lock);
                rc = qunit->lq_rc;
                cfs_spin_unlock(&qunit->lq_lock);
                /* this is for dqacq_in_flight() */
                qunit_put(qunit);
                cfs_gettimeofday(&work_end);
                timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                lprocfs_counter_add(qctxt->lqc_stats,
                                    isblk ? LQUOTA_WAIT_PENDING_BLK_QUOTA :
                                            LQUOTA_WAIT_PENDING_INO_QUOTA,
                                    timediff);
        } else {
                cfs_gettimeofday(&work_end);
                timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
                lprocfs_counter_add(qctxt->lqc_stats,
                                    isblk ? LQUOTA_NOWAIT_PENDING_BLK_QUOTA :
                                            LQUOTA_NOWAIT_PENDING_INO_QUOTA,
                                    timediff);
        }

        RETURN(rc);
}
int
qctxt_init(struct obd_device *obd, dqacq_handler_t handler)
{
        struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
        struct obd_device_target *obt = &obd->u.obt;
        struct super_block *sb = obt->obt_sb;
        int rc = 0;

        LASSERT(qctxt);

        rc = ptlrpcd_addref();
        if (rc)
                RETURN(rc);

        cfs_waitq_init(&qctxt->lqc_wait_for_qmaster);
        cfs_waitq_init(&qctxt->lqc_lqs_waitq);
        cfs_atomic_set(&qctxt->lqc_lqs, 0);
        cfs_spin_lock_init(&qctxt->lqc_lock);
        cfs_spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_handler = handler;
        qctxt->lqc_sb = sb;
        qctxt->lqc_obt = obt;
        qctxt->lqc_import = NULL;
        qctxt->lqc_recovery = 0;
        qctxt->lqc_switch_qs = 1; /* changing qunit size is on by default */
        qctxt->lqc_valid = 1;
        qctxt->lqc_cqs_boundary_factor = 4;
        qctxt->lqc_cqs_least_bunit = PTLRPC_MAX_BRW_SIZE;
        qctxt->lqc_cqs_least_iunit = 2;
        qctxt->lqc_cqs_qs_factor = 2;
        qctxt->lqc_flags = 0;
        QUOTA_MASTER_UNREADY(qctxt);
        qctxt->lqc_bunit_sz = default_bunit_sz;
        qctxt->lqc_btune_sz = default_bunit_sz / 100 * default_btune_ratio;
        qctxt->lqc_iunit_sz = default_iunit_sz;
        qctxt->lqc_itune_sz = default_iunit_sz * default_itune_ratio / 100;
        qctxt->lqc_switch_seconds = 300; /* enlarging will wait 5 minutes
                                          * after the last shrinking */
        qctxt->lqc_sync_blk = 0;
        cfs_spin_unlock(&qctxt->lqc_lock);

        qctxt->lqc_lqs_hash = cfs_hash_create("LQS_HASH",
                                              hash_lqs_cur_bits,
                                              HASH_LQS_MAX_BITS,
                                              min(hash_lqs_cur_bits,
                                                  HASH_LQS_BKT_BITS),
                                              0, CFS_HASH_MIN_THETA,
                                              CFS_HASH_MAX_THETA,
                                              &lqs_hash_ops, CFS_HASH_DEFAULT);
        if (!qctxt->lqc_lqs_hash) {
                CERROR("initialize hash lqs for %s error!\n", obd->obd_name);
                RETURN(-ENOMEM);
        }

        rc = lquota_proc_setup(obd, is_master(qctxt));
        if (rc)
                CERROR("initialize proc for %s error!\n", obd->obd_name);

        RETURN(rc);
}
static int check_lqs(struct lustre_quota_ctxt *qctxt)
{
        int rc;

        rc = !cfs_atomic_read(&qctxt->lqc_lqs);

        RETURN(rc);
}

int qctxt_del_lqs(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  cfs_hlist_node_t *hnode, void *data)
{
        /* remove from hash and -1 refcount */
        cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}
void qctxt_cleanup(struct lustre_quota_ctxt *qctxt, int force)
{
        struct lustre_qunit *qunit, *tmp;
        cfs_list_t tmp_list;
        struct l_wait_info lwi = { 0 };
        struct obd_device_target *obt = qctxt->lqc_obt;
        int i;

        CFS_INIT_LIST_HEAD(&tmp_list);

        cfs_spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_valid = 0;
        cfs_spin_unlock(&qctxt->lqc_lock);

        cfs_spin_lock(&qunit_hash_lock);
        for (i = 0; i < NR_DQHASH; i++) {
                cfs_list_for_each_entry_safe(qunit, tmp, &qunit_hash[i],
                                             lq_hash) {
                        if (qunit->lq_ctxt != qctxt)
                                continue;
                        remove_qunit_nolock(qunit);
                        cfs_list_add(&qunit->lq_hash, &tmp_list);
                }
        }
        cfs_spin_unlock(&qunit_hash_lock);

        cfs_list_for_each_entry_safe(qunit, tmp, &tmp_list, lq_hash) {
                cfs_list_del_init(&qunit->lq_hash);
                compute_lqs_after_removing_qunit(qunit);

                /* wake up all waiters */
                QUNIT_SET_STATE_AND_RC(qunit, QUNIT_FINISHED, 0);
                cfs_waitq_broadcast(&qunit->lq_waitq);
                qunit_put(qunit);
        }

        /* after qctxt_cleanup, qctxt might be freed, and then check_qm() is
         * unpredictable. So we must wait until lqc_wait_for_qmaster is empty */
        while (cfs_waitq_active(&qctxt->lqc_wait_for_qmaster)) {
                cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
                cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
                                                   cfs_time_seconds(1));
        }

        /* release the refcount on lustre_qunit_size held by lqs_hash */
        cfs_hash_for_each_safe(qctxt->lqc_lqs_hash, qctxt_del_lqs, NULL);

        l_wait_event(qctxt->lqc_lqs_waitq, check_lqs(qctxt), &lwi);
        cfs_down_write(&obt->obt_rwsem);
        cfs_hash_putref(qctxt->lqc_lqs_hash);
        qctxt->lqc_lqs_hash = NULL;
        cfs_up_write(&obt->obt_rwsem);

        ptlrpcd_decref();

        if (lquota_proc_cleanup(qctxt))
                CERROR("cleanup proc error!\n");
}
struct qslave_recov_thread_data {
        struct obd_device *obd;
        struct lustre_quota_ctxt *qctxt;
        cfs_completion_t comp;
};
/* FIXME: only recovers block quota for now */
static int qslave_recovery_main(void *arg)
{
        struct qslave_recov_thread_data *data = arg;
        struct obd_device *obd = data->obd;
        struct lustre_quota_ctxt *qctxt = data->qctxt;
        unsigned int type;
        int rc = 0;

        cfs_daemonize_ctxt("qslave_recovd");

        class_incref(obd, "qslave_recovd_filter", obd);

        cfs_complete(&data->comp);

        cfs_spin_lock(&qctxt->lqc_lock);
        if (qctxt->lqc_recovery) {
                cfs_spin_unlock(&qctxt->lqc_lock);
                class_decref(obd, "qslave_recovd_filter", obd);
                RETURN(0);
        }
        qctxt->lqc_recovery = 1;
        cfs_spin_unlock(&qctxt->lqc_lock);
        for (type = USRQUOTA; type < MAXQUOTAS; type++) {
                struct qunit_data qdata;
                struct quota_info *dqopt = sb_dqopt(qctxt->lqc_sb);
                cfs_list_t id_list;
                struct dquot_id *dqid, *tmp;
                int ret;

                mutex_lock(&dqopt->dqonoff_mutex);
                if (!ll_sb_has_quota_active(qctxt->lqc_sb, type)) {
                        mutex_unlock(&dqopt->dqonoff_mutex);
                        continue;
                }

                LASSERT(dqopt->files[type] != NULL);
                CFS_INIT_LIST_HEAD(&id_list);
                rc = fsfilt_qids(obd, NULL, dqopt->files[type], type, &id_list);
                mutex_unlock(&dqopt->dqonoff_mutex);
                if (rc)
                        CERROR("Getting ids from quota file failed. (rc:%d)\n",
                               rc);

                cfs_list_for_each_entry_safe(dqid, tmp, &id_list, di_link) {
                        cfs_list_del_init(&dqid->di_link);
                        /* skip slave recovery on itself */
                        if (is_master(qctxt))
                                goto free;
                        if (rc && rc != -EBUSY)
                                goto free;

                        qdata.qd_id = dqid->di_id;
                        qdata.qd_flags = type;
                        QDATA_SET_BLK(&qdata);
                        qdata.qd_count = 0;

                        ret = check_cur_qunit(obd, qctxt, &qdata);
                        if (ret > 0) {
                                int opc;
                                opc = ret == 1 ? QUOTA_DQACQ : QUOTA_DQREL;
                                rc = schedule_dqacq(obd, qctxt, &qdata, opc,
                                                    0, NULL);
                        }

                        if (rc && rc != -EBUSY)
                                CERROR("qslave recovery failed! (id:%d type:%d "
                                       "rc:%d)\n", dqid->di_id, type, rc);
free:
                        OBD_FREE_PTR(dqid);
                }
        }
        cfs_spin_lock(&qctxt->lqc_lock);
        qctxt->lqc_recovery = 0;
        cfs_spin_unlock(&qctxt->lqc_lock);
        class_decref(obd, "qslave_recovd_filter", obd);
        RETURN(rc);
}
void
qslave_start_recovery(struct obd_device *obd, struct lustre_quota_ctxt *qctxt)
{
        struct qslave_recov_thread_data data;
        int rc;

        if (!ll_sb_any_quota_active(qctxt->lqc_sb))
                return;

        data.obd = obd;
        data.qctxt = qctxt;
        cfs_init_completion(&data.comp);

        rc = cfs_create_thread(qslave_recovery_main, &data,
                               CLONE_VM | CLONE_FILES);
        if (rc < 0) {
                CERROR("Cannot start quota recovery thread: rc %d\n", rc);
                return;
        }

        cfs_wait_for_completion(&data.comp);
}
inline int quota_is_on(struct lustre_quota_ctxt *qctxt,
                       struct obd_quotactl *oqctl)
{
        return ((qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type)) ==
                UGQUOTA2LQC(oqctl->qc_type));
}

inline int quota_is_off(struct lustre_quota_ctxt *qctxt,
                        struct obd_quotactl *oqctl)
{
        return !(qctxt->lqc_flags & UGQUOTA2LQC(oqctl->qc_type));
}
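
/* Note the asymmetry: quota_is_on() requires every requested quota type
 * to be active, while quota_is_off() holds only when none of them is;
 * with both user and group quota requested, a single active type makes
 * both predicates false. */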
/**
 * When quotaon is run, build an lqs for every uid/gid that has a quota
 * limit set. quota_search_lqs() holds one reference on the lqs, which
 * is released when qctxt_cleanup() is executed. (b=18574)
 *
 * Should be called with obt->obt_quotachecking held. (b=20152)
 */
void build_lqs(struct obd_device *obd)
{
        struct obd_device_target *obt = &obd->u.obt;
        struct lustre_quota_ctxt *qctxt = &obt->obt_qctxt;
        cfs_list_t id_list;
        int i, rc;

        LASSERT_SEM_LOCKED(&obt->obt_quotachecking);
        CFS_INIT_LIST_HEAD(&id_list);
        for (i = 0; i < MAXQUOTAS; i++) {
                struct dquot_id *dqid, *tmp;

                if (sb_dqopt(qctxt->lqc_sb)->files[i] == NULL)
                        continue;

                rc = fsfilt_qids(obd, NULL, sb_dqopt(qctxt->lqc_sb)->files[i],
                                 i, &id_list);
                if (rc) {
                        CERROR("%s: failed to get %s qids!\n", obd->obd_name,
                               i ? "group" : "user");
                        continue;
                }

                cfs_list_for_each_entry_safe(dqid, tmp, &id_list,
                                             di_link) {
                        struct lustre_qunit_size *lqs;

                        cfs_list_del_init(&dqid->di_link);
                        lqs = quota_search_lqs(LQS_KEY(i, dqid->di_id),
                                               qctxt, 1);
                        if (lqs && !IS_ERR(lqs)) {
                                lqs->lqs_flags |= dqid->di_flag;
                                lqs_putref(lqs);
                        } else {
                                CERROR("%s: failed to create a lqs for %sid %u"
                                       "\n", obd->obd_name, i ? "g" : "u",
                                       dqid->di_id);
                        }

                        OBD_FREE_PTR(dqid);
                }
        }
}
/*
 * lqs<->qctxt hash operations
 */

/*
 * id hashing derived from the djb2 string hash algorithm
 */
static unsigned
lqs_hash(cfs_hash_t *hs, const void *key, unsigned mask)
{
        unsigned long long id;
        unsigned hash;

        LASSERT(key);
        id = *((unsigned long long *)key);
        hash = (LQS_KEY_GRP(id) ? 5381 : 5387) * (unsigned)LQS_KEY_ID(id);

        RETURN(hash & mask);
}
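
/* 5381 is the classic djb2 seed; a different odd multiplier (5387) is
 * used for group keys, presumably so that a user id and the equal-valued
 * group id land in different buckets. */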
static void *
lqs_key(cfs_hlist_node_t *hnode)
{
        struct lustre_qunit_size *lqs;

        lqs = cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
        RETURN(&lqs->lqs_key);
}

static int
lqs_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
        struct lustre_qunit_size *q =
                cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);

        RETURN(q->lqs_key == *((unsigned long long *)key));
}

static void *
lqs_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);
}

static void
lqs_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct lustre_qunit_size *q =
                cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);

        lqs_getref(q);
}

static void
lqs_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct lustre_qunit_size *q =
                cfs_hlist_entry(hnode, struct lustre_qunit_size, lqs_hash);

        lqs_putref(q);
}

static void
lqs_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        CERROR("It should not have any item left to be handled by this!\n");
}
static cfs_hash_ops_t lqs_hash_ops = {
        .hs_hash       = lqs_hash,
        .hs_key        = lqs_key,
        .hs_keycmp     = lqs_keycmp,
        .hs_object     = lqs_object,
        .hs_get        = lqs_get,
        .hs_put_locked = lqs_put_locked,
        .hs_exit       = lqs_exit,
};