* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
# include <linux/init.h>
# include <linux/fs.h>
# include <linux/jbd.h>
-# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-# include <linux/smp_lock.h>
-# include <linux/buffer_head.h>
-# include <linux/workqueue.h>
-# include <linux/mount.h>
-# else
-# include <linux/locks.h>
-# endif
+# include <linux/smp_lock.h>
+# include <linux/buffer_head.h>
+# include <linux/workqueue.h>
+# include <linux/mount.h>
#else /* __KERNEL__ */
# include <liblustre.h>
#endif
#ifdef HAVE_QUOTA_SUPPORT
static cfs_time_t last_print = 0;
-static spinlock_t last_print_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t last_print_lock = CFS_SPIN_LOCK_UNLOCKED;
static int filter_quota_setup(struct obd_device *obd)
{
struct obd_device_target *obt = &obd->u.obt;
ENTRY;
- init_rwsem(&obt->obt_rwsem);
+ cfs_init_rwsem(&obt->obt_rwsem);
obt->obt_qfmt = LUSTRE_QUOTA_V2;
- atomic_set(&obt->obt_quotachecking, 1);
+ cfs_sema_init(&obt->obt_quotachecking, 1);
rc = qctxt_init(obd, NULL);
if (rc)
CERROR("initialize quota context failed! (rc:%d)\n", rc);
{
struct obd_export *exp = data;
struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
- struct obd_import *imp;
+ struct obd_import *imp = exp->exp_imp_reverse;
ENTRY;
+ LASSERT(imp != NULL);
+
/* setup the quota context import */
- spin_lock(&qctxt->lqc_lock);
- qctxt->lqc_import = exp->exp_imp_reverse;
- spin_unlock(&qctxt->lqc_lock);
- CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated now, \n",
- obd->obd_name,exp->exp_imp_reverse, obd);
-
- /* make imp's connect flags equal relative exp's connect flags
- * adding it to avoid the scan export list
- */
- imp = qctxt->lqc_import;
- if (likely(imp))
+ cfs_spin_lock(&qctxt->lqc_lock);
+ if (qctxt->lqc_import != NULL) {
+ cfs_spin_unlock(&qctxt->lqc_lock);
+ if (qctxt->lqc_import == imp)
+ CDEBUG(D_WARNING, "%s: lqc_import(%p) of obd(%p) was "
+ "activated already.\n", obd->obd_name, imp, obd);
+ else
+ CERROR("%s: lqc_import(%p:%p) of obd(%p) was "
+ "activated by others.\n", obd->obd_name,
+ qctxt->lqc_import, imp, obd);
+ } else {
+ qctxt->lqc_import = imp;
+ /* make imp's connect flags equal relative exp's connect flags
+ * adding it to avoid the scan export list */
imp->imp_connect_data.ocd_connect_flags |=
- (exp->exp_connect_flags &
- (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
-
- cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
- /* start quota slave recovery thread. (release high limits) */
- qslave_start_recovery(obd, qctxt);
+ (exp->exp_connect_flags &
+ (OBD_CONNECT_QUOTA64 | OBD_CONNECT_CHANGE_QS));
+ cfs_spin_unlock(&qctxt->lqc_lock);
+ CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is reactivated "
+ "now.\n", obd->obd_name, imp, obd);
+
+ cfs_waitq_signal(&qctxt->lqc_wait_for_qmaster);
+ /* start quota slave recovery thread. (release high limits) */
+ qslave_start_recovery(obd, qctxt);
+ }
RETURN(0);
}
static int filter_quota_clearinfo(struct obd_export *exp, struct obd_device *obd)
{
struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
+ struct obd_import *imp = exp->exp_imp_reverse;
ENTRY;
/* lquota may be not set up before destroying export, b=14896 */
if (!obd->obd_set_up)
RETURN(0);
+ if (unlikely(imp == NULL))
+ RETURN(0);
+
/* when exp->exp_imp_reverse is destroyed, the corresponding lqc_import
* should be invalid b=12374 */
- if (qctxt->lqc_import && qctxt->lqc_import == exp->exp_imp_reverse) {
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
+ if (qctxt->lqc_import == imp) {
qctxt->lqc_import = NULL;
- spin_unlock(&qctxt->lqc_lock);
- CDEBUG(D_QUOTA, "%s: lqc_import of obd(%p) is invalid now.\n",
- obd->obd_name, obd);
+ cfs_spin_unlock(&qctxt->lqc_lock);
+ CDEBUG(D_QUOTA, "%s: lqc_import(%p) of obd(%p) is invalid now.\n",
+ obd->obd_name, imp, obd);
+ ptlrpc_cleanup_imp(imp);
+ dqacq_interrupt(qctxt);
+ } else {
+ cfs_spin_unlock(&qctxt->lqc_lock);
}
RETURN(0);
}
{
ENTRY;
- if (!sb_any_quota_enabled(obd->u.obt.obt_sb))
+ if (!ll_sb_any_quota_active(obd->u.obt.obt_sb))
RETURN(0);
if (ignore) {
RETURN(0);
}
+#define GET_OA_ID(flag, oa) ((flag) == USRQUOTA ? (oa)->o_uid : (oa)->o_gid)
static int filter_quota_getflag(struct obd_device *obd, struct obdo *oa)
{
struct obd_device_target *obt = &obd->u.obt;
struct obd_quotactl *oqctl;
ENTRY;
- if (!sb_any_quota_enabled(obt->obt_sb))
+ if (!ll_sb_any_quota_active(obt->obt_sb))
RETURN(0);
OBD_ALLOC_PTR(oqctl);
- if (!oqctl) {
- CERROR("Not enough memory!");
+ if (!oqctl)
RETURN(-ENOMEM);
- }
/* set over quota flags for a uid/gid */
oa->o_valid |= OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA;
oa->o_flags &= ~(OBD_FL_NO_USRQUOTA | OBD_FL_NO_GRPQUOTA);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- struct quota_adjust_qunit oqaq_tmp;
struct lustre_qunit_size *lqs = NULL;
- oqaq_tmp.qaq_flags = cnt;
- oqaq_tmp.qaq_id = (cnt == USRQUOTA) ? oa->o_uid : oa->o_gid;
-
- quota_search_lqs(NULL, &oqaq_tmp, qctxt, &lqs);
- if (lqs) {
- spin_lock(&lqs->lqs_lock);
+ lqs = quota_search_lqs(LQS_KEY(cnt, GET_OA_ID(cnt, oa)),
+ qctxt, 0);
+ if (lqs == NULL || IS_ERR(lqs)) {
+ rc = PTR_ERR(lqs);
+ if (rc)
+ CDEBUG(D_QUOTA, "search lqs for %s %d failed, "
+ "(rc = %d)\n",
+ cnt == USRQUOTA ? "user" : "group",
+ cnt == USRQUOTA ? oa->o_uid : oa->o_gid,
+ rc);
+ break;
+ } else {
+ cfs_spin_lock(&lqs->lqs_lock);
if (lqs->lqs_bunit_sz <= qctxt->lqc_sync_blk) {
oa->o_flags |= (cnt == USRQUOTA) ?
OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
CDEBUG(D_QUOTA, "set sync flag: bunit(%lu), "
"sync_blk(%d)\n", lqs->lqs_bunit_sz,
qctxt->lqc_sync_blk);
lqs_putref(lqs);
continue;
}
- spin_unlock(&lqs->lqs_lock);
+ cfs_spin_unlock(&lqs->lqs_lock);
/* this is for quota_search_lqs */
lqs_putref(lqs);
}
rc = err;
oa->o_valid &= ~((cnt == USRQUOTA) ? OBD_MD_FLUSRQUOTA :
OBD_MD_FLGRPQUOTA);
+ CDEBUG(D_QUOTA, "fsfilt getquota for %s %d failed, "
+ "(rc = %d)\n",
+ cnt == USRQUOTA ? "user" : "group",
+ cnt == USRQUOTA ? oa->o_uid : oa->o_gid, err);
continue;
}
if (oqctl->qc_dqblk.dqb_bhardlimit &&
(toqb(oqctl->qc_dqblk.dqb_curspace) >=
- oqctl->qc_dqblk.dqb_bhardlimit))
+ oqctl->qc_dqblk.dqb_bhardlimit)) {
oa->o_flags |= (cnt == USRQUOTA) ?
OBD_FL_NO_USRQUOTA : OBD_FL_NO_GRPQUOTA;
+ CDEBUG(D_QUOTA, "out of quota for %s %d\n",
+ cnt == USRQUOTA ? "user" : "group",
+ cnt == USRQUOTA ? oa->o_uid : oa->o_gid);
+ }
}
OBD_FREE_PTR(oqctl);
RETURN(rc);
* check whether the left quota of certain uid and gid can satisfy a block_write
* or inode_create rpc. When need to acquire quota, return QUOTA_RET_ACQUOTA
*/
-static int quota_check_common(struct obd_device *obd, unsigned int uid,
- unsigned int gid, int count, int cycle, int isblk,
- struct inode *inode, int frags, int *pending)
+static int quota_check_common(struct obd_device *obd, const unsigned int id[],
+ int pending[], int count, int cycle, int isblk,
+ struct inode *inode, int frags)
{
struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
int i;
- __u32 id[MAXQUOTAS] = { uid, gid };
struct qunit_data qdata[MAXQUOTAS];
int mb = 0;
int rc = 0, rc2[2] = { 0, 0 };
ENTRY;
- CLASSERT(MAXQUOTAS < 4);
- if (!sb_any_quota_enabled(qctxt->lqc_sb))
- RETURN(rc);
-
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (!qctxt->lqc_valid){
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
RETURN(rc);
}
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
for (i = 0; i < MAXQUOTAS; i++) {
struct lustre_qunit_size *lqs = NULL;
if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
continue;
- quota_search_lqs(&qdata[i], NULL, qctxt, &lqs);
- if (!lqs)
+ lqs = quota_search_lqs(LQS_KEY(i, id[i]), qctxt, 0);
+ if (lqs == NULL)
continue;
+ if (IS_ERR(lqs)) {
+ CERROR("can not find lqs for check_common: "
+ "[id %u] [%c] [isblk %d] [count %d] [rc %ld]\n",
+ id[i], i % 2 ? 'g': 'u', isblk, count,
+ PTR_ERR(lqs));
+ RETURN(PTR_ERR(lqs));
+ }
+
rc2[i] = compute_remquota(obd, qctxt, &qdata[i], isblk);
- spin_lock(&lqs->lqs_lock);
+ cfs_spin_lock(&lqs->lqs_lock);
if (!cycle) {
if (isblk) {
- *pending = count * CFS_PAGE_SIZE;
+ pending[i] = count * CFS_PAGE_SIZE;
/* in order to complete this write, we need extra
* meta blocks. This function can get it through
* data needed to be written b=16542 */
- mb = *pending;
- LASSERT(inode && frags > 0);
- if (fsfilt_get_mblk(obd, qctxt->lqc_sb, &mb,
- inode, frags) < 0)
- CDEBUG(D_ERROR,
- "can't get extra meta blocks.\n");
- else
- *pending += mb;
- lqs->lqs_bwrite_pending += *pending;
+ if (inode) {
+ mb = pending[i];
+ rc = fsfilt_get_mblk(obd, qctxt->lqc_sb,
+ &mb, inode,
+ frags);
+ if (rc)
+ CERROR("%s: can't get extra "
+ "meta blocks\n",
+ obd->obd_name);
+ else
+ pending[i] += mb;
+ }
+ lqs->lqs_bwrite_pending += pending[i];
} else {
- *pending = count;
- lqs->lqs_iwrite_pending += *pending;
+ pending[i] = count;
+ lqs->lqs_iwrite_pending += pending[i];
}
}
+
+ /* if xx_rec < 0, that means quota is being released,
+ * and it may be returned before we use it. So if
+ * we find this situation, we assume it has already
+ * been returned b=18491 */
+ if (isblk && lqs->lqs_blk_rec < 0) {
+ if (qdata[i].qd_count < -lqs->lqs_blk_rec)
+ qdata[i].qd_count = 0;
+ else
+ qdata[i].qd_count += lqs->lqs_blk_rec;
+ }
+ if (!isblk && lqs->lqs_ino_rec < 0) {
+ if (qdata[i].qd_count < -lqs->lqs_ino_rec)
+ qdata[i].qd_count = 0;
+ else
+ qdata[i].qd_count += lqs->lqs_ino_rec;
+ }
+
+ CDEBUG(D_QUOTA, "[id %u] [%c] [isblk %d] [count %d]"
+ " [lqs pending: %lu] [qd_count: "LPU64"] [metablocks: %d]"
+ " [pending: %d]\n", id[i], i % 2 ? 'g': 'u', isblk, count,
+ isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
+ qdata[i].qd_count, mb, pending[i]);
if (rc2[i] == QUOTA_RET_OK) {
if (isblk && qdata[i].qd_count < lqs->lqs_bwrite_pending)
rc2[i] = QUOTA_RET_ACQUOTA;
lqs->lqs_iwrite_pending)
rc2[i] = QUOTA_RET_ACQUOTA;
}
- spin_unlock(&lqs->lqs_lock);
- CDEBUG(D_QUOTA, "count: %d, lqs pending: %lu, qd_count: "LPU64
- ", metablocks: %d, isblk: %d, pending: %d.\n", count,
- isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
- qdata[i].qd_count, mb, isblk, *pending);
+
+ cfs_spin_unlock(&lqs->lqs_lock);
+
+ if (lqs->lqs_blk_rec < 0 &&
+ qdata[i].qd_count <
+ lqs->lqs_bwrite_pending - lqs->lqs_blk_rec - mb)
+ OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REL, 5);
/* When cycle is zero, lqs_*_pending will be changed. We will
* get reference of the lqs here and put reference of lqs in
RETURN(rc);
}
-static int quota_chk_acq_common(struct obd_device *obd, unsigned int uid,
- unsigned int gid, int count, int *pending,
- quota_acquire acquire,
+int quota_is_set(struct obd_device *obd, const unsigned int id[], int flag)
+{
+ struct lustre_qunit_size *lqs;
+ int i, q_set = 0;
+
+ if (!ll_sb_any_quota_active(obd->u.obt.obt_qctxt.lqc_sb))
+ RETURN(0);
+
+ for (i = 0; i < MAXQUOTAS; i++) {
+ lqs = quota_search_lqs(LQS_KEY(i, id[i]),
+ &obd->u.obt.obt_qctxt, 0);
+ if (lqs && !IS_ERR(lqs)) {
+ if (lqs->lqs_flags & flag)
+ q_set = 1;
+ lqs_putref(lqs);
+ }
+ }
+
+ return q_set;
+}
+
+static int quota_chk_acq_common(struct obd_device *obd, struct obd_export *exp,
+ const unsigned int id[], int pending[],
+ int count, quota_acquire acquire,
struct obd_trans_info *oti, int isblk,
struct inode *inode, int frags)
{
int rc = 0, cycle = 0, count_err = 1;
ENTRY;
+ if (!quota_is_set(obd, id, isblk ? QB_SET : QI_SET))
+ RETURN(0);
+
+ if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
+ /* If the client has been evicted or if it
+ * timed out and tried to reconnect already,
+ * abort the request immediately */
+ RETURN(-ENOTCONN);
+
CDEBUG(D_QUOTA, "check quota for %s\n", obd->obd_name);
- *pending = 0;
+ pending[USRQUOTA] = pending[GRPQUOTA] = 0;
/* Unfortunately, if quota master is too busy to handle the
* pre-dqacq in time and quota hash on ost is used up, we
* have to wait for the completion of in flight dqacq/dqrel,
* in order to get enough quota for write b=12588 */
- do_gettimeofday(&work_start);
- while ((rc = quota_check_common(obd, uid, gid, count, cycle, isblk,
- inode, frags, pending)) &
+ cfs_gettimeofday(&work_start);
+ while ((rc = quota_check_common(obd, id, pending, count, cycle, isblk,
+ inode, frags)) &
QUOTA_RET_ACQUOTA) {
- spin_lock(&qctxt->lqc_lock);
+ cfs_spin_lock(&qctxt->lqc_lock);
if (!qctxt->lqc_import && oti) {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
LASSERT(oti && oti->oti_thread &&
oti->oti_thread->t_watchdog);
l_wait_event(qctxt->lqc_wait_for_qmaster, check_qm(qctxt),
&lwi);
CDEBUG(D_QUOTA, "wake up when quota master is back\n");
- lc_watchdog_touch(oti->oti_thread->t_watchdog);
+ lc_watchdog_touch(oti->oti_thread->t_watchdog,
+ CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
} else {
- spin_unlock(&qctxt->lqc_lock);
+ cfs_spin_unlock(&qctxt->lqc_lock);
}
cycle++;
OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
/* after acquire(), we should run quota_check_common again
* so that we confirm there are enough quota to finish write */
- rc = acquire(obd, uid, gid, oti, isblk);
+ rc = acquire(obd, id, oti, isblk);
/* please reference to dqacq_completion for the below */
/* a new request is finished, try again */
- if (rc == -EAGAIN) {
+ if (rc == QUOTA_REQ_RETURNED) {
CDEBUG(D_QUOTA, "finish a quota req, try again\n");
continue;
}
break;
}
+ /* Related quota has been disabled by master, but enabled by
+ * slave, do not try again. */
+ if (unlikely(rc == -ESRCH)) {
+ CERROR("mismatched quota configuration, stop try.\n");
+ break;
+ }
+
+ if (isblk && (exp->exp_failed || exp->exp_abort_active_req))
+ /* The client has been evicted or tried to
+ * reconnect already, abort the request */
+ RETURN(-ENOTCONN);
+
/* -EBUSY and others, wait a second and try again */
if (rc < 0) {
cfs_waitq_t waitq;
struct l_wait_info lwi;
if (oti && oti->oti_thread && oti->oti_thread->t_watchdog)
- lc_watchdog_touch(oti->oti_thread->t_watchdog);
+ lc_watchdog_touch(oti->oti_thread->t_watchdog,
+ CFS_GET_TIMEOUT(oti->oti_thread->t_svc));
CDEBUG(D_QUOTA, "rc: %d, count_err: %d\n", rc,
count_err++);
- init_waitqueue_head(&waitq);
+ cfs_waitq_init(&waitq);
lwi = LWI_TIMEOUT(cfs_time_seconds(min(cycle, 10)), NULL,
NULL);
l_wait_event(waitq, 0, &lwi);
}
- if (rc < 0 || cycle % 10 == 2) {
- spin_lock(&last_print_lock);
+ if (rc < 0 || cycle % 10 == 0) {
+ cfs_spin_lock(&last_print_lock);
if (last_print == 0 ||
cfs_time_before((last_print + cfs_time_seconds(30)),
cfs_time_current())) {
last_print = cfs_time_current();
- spin_unlock(&last_print_lock);
+ cfs_spin_unlock(&last_print_lock);
CWARN("still haven't managed to acquire quota "
"space from the quota master after %d "
"retries (err=%d, rc=%d)\n",
cycle, count_err - 1, rc);
} else {
- spin_unlock(&last_print_lock);
+ cfs_spin_unlock(&last_print_lock);
}
}
CDEBUG(D_QUOTA, "recheck quota with rc: %d, cycle: %d\n", rc,
cycle);
}
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats,
isblk ? LQUOTA_WAIT_FOR_CHK_BLK :
LQUOTA_WAIT_FOR_CHK_INO,
timediff);
+ if (rc > 0)
+ rc = 0;
RETURN(rc);
}
* when a block_write or inode_create rpc is finished, adjust the record for
* pending blocks and inodes
*/
-static int quota_pending_commit(struct obd_device *obd, unsigned int uid,
- unsigned int gid, int pending, int isblk)
+static int quota_pending_commit(struct obd_device *obd, const unsigned int id[],
+ int pending[], int isblk)
{
struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
struct timeval work_start;
struct timeval work_end;
long timediff;
int i;
- __u32 id[MAXQUOTAS] = { uid, gid };
struct qunit_data qdata[MAXQUOTAS];
ENTRY;
CDEBUG(D_QUOTA, "commit pending quota for %s\n", obd->obd_name);
CLASSERT(MAXQUOTAS < 4);
- if (!sb_any_quota_enabled(qctxt->lqc_sb))
+ if (!ll_sb_any_quota_active(qctxt->lqc_sb))
RETURN(0);
- do_gettimeofday(&work_start);
+ cfs_gettimeofday(&work_start);
for (i = 0; i < MAXQUOTAS; i++) {
struct lustre_qunit_size *lqs = NULL;
+ LASSERT(pending[i] >= 0);
+ if (pending[i] == 0)
+ continue;
+
qdata[i].qd_id = id[i];
qdata[i].qd_flags = i;
if (isblk)
if (qdata[i].qd_id == 0 && !QDATA_IS_GRP(&qdata[i]))
continue;
- quota_search_lqs(&qdata[i], NULL, qctxt, &lqs);
- if (lqs) {
- int flag = 0;
- spin_lock(&lqs->lqs_lock);
- if (isblk) {
- if (lqs->lqs_bwrite_pending >= pending) {
- lqs->lqs_bwrite_pending -= pending;
- spin_unlock(&lqs->lqs_lock);
- flag = 1;
- } else {
- spin_unlock(&lqs->lqs_lock);
- CDEBUG(D_ERROR,
- "there are too many blocks!\n");
- }
- } else {
- if (lqs->lqs_iwrite_pending >= pending) {
- lqs->lqs_iwrite_pending -= pending;
- spin_unlock(&lqs->lqs_lock);
- flag = 1;
- } else {
- spin_unlock(&lqs->lqs_lock);
- CDEBUG(D_ERROR,
- "there are too many files!\n");
- }
- }
- CDEBUG(D_QUOTA, "lqs pending: %lu, pending: %d, "
- "isblk: %d.\n",
- isblk ? lqs->lqs_bwrite_pending :
- lqs->lqs_iwrite_pending, pending, isblk);
+ lqs = quota_search_lqs(LQS_KEY(i, qdata[i].qd_id), qctxt, 0);
+ if (lqs == NULL || IS_ERR(lqs)) {
+ CERROR("can not find lqs for pending_commit: "
+ "[id %u] [%c] [pending %u] [isblk %d] (rc %ld), "
+ "maybe cause unexpected lqs refcount error!\n",
+ id[i], i ? 'g': 'u', pending[i], isblk,
+ lqs ? PTR_ERR(lqs) : -1);
+ continue;
+ }
- lqs_putref(lqs);
- /* When lqs_*_pening is changed back, we'll putref lqs
- * here b=14784 */
- if (flag)
- lqs_putref(lqs);
+ cfs_spin_lock(&lqs->lqs_lock);
+ if (isblk) {
+ LASSERTF(lqs->lqs_bwrite_pending >= pending[i],
+ "there are too many blocks! [id %u] [%c] "
+ "[bwrite_pending %lu] [pending %u]\n",
+ id[i], i % 2 ? 'g' : 'u',
+ lqs->lqs_bwrite_pending, pending[i]);
+
+ lqs->lqs_bwrite_pending -= pending[i];
+ } else {
+ LASSERTF(lqs->lqs_iwrite_pending >= pending[i],
+ "there are too many files! [id %u] [%c] "
+ "[iwrite_pending %lu] [pending %u]\n",
+ id[i], i % 2 ? 'g' : 'u',
+ lqs->lqs_iwrite_pending, pending[i]);
+
+ lqs->lqs_iwrite_pending -= pending[i];
}
+ CDEBUG(D_QUOTA, "%s: lqs_pending=%lu pending[%d]=%d isblk=%d\n",
+ obd->obd_name,
+ isblk ? lqs->lqs_bwrite_pending : lqs->lqs_iwrite_pending,
+ i, pending[i], isblk);
+ cfs_spin_unlock(&lqs->lqs_lock);
+
+ /* for quota_search_lqs in pending_commit */
+ lqs_putref(lqs);
+ /* for quota_search_lqs in quota_check */
+ lqs_putref(lqs);
}
- do_gettimeofday(&work_end);
+ cfs_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
lprocfs_counter_add(qctxt->lqc_stats,
isblk ? LQUOTA_WAIT_FOR_COMMIT_BLK :
RETURN(0);
}
- init_rwsem(&obt->obt_rwsem);
+ cfs_init_rwsem(&obt->obt_rwsem);
obt->obt_qfmt = LUSTRE_QUOTA_V2;
mds->mds_quota_info.qi_version = LUSTRE_QUOTA_V2;
- atomic_set(&obt->obt_quotachecking, 1);
+ cfs_sema_init(&obt->obt_quotachecking, 1);
/* initialize quota master and quota context */
- sema_init(&mds->mds_qonoff_sem, 1);
+ cfs_init_rwsem(&mds->mds_qonoff_sem);
rc = qctxt_init(obd, dqacq_handler);
if (rc) {
- CERROR("initialize quota context failed! (rc:%d)\n", rc);
+ CERROR("%s: initialize quota context failed! (rc:%d)\n",
+ obd->obd_name, rc);
RETURN(rc);
}
mds->mds_quota = 1;
memset(&oqctl, 0, sizeof(oqctl));
oqctl.qc_type = UGQUOTA;
- down(&mds->mds_qonoff_sem);
+ cfs_down_write(&mds->mds_qonoff_sem);
mds_admin_quota_off(obd, &oqctl);
- up(&mds->mds_qonoff_sem);
+ cfs_up_write(&mds->mds_qonoff_sem);
RETURN(0);
}
-static int quota_acquire_common(struct obd_device *obd, unsigned int uid,
- unsigned int gid, struct obd_trans_info *oti,
- int isblk)
+static int quota_acquire_common(struct obd_device *obd, const unsigned int id[],
+ struct obd_trans_info *oti, int isblk)
{
struct lustre_quota_ctxt *qctxt = &obd->u.obt.obt_qctxt;
int rc;
ENTRY;
- rc = qctxt_adjust_qunit(obd, qctxt, uid, gid, isblk, 1, oti);
+ rc = qctxt_adjust_qunit(obd, qctxt, id, isblk, 1, oti);
RETURN(rc);
}
#endif /* __KERNEL__ */
struct osc_quota_info {
- struct list_head oqi_hash; /* hash list */
+ cfs_list_t oqi_hash; /* hash list */
struct client_obd *oqi_cli; /* osc obd */
unsigned int oqi_id; /* uid/gid of a file */
short oqi_type; /* quota type */
};
-spinlock_t qinfo_list_lock = SPIN_LOCK_UNLOCKED;
+cfs_spinlock_t qinfo_list_lock = CFS_SPIN_LOCK_UNLOCKED;
-static struct list_head qinfo_hash[NR_DQHASH];
+static cfs_list_t qinfo_hash[NR_DQHASH];
/* SLAB cache for client quota context */
cfs_mem_cache_t *qinfo_cachep = NULL;
/* caller must hold qinfo_list_lock */
static inline void insert_qinfo_hash(struct osc_quota_info *oqi)
{
- struct list_head *head = qinfo_hash +
+ cfs_list_t *head = qinfo_hash +
hashfn(oqi->oqi_cli, oqi->oqi_id, oqi->oqi_type);
LASSERT_SPIN_LOCKED(&qinfo_list_lock);
- list_add(&oqi->oqi_hash, head);
+ cfs_list_add(&oqi->oqi_hash, head);
}
/* caller must hold qinfo_list_lock */
static inline void remove_qinfo_hash(struct osc_quota_info *oqi)
{
LASSERT_SPIN_LOCKED(&qinfo_list_lock);
- list_del_init(&oqi->oqi_hash);
+ cfs_list_del_init(&oqi->oqi_hash);
}
/* caller must hold qinfo_list_lock */
ENTRY;
LASSERT_SPIN_LOCKED(&qinfo_list_lock);
- list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
+ cfs_list_for_each_entry(oqi, &qinfo_hash[hashent], oqi_hash) {
if (oqi->oqi_cli == cli &&
oqi->oqi_id == id && oqi->oqi_type == type)
return oqi;
struct osc_quota_info *oqi;
ENTRY;
- OBD_SLAB_ALLOC(oqi, qinfo_cachep, CFS_ALLOC_STD, sizeof(*oqi));
+ OBD_SLAB_ALLOC(oqi, qinfo_cachep, CFS_ALLOC_IO, sizeof(*oqi));
if(!oqi)
RETURN(NULL);
OBD_SLAB_FREE(oqi, qinfo_cachep, sizeof(*oqi));
}
-int osc_quota_chkdq(struct client_obd *cli, unsigned int uid, unsigned int gid)
+int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
{
unsigned int id;
int cnt, rc = QUOTA_OK;
ENTRY;
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
struct osc_quota_info *oqi = NULL;
- id = (cnt == USRQUOTA) ? uid : gid;
+ id = (cnt == USRQUOTA) ? qid[USRQUOTA] : qid[GRPQUOTA];
oqi = find_qinfo(cli, id, cnt);
if (oqi) {
rc = NO_QUOTA;
break;
}
}
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
+ if (rc == NO_QUOTA)
+ CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
+ cnt == USRQUOTA ? "user" : "group", id);
RETURN(rc);
}
-int osc_quota_setdq(struct client_obd *cli, unsigned int uid, unsigned int gid,
+int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
obd_flag valid, obd_flag flags)
{
unsigned int id;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- struct osc_quota_info *oqi, *old;
+ struct osc_quota_info *oqi = NULL, *old;
if (!(valid & ((cnt == USRQUOTA) ?
OBD_MD_FLUSRQUOTA : OBD_MD_FLGRPQUOTA)))
continue;
- id = (cnt == USRQUOTA) ? uid : gid;
+ id = (cnt == USRQUOTA) ? qid[USRQUOTA] : qid[GRPQUOTA];
noquota = (cnt == USRQUOTA) ?
(flags & OBD_FL_NO_USRQUOTA) : (flags & OBD_FL_NO_GRPQUOTA);
- oqi = alloc_qinfo(cli, id, cnt);
- if (oqi) {
- spin_lock(&qinfo_list_lock);
-
- old = find_qinfo(cli, id, cnt);
- if (old && !noquota)
- remove_qinfo_hash(old);
- else if (!old && noquota)
- insert_qinfo_hash(oqi);
-
- spin_unlock(&qinfo_list_lock);
+ if (noquota) {
+ oqi = alloc_qinfo(cli, id, cnt);
+ if (!oqi) {
+ rc = -ENOMEM;
+ CDEBUG(D_QUOTA, "setdq for %s %d failed, "
+ "(rc = %d)\n",
+ cnt == USRQUOTA ? "user" : "group",
+ id, rc);
+ break;
+ }
+ }
- if (old || !noquota)
+ cfs_spin_lock(&qinfo_list_lock);
+ old = find_qinfo(cli, id, cnt);
+ if (old && !noquota)
+ remove_qinfo_hash(old);
+ else if (!old && noquota)
+ insert_qinfo_hash(oqi);
+ cfs_spin_unlock(&qinfo_list_lock);
+
+ if (old && !noquota)
+ CDEBUG(D_QUOTA, "setdq to remove for %s %d\n",
+ cnt == USRQUOTA ? "user" : "group", id);
+ else if (!old && noquota)
+ CDEBUG(D_QUOTA, "setdq to insert for %s %d\n",
+ cnt == USRQUOTA ? "user" : "group", id);
+
+ if (old) {
+ if (noquota)
free_qinfo(oqi);
- if (old && !noquota)
+ else
free_qinfo(old);
- } else {
- CERROR("not enough mem!\n");
- rc = -ENOMEM;
- break;
}
}
int i;
ENTRY;
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
for (i = 0; i < NR_DQHASH; i++) {
- list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
+ cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
if (oqi->oqi_cli != cli)
continue;
remove_qinfo_hash(oqi);
free_qinfo(oqi);
}
}
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
RETURN(0);
}
int i, rc;
ENTRY;
- spin_lock(&qinfo_list_lock);
+ cfs_spin_lock(&qinfo_list_lock);
for (i = 0; i < NR_DQHASH; i++) {
- list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
+ cfs_list_for_each_entry_safe(oqi, n, &qinfo_hash[i], oqi_hash) {
remove_qinfo_hash(oqi);
free_qinfo(oqi);
}
}
- spin_unlock(&qinfo_list_lock);
+ cfs_spin_unlock(&qinfo_list_lock);
rc = cfs_mem_cache_destroy(qinfo_cachep);
LASSERTF(rc == 0, "couldn't destory qinfo_cachep slab\n");