* GPL HEADER END
*/
/*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
* Use is subject to license terms.
*
* Author: Johann Lombardi <johann.lombardi@intel.com>
}
if (lqe->lqe_pending_rel != 0) {
- LQUOTA_ERROR(lqe, "no request in flight with pending_rel="LPU64,
+ LQUOTA_ERROR(lqe, "no request in flight with pending_rel=%llu",
lqe->lqe_pending_rel);
LBUG();
}
* the DQACQ since the limit for this ID has been removed, so we
* should not update quota entry & slave index copy neither. */
if (repbody != NULL && repbody->qb_count != 0) {
- LQUOTA_DEBUG(lqe, "DQACQ qb_count:"LPU64, repbody->qb_count);
+ LQUOTA_DEBUG(lqe, "DQACQ qb_count:%llu", repbody->qb_count);
if (req_is_rel(reqbody->qb_flags)) {
if (lqe->lqe_granted < repbody->qb_count) {
LQUOTA_ERROR(lqe, "can't release more space "
- "than owned "LPU64"<"LPU64,
+ "than owned %llu<%llu",
lqe->lqe_granted,
repbody->qb_count);
lqe->lqe_granted = 0;
}
/* extract information from lvb */
- if (ret == 0 && lvb != 0) {
+ if (ret == 0 && lvb != NULL) {
if (lvb->lvb_id_qunit != 0)
qsd_set_qunit(lqe, lvb->lvb_id_qunit);
qsd_set_edquot(lqe, !!(lvb->lvb_flags & LQUOTA_FL_EDQUOT));
adjust = qsd_adjust_needed(lqe);
if (reqbody && req_is_acq(reqbody->qb_flags) && ret != -EDQUOT) {
lqe->lqe_acq_rc = ret;
- lqe->lqe_acq_time = cfs_time_current_64();
+ lqe->lqe_acq_time = ktime_get_seconds();
}
out_noadjust:
qsd_request_exit(lqe);
* sometimes due to the race reply of dqacq vs. id lock glimpse
* (see LU-4505), so we revalidate it every 5 seconds. */
} else if (lqe->lqe_edquot &&
- cfs_time_before_64(cfs_time_shift_64(-5),
- lqe->lqe_edquot_time)) {
+ (lqe->lqe_edquot_time > ktime_get_seconds() - 5)) {
rc = -EDQUOT;
}else {
rc = -EAGAIN;
/* check whether an acquire request completed recently */
if (lqe->lqe_acq_rc != 0 &&
- cfs_time_before_64(cfs_time_shift_64(-1), lqe->lqe_acq_time)) {
+ lqe->lqe_acq_time > ktime_get_seconds() - 1) {
lqe_write_unlock(lqe);
LQUOTA_DEBUG(lqe, "using cached return code %d", lqe->lqe_acq_rc);
RETURN(lqe->lqe_acq_rc);
ENTRY;
for (count = 0; rc == 0; count++) {
- LQUOTA_DEBUG(lqe, "acquiring:"LPD64 " count=%d", space, count);
+ LQUOTA_DEBUG(lqe, "acquiring:%lld count=%d", space, count);
if (lqe2qqi(lqe)->qqi_qsd->qsd_stopping) {
rc = -EINPROGRESS;
* rc < 0, something bad happened */
break;
+ /* if we have gotten some quota and still wait more quota,
+ * it's better to give QMT some time to reclaim from clients */
+ if (count > 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
+
/* need to acquire more quota space from master */
rc = qsd_acquire_remote(env, lqe);
}
struct lquota_id_info *qid, long long space,
int *flags)
{
- struct lquota_entry *lqe;
- int rc, ret = -EINPROGRESS;
- struct l_wait_info lwi;
+ struct lquota_entry *lqe;
+ struct l_wait_info lwi;
+ int qtype_flag = 0;
+ int rc, ret = -EINPROGRESS;
ENTRY;
if (qid->lqi_qentry != NULL) {
RETURN(0);
}
- LQUOTA_DEBUG(lqe, "op_begin space:"LPD64, space);
+ LQUOTA_DEBUG(lqe, "op_begin space:%lld", space);
lqe_write_lock(lqe);
lqe->lqe_waiting_write += space;
out_flags:
LASSERT(qid->lqi_is_blk);
if (rc != 0) {
- *flags |= LQUOTA_OVER_FL(qqi->qqi_qtype);
+ *flags |= lquota_over_fl(qqi->qqi_qtype);
} else {
__u64 usage;
usage += lqe->lqe_waiting_write;
usage += qqi->qqi_qsd->qsd_sync_threshold;
+ qtype_flag = lquota_over_fl(qqi->qqi_qtype);
/* if we should notify client to start sync write */
if (usage >= lqe->lqe_granted - lqe->lqe_pending_rel)
- *flags |= LQUOTA_OVER_FL(qqi->qqi_qtype);
+ *flags |= qtype_flag;
else
- *flags &= ~LQUOTA_OVER_FL(qqi->qqi_qtype);
+ *flags &= ~qtype_flag;
lqe_read_unlock(lqe);
}
}
if (unlikely(qsd == NULL))
RETURN(0);
+ if (qsd->qsd_dev->dd_rdonly)
+ RETURN(0);
+
/* We don't enforce quota until the qsd_instance is started */
read_lock(&qsd->qsd_lock);
if (!qsd->qsd_started) {
* or - the user/group is root
* or - quota accounting isn't enabled */
if (!qsd_type_enabled(qsd, qi->lqi_type) || qi->lqi_id.qid_uid == 0 ||
- qsd->qsd_acct_failed)
+ (qsd->qsd_type_array[qi->lqi_type])->qqi_acct_failed)
RETURN(0);
LASSERTF(trans->lqt_id_cnt <= QUOTA_MAX_TRANSIDS, "id_cnt=%d\n",
qqi = lqe2qqi(lqe);
qsd = qqi->qqi_qsd;
+ if (qsd->qsd_dev->dd_rdonly)
+ RETURN(0);
+
lqe_write_lock(lqe);
/* fill qb_count & qb_flags */
if (unlikely(qsd == NULL))
RETURN_EXIT;
+ if (qsd->qsd_dev->dd_rdonly)
+ RETURN_EXIT;
+
/* We don't enforce quota until the qsd_instance is started */
read_lock(&qsd->qsd_lock);
if (!qsd->qsd_started) {
lqe = lqe_locate(env, qqi->qqi_site, qid);
if (IS_ERR(lqe)) {
- CERROR("%s: fail to locate lqe for id:"LPU64", type:%d\n",
+ CERROR("%s: fail to locate lqe for id:%llu, type:%d\n",
qsd->qsd_svname, qid->qid_uid, qtype);
RETURN_EXIT;
}