__u64 lme_gracetime;
/* last time we glimpsed */
- __u64 lme_revoke_time;
+ time64_t lme_revoke_time;
/* r/w semaphore used to protect concurrent access to the quota
* parameters which are stored on disk */
__u64 lse_usage;
/* time to trigger quota adjust */
- __u64 lse_adjust_time;
+ time64_t lse_adjust_time;
/* return code of latest acquire RPC */
int lse_acq_rc;
/* when latest acquire RPC completed */
- __u64 lse_acq_time;
+ time64_t lse_acq_time;
/* when latest edquot set */
- __u64 lse_edquot_time;
+ time64_t lse_edquot_time;
};
/* In-memory entry for each enforced quota id
libcfs_debug_vmsg2(msgdata, fmt, args,
"qmt:%s pool:%d-%s id:%llu enforced:%d hard:%llu"
" soft:%llu granted:%llu time:%llu qunit:"
- "%llu edquot:%d may_rel:%llu revoke:%llu\n",
+ "%llu edquot:%d may_rel:%llu revoke:%lld\n",
pool->qpi_qmt->qmt_svname,
pool->qpi_key & 0x0000ffff,
RES_NAME(pool->qpi_key >> 16),
/* See comment in qmt_adjust_qunit(). LU-4139 */
if (qmt_hard_exhausted(lqe) ||
pool->qpi_key >> 16 != LQUOTA_RES_DT) {
+ time64_t lapse;
+
/* we haven't reached the minimal qunit yet so there is
* still hope that the rebalancing process might free
* up some quota space */
RETURN_EXIT;
/* Let's give more time to slave to release space */
- if (lqe->lqe_may_rel != 0 &&
- cfs_time_before_64(cfs_time_shift_64(
- -QMT_REBA_TIMEOUT),
- lqe->lqe_revoke_time))
+ lapse = ktime_get_seconds() - QMT_REBA_TIMEOUT;
+ if (lqe->lqe_may_rel != 0 && lqe->lqe_revoke_time > lapse)
RETURN_EXIT;
} else {
if (lqe->lqe_qunit > pool->qpi_soft_least_qunit)
qmt_id_lock_notify(pool->qpi_qmt, lqe);
else if (lqe->lqe_qunit == pool->qpi_least_qunit)
/* initial qunit value is the smallest one */
- lqe->lqe_revoke_time = cfs_time_current_64();
+ lqe->lqe_revoke_time = ktime_get_seconds();
EXIT;
}
* were initialized */
qmt_adjust_qunit(env, lqe);
if (lqe->lqe_qunit != 0)
- qmt_adjust_edquot(lqe, cfs_time_current_sec());
+ qmt_adjust_edquot(lqe, ktime_get_real_seconds());
}
}
struct qmt_thread_info *qti = qmt_info(env);
struct lquota_entry *lqe;
struct thandle *th = NULL;
- __u64 ver, now;
+ time64_t now;
+ __u64 ver;
bool dirtied = false;
int rc = 0;
ENTRY;
if (IS_ERR(th))
GOTO(out_nolock, rc = PTR_ERR(th));
- now = cfs_time_current_sec();
+ now = ktime_get_real_seconds();
lqe_write_lock(lqe);
LQUOTA_DEBUG(lqe, "changing quota settings valid:%x hard:%llu soft:"
slv_granted_bck = slv_granted;
/* record current time for soft limit & grace time management */
- now = (__u64)cfs_time_current_sec();
+ now = ktime_get_real_seconds();
if (req_is_rel(qb_flags)) {
/* Slave would like to release quota space */
lqe_write_lock(lqe);
if (lqe->lqe_revoke_time == 0 &&
lqe->lqe_qunit == pool->qpi_least_qunit)
- lqe->lqe_revoke_time = cfs_time_current_64();
+ lqe->lqe_revoke_time = ktime_get_seconds();
lqe_write_unlock(lqe);
RETURN_EXIT;
}
if (lqe->lqe_revoke_time == 0 &&
qti->qti_gl_desc.lquota_desc.gl_qunit == pool->qpi_least_qunit &&
lqe->lqe_qunit == pool->qpi_least_qunit) {
- lqe->lqe_revoke_time = cfs_time_current_64();
- qmt_adjust_edquot(lqe, cfs_time_current_sec());
+ lqe->lqe_revoke_time = ktime_get_seconds();
+ qmt_adjust_edquot(lqe, ktime_get_real_seconds());
}
LASSERT(lqe->lqe_gl);
lqe->lqe_gl = false;
adjust = qsd_adjust_needed(lqe);
if (reqbody && req_is_acq(reqbody->qb_flags) && ret != -EDQUOT) {
lqe->lqe_acq_rc = ret;
- lqe->lqe_acq_time = cfs_time_current_64();
+ lqe->lqe_acq_time = ktime_get_seconds();
}
out_noadjust:
qsd_request_exit(lqe);
* sometimes due to the race reply of dqacq vs. id lock glimpse
* (see LU-4505), so we revalidate it every 5 seconds. */
} else if (lqe->lqe_edquot &&
- cfs_time_before_64(cfs_time_shift_64(-5),
- lqe->lqe_edquot_time)) {
+ (lqe->lqe_edquot_time > ktime_get_seconds() - 5)) {
rc = -EDQUOT;
}else {
rc = -EAGAIN;
/* check whether an acquire request completed recently */
if (lqe->lqe_acq_rc != 0 &&
- cfs_time_before_64(cfs_time_shift_64(-1), lqe->lqe_acq_time)) {
+ lqe->lqe_acq_time > ktime_get_seconds() - 1) {
lqe_write_unlock(lqe);
LQUOTA_DEBUG(lqe, "using cached return code %d", lqe->lqe_acq_rc);
RETURN(lqe->lqe_acq_rc);
{
lqe->lqe_edquot = edquot;
if (edquot)
- lqe->lqe_edquot_time = cfs_time_current_64();
+ lqe->lqe_edquot_time = ktime_get_seconds();
}
#define QSD_WB_INTERVAL 60 /* 60 seconds */
}
if (list_empty(&lqe->lqe_link)) {
- if (cancel)
+ if (!cancel) {
+ lqe->lqe_adjust_time = ktime_get_seconds();
+ if (defer)
+ lqe->lqe_adjust_time += QSD_WB_INTERVAL;
+ } else {
lqe->lqe_adjust_time = 0;
- else
- lqe->lqe_adjust_time = defer ?
- cfs_time_shift_64(QSD_WB_INTERVAL) :
- cfs_time_current_64();
+ }
+
/* lqe reference transferred to list */
if (defer)
list_add_tail(&lqe->lqe_link,
struct lquota_entry *lqe;
lqe = list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
- if (cfs_time_beforeq_64(lqe->lqe_adjust_time,
- cfs_time_current_64()))
+		if (ktime_get_seconds() >= lqe->lqe_adjust_time)
job_pending = true;
}
spin_unlock(&qsd->qsd_adjust_lock);
int qtype, rc = 0;
bool uptodate;
struct lquota_entry *lqe;
- __u64 cur_time;
+ time64_t cur_time;
ENTRY;
OBD_ALLOC_PTR(env);
}
spin_lock(&qsd->qsd_adjust_lock);
- cur_time = cfs_time_current_64();
+ cur_time = ktime_get_seconds();
while (!list_empty(&qsd->qsd_adjust_list)) {
lqe = list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
/* deferred items are sorted by time */
- if (!cfs_time_beforeq_64(lqe->lqe_adjust_time,
- cur_time))
+ if (lqe->lqe_adjust_time > cur_time)
break;
list_del_init(&lqe->lqe_link);