* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
* Use is subject to license terms.
*
* Author: Johann Lombardi <johann.lombardi@intel.com>
#include <linux/kthread.h>
#include "qsd_internal.h"
-extern struct kmem_cache *upd_kmem;
-
/*
 * Allocate and fill a qsd_upd_rec structure to be processed by the writeback
* thread.
GOTO(out, rc);
/* refresh usage */
qsd_refresh_usage(env, lqe);
+
+ spin_lock(&qqi->qqi_qsd->qsd_adjust_lock);
+ lqe->lqe_adjust_time = 0;
+ spin_unlock(&qqi->qqi_qsd->qsd_adjust_lock);
+
/* Report usage asynchronously */
rc = qsd_adjust(env, lqe);
if (rc)
rc = qsd_update_index(env, qqi, &upd->qur_qid, upd->qur_global,
upd->qur_ver, &upd->qur_rec);
out:
+ if (upd->qur_global && rc == 0 &&
+ upd->qur_rec.lqr_glb_rec.qbr_softlimit == 0 &&
+ upd->qur_rec.lqr_glb_rec.qbr_hardlimit == 0 &&
+ (LQUOTA_FLAG(upd->qur_rec.lqr_glb_rec.qbr_time) &
+ LQUOTA_FLAG_DEFAULT)) {
+ lqe->lqe_is_default = true;
+ if (qqi->qqi_default_softlimit == 0 &&
+ qqi->qqi_default_hardlimit == 0)
+ lqe->lqe_enforced = false;
+ else
+ lqe->lqe_enforced = true;
+
+ LQUOTA_DEBUG(lqe, "update to use default quota");
+ }
+
if (lqe && !IS_ERR(lqe)) {
lqe_putref(lqe);
upd->qur_lqe = NULL;
}
if (list_empty(&lqe->lqe_link)) {
- if (cancel)
+ if (!cancel) {
+ lqe->lqe_adjust_time = ktime_get_seconds();
+ if (defer)
+ lqe->lqe_adjust_time += QSD_WB_INTERVAL;
+ } else {
lqe->lqe_adjust_time = 0;
- else
- lqe->lqe_adjust_time = defer ?
- cfs_time_shift_64(QSD_WB_INTERVAL) :
- cfs_time_current_64();
+ }
+
/* lqe reference transferred to list */
if (defer)
list_add_tail(&lqe->lqe_link,
struct lquota_entry *lqe;
lqe = list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
- if (cfs_time_beforeq_64(lqe->lqe_adjust_time,
- cfs_time_current_64()))
+ if (ktime_get_seconds() >= lqe->lqe_adjust_time)
job_pending = true;
}
spin_unlock(&qsd->qsd_adjust_lock);
job_pending = true;
}
- if (qsd->qsd_acct_failed) {
- /* don't bother kicking off reintegration if space accounting
- * failed to be enabled */
- write_unlock(&qsd->qsd_lock);
- return job_pending;
- }
-
for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++) {
struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];
+ /* don't bother kicking off reintegration if space accounting
+ * failed to be enabled */
+ if (qqi->qqi_acct_failed)
+ continue;
+
if (!qsd_type_enabled(qsd, qtype))
continue;
{
struct qsd_instance *qsd = (struct qsd_instance *)arg;
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi;
- struct list_head queue;
+ LIST_HEAD(queue);
struct qsd_upd_rec *upd, *n;
struct lu_env *env;
int qtype, rc = 0;
bool uptodate;
struct lquota_entry *lqe;
- __u64 cur_time;
+ time64_t cur_time;
ENTRY;
OBD_ALLOC_PTR(env);
thread_set_flags(thread, SVC_RUNNING);
wake_up(&thread->t_ctl_waitq);
- INIT_LIST_HEAD(&queue);
- lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
while (1) {
- l_wait_event(thread->t_ctl_waitq,
- qsd_job_pending(qsd, &queue, &uptodate) ||
- !thread_is_running(thread), &lwi);
+ wait_event_idle_timeout(
+ thread->t_ctl_waitq,
+ qsd_job_pending(qsd, &queue, &uptodate) ||
+ !thread_is_running(thread),
+ cfs_time_seconds(QSD_WB_INTERVAL));
list_for_each_entry_safe(upd, n, &queue, qur_link) {
list_del_init(&upd->qur_link);
}
spin_lock(&qsd->qsd_adjust_lock);
- cur_time = cfs_time_current_64();
+ cur_time = ktime_get_seconds();
while (!list_empty(&qsd->qsd_adjust_list)) {
lqe = list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
/* deferred items are sorted by time */
- if (!cfs_time_beforeq_64(lqe->lqe_adjust_time,
- cur_time))
+ if (lqe->lqe_adjust_time > cur_time)
break;
list_del_init(&lqe->lqe_link);
int qsd_start_upd_thread(struct qsd_instance *qsd)
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
ENTRY;
RETURN(PTR_ERR(task));
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
RETURN(0);
}
void qsd_stop_upd_thread(struct qsd_instance *qsd)
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi = { 0 };
if (!thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
wake_up(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq, thread_is_stopped(thread));
}
qsd_cleanup_deferred(qsd);
qsd_cleanup_adjust(qsd);