* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
* Use is subject to license terms.
*
* Author: Johann Lombardi <johann.lombardi@intel.com>
#include <linux/kthread.h>
#include "qsd_internal.h"
-extern struct kmem_cache *upd_kmem;
-
/*
* Allocate and fill a qsd_upd_rec structure to be processed by the writeback
* thread.
GOTO(out, rc);
/* refresh usage */
qsd_refresh_usage(env, lqe);
+
+ spin_lock(&qqi->qqi_qsd->qsd_adjust_lock);
+ lqe->lqe_adjust_time = 0;
+ spin_unlock(&qqi->qqi_qsd->qsd_adjust_lock);
+
/* Report usage asynchronously */
rc = qsd_adjust(env, lqe);
if (rc)
rc = qsd_update_index(env, qqi, &upd->qur_qid, upd->qur_global,
upd->qur_ver, &upd->qur_rec);
out:
+ if (upd->qur_global && rc == 0 &&
+ upd->qur_rec.lqr_glb_rec.qbr_softlimit == 0 &&
+ upd->qur_rec.lqr_glb_rec.qbr_hardlimit == 0 &&
+ (LQUOTA_FLAG(upd->qur_rec.lqr_glb_rec.qbr_time) &
+ LQUOTA_FLAG_DEFAULT)) {
+ lqe->lqe_is_default = true;
+ if (qqi->qqi_default_softlimit == 0 &&
+ qqi->qqi_default_hardlimit == 0)
+ lqe->lqe_enforced = false;
+ else
+ lqe->lqe_enforced = true;
+
+ LQUOTA_DEBUG(lqe, "update to use default quota");
+ }
+
if (lqe && !IS_ERR(lqe)) {
lqe_putref(lqe);
upd->qur_lqe = NULL;
struct lquota_entry *lqe;
lqe = list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
- if (ktime_get_seconds() > lqe->lqe_adjust_time)
+ if (ktime_get_seconds() >= lqe->lqe_adjust_time)
job_pending = true;
}
spin_unlock(&qsd->qsd_adjust_lock);
{
struct qsd_instance *qsd = (struct qsd_instance *)arg;
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi;
- struct list_head queue;
+ LIST_HEAD(queue);
struct qsd_upd_rec *upd, *n;
struct lu_env *env;
int qtype, rc = 0;
thread_set_flags(thread, SVC_RUNNING);
wake_up(&thread->t_ctl_waitq);
- INIT_LIST_HEAD(&queue);
- lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
while (1) {
- l_wait_event(thread->t_ctl_waitq,
- qsd_job_pending(qsd, &queue, &uptodate) ||
- !thread_is_running(thread), &lwi);
+ wait_event_idle_timeout(
+ thread->t_ctl_waitq,
+ qsd_job_pending(qsd, &queue, &uptodate) ||
+ !thread_is_running(thread),
+ cfs_time_seconds(QSD_WB_INTERVAL));
list_for_each_entry_safe(upd, n, &queue, qur_link) {
list_del_init(&upd->qur_link);
int qsd_start_upd_thread(struct qsd_instance *qsd)
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi = { 0 };
struct task_struct *task;
ENTRY;
RETURN(PTR_ERR(task));
}
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread));
RETURN(0);
}
void qsd_stop_upd_thread(struct qsd_instance *qsd)
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi = { 0 };
if (!thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
wake_up(&thread->t_ctl_waitq);
- l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
- &lwi);
+ wait_event_idle(thread->t_ctl_waitq, thread_is_stopped(thread));
}
qsd_cleanup_deferred(qsd);
qsd_cleanup_adjust(qsd);