* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
* Use is subject to license terms.
*
* Author: Johann Lombardi <johann.lombardi@intel.com>
#define DEBUG_SUBSYSTEM S_LQUOTA
+#include <linux/kthread.h>
#include "qsd_internal.h"
-extern struct kmem_cache *upd_kmem;
-
/*
 * Allocate and fill a qsd_upd_rec structure to be processed by the writeback
* thread.
if (!qsd->qsd_stopping) {
list_add_tail(&upd->qur_link, &qsd->qsd_upd_list);
/* wake up the upd thread */
- wake_up(&qsd->qsd_upd_thread.t_ctl_waitq);
+ if (qsd->qsd_upd_task)
+ wake_up_process(qsd->qsd_upd_task);
} else {
CWARN("%s: discard update.\n", qsd->qsd_svname);
if (upd->qur_lqe)
* updates. We should just delete the legacy record in such
* case. */
if (upd->qur_ver == tmp->qur_ver) {
- LASSERT(tmp->qur_lqe);
- LQUOTA_ERROR(tmp->qur_lqe, "Found a conflict record "
- "with ver:"LPU64"", tmp->qur_ver);
+ if (tmp->qur_lqe)
+ LQUOTA_WARN(tmp->qur_lqe, "Found a conflict "
+ "record with ver:%llu",
+ tmp->qur_ver);
+ else
+ CWARN("%s: Found a conflict record with ver: "
+ "%llu\n", qsd->qsd_svname, tmp->qur_ver);
+
list_del_init(&tmp->qur_link);
qsd_upd_free(tmp);
} else if (upd->qur_ver < tmp->qur_ver) {
/* drop this update */
list_del_init(&upd->qur_link);
CDEBUG(D_QUOTA, "%s: skipping deferred update ver:"
- LPU64"/"LPU64", global:%d, qid:"LPU64"\n",
+ "%llu/%llu, global:%d, qid:%llu\n",
qqi->qqi_qsd->qsd_svname, upd->qur_ver, ver,
upd->qur_global, upd->qur_qid.qid_uid);
qsd_upd_free(upd);
RETURN_EXIT;
CDEBUG(D_QUOTA, "%s: found deferred update record. "
- "version:"LPU64"/"LPU64", global:%d, qid:"LPU64"\n",
+ "version:%llu/%llu, global:%d, qid:%llu\n",
qqi->qqi_qsd->qsd_svname, upd->qur_ver, ver,
upd->qur_global, upd->qur_qid.qid_uid);
- LASSERTF(upd->qur_ver > ver, "lur_ver:"LPU64", cur_ver:"LPU64"\n",
+ LASSERTF(upd->qur_ver > ver, "lur_ver:%llu, cur_ver:%llu\n",
upd->qur_ver, ver);
 /* Kick off the deferred update */
__u64 cur_ver;
ENTRY;
- CDEBUG(D_QUOTA, "%s: schedule update. global:%s, version:"LPU64"\n",
+ CDEBUG(D_QUOTA, "%s: schedule update. global:%s, version:%llu\n",
qsd->qsd_svname, global ? "true" : "false", ver);
upd = qsd_upd_alloc(qqi, lqe, qid, rec, ver, global);
/* legitimate race between glimpse AST and
* reintegration */
CDEBUG(D_QUOTA, "%s: discarding glb update from glimpse"
- " ver:"LPU64" local ver:"LPU64"\n",
+ " ver:%llu local ver:%llu\n",
qsd->qsd_svname, ver, cur_ver);
else
- CERROR("%s: discard slv update, ver:"LPU64" local ver:"
- LPU64"\n", qsd->qsd_svname, ver, cur_ver);
+ CERROR("%s: discard slv update, ver:%llu local ver:"
+ "%llu\n", qsd->qsd_svname, ver, cur_ver);
qsd_upd_free(upd);
} else if ((ver == cur_ver + 1) && qqi->qqi_glb_uptodate &&
qqi->qqi_slv_uptodate) {
GOTO(out, rc);
/* refresh usage */
qsd_refresh_usage(env, lqe);
+
+ spin_lock(&qqi->qqi_qsd->qsd_adjust_lock);
+ lqe->lqe_adjust_time = 0;
+ spin_unlock(&qqi->qqi_qsd->qsd_adjust_lock);
+
/* Report usage asynchronously */
rc = qsd_adjust(env, lqe);
if (rc)
rc = qsd_update_index(env, qqi, &upd->qur_qid, upd->qur_global,
upd->qur_ver, &upd->qur_rec);
out:
+ if (upd->qur_global && rc == 0 &&
+ upd->qur_rec.lqr_glb_rec.qbr_softlimit == 0 &&
+ upd->qur_rec.lqr_glb_rec.qbr_hardlimit == 0 &&
+ (LQUOTA_FLAG(upd->qur_rec.lqr_glb_rec.qbr_time) &
+ LQUOTA_FLAG_DEFAULT)) {
+ lqe->lqe_is_default = true;
+ if (qqi->qqi_default_softlimit == 0 &&
+ qqi->qqi_default_hardlimit == 0)
+ lqe->lqe_enforced = false;
+ else
+ lqe->lqe_enforced = true;
+
+ LQUOTA_DEBUG(lqe, "update to use default quota");
+ }
+
if (lqe && !IS_ERR(lqe)) {
lqe_putref(lqe);
upd->qur_lqe = NULL;
}
if (list_empty(&lqe->lqe_link)) {
- if (cancel)
+ if (!cancel) {
+ lqe->lqe_adjust_time = ktime_get_seconds();
+ if (defer)
+ lqe->lqe_adjust_time += QSD_WB_INTERVAL;
+ } else {
lqe->lqe_adjust_time = 0;
- else
- lqe->lqe_adjust_time = defer ?
- cfs_time_shift_64(QSD_WB_INTERVAL) :
- cfs_time_current_64();
- /* lqe reference transfered to list */
+ }
+
+ /* lqe reference transferred to list */
if (defer)
list_add_tail(&lqe->lqe_link,
&qsd->qsd_adjust_list);
}
spin_unlock(&qsd->qsd_adjust_lock);
- if (added)
- wake_up(&qsd->qsd_upd_thread.t_ctl_waitq);
- else
+ if (!added)
lqe_putref(lqe);
+ else {
+ read_lock(&qsd->qsd_lock);
+ if (qsd->qsd_upd_task)
+ wake_up_process(qsd->qsd_upd_task);
+ read_unlock(&qsd->qsd_lock);
+ }
}
 /* return true if there are pending writeback records or the pending
struct lquota_entry *lqe;
lqe = list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
- if (cfs_time_beforeq_64(lqe->lqe_adjust_time,
- cfs_time_current_64()))
+ if (ktime_get_seconds() >= lqe->lqe_adjust_time)
job_pending = true;
}
spin_unlock(&qsd->qsd_adjust_lock);
job_pending = true;
}
- if (qsd->qsd_acct_failed) {
+ for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++) {
+ struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];
+
/* don't bother kicking off reintegration if space accounting
* failed to be enabled */
- write_unlock(&qsd->qsd_lock);
- return job_pending;
- }
-
- for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
- struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];
+ if (qqi->qqi_acct_failed)
+ continue;
if (!qsd_type_enabled(qsd, qtype))
continue;
return job_pending;
}
-static int qsd_upd_thread(void *arg)
+struct qsd_upd_args {
+ struct qsd_instance *qua_inst;
+ struct lu_env qua_env;
+ struct completion *qua_started;
+};
+
+#ifndef TASK_IDLE
+/* This identity is only safe inside kernel threads, or other places where
+ * all signals are disabled. So it is placed here rather than in an include
+ * file.
+ * TASK_IDLE was added in v4.1-rc4-43-g80ed87c8a9ca so this can be removed
+ * when we no longer support kernels older than that.
+ */
+#define TASK_IDLE TASK_INTERRUPTIBLE
+#endif
+
+static int qsd_upd_thread(void *_args)
{
- struct qsd_instance *qsd = (struct qsd_instance *)arg;
- struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi;
- struct list_head queue;
+ struct qsd_upd_args *args = _args;
+ struct qsd_instance *qsd = args->qua_inst;
+ LIST_HEAD(queue);
struct qsd_upd_rec *upd, *n;
- struct lu_env *env;
+ struct lu_env *env = &args->qua_env;
int qtype, rc = 0;
bool uptodate;
struct lquota_entry *lqe;
- __u64 cur_time;
+ time64_t cur_time;
ENTRY;
- OBD_ALLOC_PTR(env);
- if (env == NULL)
- RETURN(-ENOMEM);
-
- rc = lu_env_init(env, LCT_DT_THREAD);
- if (rc) {
- CERROR("%s: cannot init env: rc = %d\n", qsd->qsd_svname, rc);
- OBD_FREE_PTR(env);
- RETURN(rc);
- }
+ complete(args->qua_started);
+ while (({set_current_state(TASK_IDLE);
+ !kthread_should_stop(); })) {
- thread_set_flags(thread, SVC_RUNNING);
- wake_up(&thread->t_ctl_waitq);
-
- INIT_LIST_HEAD(&queue);
- lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
- while (1) {
- l_wait_event(thread->t_ctl_waitq,
- qsd_job_pending(qsd, &queue, &uptodate) ||
- !thread_is_running(thread), &lwi);
+ if (!qsd_job_pending(qsd, &queue, &uptodate))
+ schedule_timeout(cfs_time_seconds(QSD_WB_INTERVAL));
+ __set_current_state(TASK_RUNNING);
list_for_each_entry_safe(upd, n, &queue, qur_link) {
list_del_init(&upd->qur_link);
}
spin_lock(&qsd->qsd_adjust_lock);
- cur_time = cfs_time_current_64();
+ cur_time = ktime_get_seconds();
while (!list_empty(&qsd->qsd_adjust_list)) {
lqe = list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
/* deferred items are sorted by time */
- if (!cfs_time_beforeq_64(lqe->lqe_adjust_time,
- cur_time))
+ if (lqe->lqe_adjust_time > cur_time)
break;
list_del_init(&lqe->lqe_link);
spin_unlock(&qsd->qsd_adjust_lock);
- if (thread_is_running(thread) && uptodate) {
+ if (!kthread_should_stop() && uptodate) {
qsd_refresh_usage(env, lqe);
if (lqe->lqe_adjust_time == 0)
qsd_id_lock_cancel(env, lqe);
}
spin_unlock(&qsd->qsd_adjust_lock);
- if (!thread_is_running(thread))
- break;
-
- if (uptodate)
+ if (uptodate || kthread_should_stop())
continue;
- for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++)
+ for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++)
qsd_start_reint_thread(qsd->qsd_type_array[qtype]);
}
+ __set_current_state(TASK_RUNNING);
+
lu_env_fini(env);
- OBD_FREE_PTR(env);
- thread_set_flags(thread, SVC_STOPPED);
- wake_up(&thread->t_ctl_waitq);
+ OBD_FREE_PTR(args);
+
RETURN(rc);
}
int qsd_start_upd_thread(struct qsd_instance *qsd)
{
- struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi = { 0 };
- struct task_struct *task;
+ struct qsd_upd_args *args;
+ struct task_struct *task;
+ DECLARE_COMPLETION_ONSTACK(started);
+ int rc;
ENTRY;
- task = kthread_run(qsd_upd_thread, (void *)qsd,
- "lquota_wb_%s", qsd->qsd_svname);
+ OBD_ALLOC_PTR(args);
+ if (args == NULL)
+ RETURN(-ENOMEM);
+
+ rc = lu_env_init(&args->qua_env, LCT_DT_THREAD);
+ if (rc) {
+ CERROR("%s: cannot init env: rc = %d\n", qsd->qsd_svname, rc);
+ goto out_free;
+ }
+ args->qua_inst = qsd;
+ args->qua_started = &started;
+
+ task = kthread_create(qsd_upd_thread, args,
+ "lquota_wb_%s", qsd->qsd_svname);
if (IS_ERR(task)) {
- CERROR("fail to start quota update thread: rc = %ld\n",
- PTR_ERR(task));
- thread_set_flags(thread, SVC_STOPPED);
- RETURN(PTR_ERR(task));
+ rc = PTR_ERR(task);
+ CERROR("fail to start quota update thread: rc = %d\n", rc);
+ goto out_fini;
}
+ qsd->qsd_upd_task = task;
+ wake_up_process(task);
+ wait_for_completion(&started);
- l_wait_event(thread->t_ctl_waitq,
- thread_is_running(thread) || thread_is_stopped(thread),
- &lwi);
RETURN(0);
+
+out_fini:
+ lu_env_fini(&args->qua_env);
+out_free:
+ OBD_FREE_PTR(args);
+ RETURN(rc);
}
static void qsd_cleanup_deferred(struct qsd_instance *qsd)
{
int qtype;
- for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
+ for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++) {
struct qsd_upd_rec *upd, *tmp;
struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];
write_lock(&qsd->qsd_lock);
list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
qur_link) {
- CWARN("%s: Free global deferred upd: ID:"LPU64", "
- "ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
+ CWARN("%s: Free global deferred upd: ID:%llu, "
+ "ver:%llu/%llu\n", qsd->qsd_svname,
upd->qur_qid.qid_uid, upd->qur_ver,
qqi->qqi_glb_ver);
list_del_init(&upd->qur_link);
}
list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
qur_link) {
- CWARN("%s: Free slave deferred upd: ID:"LPU64", "
- "ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
+ CWARN("%s: Free slave deferred upd: ID:%llu, "
+ "ver:%llu/%llu\n", qsd->qsd_svname,
upd->qur_qid.qid_uid, upd->qur_ver,
qqi->qqi_slv_ver);
list_del_init(&upd->qur_link);
void qsd_stop_upd_thread(struct qsd_instance *qsd)
{
- struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
- struct l_wait_info lwi = { 0 };
+ struct task_struct *task;
- if (!thread_is_stopped(thread)) {
- thread_set_flags(thread, SVC_STOPPING);
- wake_up(&thread->t_ctl_waitq);
+ write_lock(&qsd->qsd_lock);
+ task = qsd->qsd_upd_task;
+ qsd->qsd_upd_task = NULL;
+ write_unlock(&qsd->qsd_lock);
+ if (task)
+ kthread_stop(task);
- l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
- &lwi);
- }
qsd_cleanup_deferred(qsd);
qsd_cleanup_adjust(qsd);
}