* GPL HEADER END
*/
/*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
* Use is subject to license terms.
*
* Author: Johann Lombardi <johann.lombardi@intel.com>
#define DEBUG_SUBSYSTEM S_LQUOTA
+#include <linux/kthread.h>
#include "qsd_internal.h"
extern struct kmem_cache *upd_kmem;
{
struct qsd_upd_rec *upd;
- OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, __GFP_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, GFP_NOFS);
if (upd == NULL) {
- CERROR("Failed to allocate upd");
return NULL;
}
/* fill it */
- CFS_INIT_LIST_HEAD(&upd->qur_link);
+ INIT_LIST_HEAD(&upd->qur_link);
upd->qur_qqi = qqi;
upd->qur_lqe = lqe;
if (lqe)
}
/* must hold the qsd_lock */
-static void qsd_add_deferred(struct qsd_instance *qsd, cfs_list_t *list,
+static void qsd_add_deferred(struct qsd_instance *qsd, struct list_head *list,
struct qsd_upd_rec *upd)
{
struct qsd_upd_rec *tmp, *n;
}
/* Sort the updates in ascending order */
- cfs_list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
+ list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
/* There could be some legacy records which have duplicated
* version. Imagine following scenario: slave received global
* updates. We should just delete the legacy record in such
* case. */
if (upd->qur_ver == tmp->qur_ver) {
- LASSERT(tmp->qur_lqe);
- LQUOTA_ERROR(tmp->qur_lqe, "Found a conflict record "
- "with ver:"LPU64"", tmp->qur_ver);
- cfs_list_del_init(&tmp->qur_link);
+ if (tmp->qur_lqe)
+ LQUOTA_WARN(tmp->qur_lqe, "Found a conflict "
+ "record with ver:%llu",
+ tmp->qur_ver);
+ else
+ CWARN("%s: Found a conflict record with ver: "
+ "%llu\n", qsd->qsd_svname, tmp->qur_ver);
+
+ list_del_init(&tmp->qur_link);
qsd_upd_free(tmp);
- }
-
- if (upd->qur_ver < tmp->qur_ver) {
+ } else if (upd->qur_ver < tmp->qur_ver) {
continue;
} else {
- cfs_list_add_tail(&upd->qur_link, &tmp->qur_link);
+ list_add_tail(&upd->qur_link, &tmp->qur_link);
return;
}
}
- cfs_list_add(&upd->qur_link, list);
+ list_add(&upd->qur_link, list);
}
/* must hold the qsd_lock */
-static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
- __u64 ver)
+static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi,
+ struct list_head *list, __u64 ver)
{
struct qsd_upd_rec *upd, *tmp;
ENTRY;
/* Get the first update record in the list, which has the smallest
* version, discard all records with versions smaller than the current
* one */
- cfs_list_for_each_entry_safe(upd, tmp, list, qur_link) {
+ list_for_each_entry_safe(upd, tmp, list, qur_link) {
if (upd->qur_ver <= ver) {
/* drop this update */
- cfs_list_del_init(&upd->qur_link);
+ list_del_init(&upd->qur_link);
CDEBUG(D_QUOTA, "%s: skipping deferred update ver:"
- LPU64"/"LPU64", global:%d, qid:"LPU64"\n",
+ "%llu/%llu, global:%d, qid:%llu\n",
qqi->qqi_qsd->qsd_svname, upd->qur_ver, ver,
upd->qur_global, upd->qur_qid.qid_uid);
qsd_upd_free(upd);
}
/* No remaining deferred update */
- if (cfs_list_empty(list))
+ if (list_empty(list))
RETURN_EXIT;
CDEBUG(D_QUOTA, "%s: found deferred update record. "
- "version:"LPU64"/"LPU64", global:%d, qid:"LPU64"\n",
+ "version:%llu/%llu, global:%d, qid:%llu\n",
qqi->qqi_qsd->qsd_svname, upd->qur_ver, ver,
upd->qur_global, upd->qur_qid.qid_uid);
- LASSERTF(upd->qur_ver > ver, "lur_ver:"LPU64", cur_ver:"LPU64"\n",
+ LASSERTF(upd->qur_ver > ver, "lur_ver:%llu, cur_ver:%llu\n",
upd->qur_ver, ver);
	/* Kick off the deferred update */
*/
void qsd_bump_version(struct qsd_qtype_info *qqi, __u64 ver, bool global)
{
- cfs_list_t *list;
- __u64 *idx_ver;
+ struct list_head *list;
+ __u64 *idx_ver;
idx_ver = global ? &qqi->qqi_glb_ver : &qqi->qqi_slv_ver;
list = global ? &qqi->qqi_deferred_glb : &qqi->qqi_deferred_slv;
__u64 cur_ver;
ENTRY;
- CDEBUG(D_QUOTA, "%s: schedule update. global:%s, version:"LPU64"\n",
+ CDEBUG(D_QUOTA, "%s: schedule update. global:%s, version:%llu\n",
qsd->qsd_svname, global ? "true" : "false", ver);
upd = qsd_upd_alloc(qqi, lqe, qid, rec, ver, global);
/* legitimate race between glimpse AST and
* reintegration */
CDEBUG(D_QUOTA, "%s: discarding glb update from glimpse"
- " ver:"LPU64" local ver:"LPU64"\n",
+ " ver:%llu local ver:%llu\n",
qsd->qsd_svname, ver, cur_ver);
else
- CERROR("%s: discard slv update, ver:"LPU64" local ver:"
- LPU64"\n", qsd->qsd_svname, ver, cur_ver);
+ CERROR("%s: discard slv update, ver:%llu local ver:"
+ "%llu\n", qsd->qsd_svname, ver, cur_ver);
qsd_upd_free(upd);
} else if ((ver == cur_ver + 1) && qqi->qqi_glb_uptodate &&
qqi->qqi_slv_uptodate) {
/* Out of order update (the one with smaller version hasn't
* reached slave or hasn't been flushed to disk yet), or
* the reintegration is in progress. Defer the update. */
- cfs_list_t *list = global ? &qqi->qqi_deferred_glb :
- &qqi->qqi_deferred_slv;
+ struct list_head *list = global ? &qqi->qqi_deferred_glb :
+ &qqi->qqi_deferred_slv;
qsd_add_deferred(qsd, list, upd);
}
/* the lqe is being queued for the per-ID lock cancel, we should
* cancel the lock cancel and re-add it for quota adjust */
- if (!cfs_list_empty(&lqe->lqe_link) &&
+ if (!list_empty(&lqe->lqe_link) &&
lqe->lqe_adjust_time == 0) {
- cfs_list_del_init(&lqe->lqe_link);
+ list_del_init(&lqe->lqe_link);
lqe_putref(lqe);
}
- if (cfs_list_empty(&lqe->lqe_link)) {
+ if (list_empty(&lqe->lqe_link)) {
if (cancel)
lqe->lqe_adjust_time = 0;
else
lqe->lqe_adjust_time = defer ?
cfs_time_shift_64(QSD_WB_INTERVAL) :
cfs_time_current_64();
- /* lqe reference transfered to list */
+ /* lqe reference transferred to list */
if (defer)
- cfs_list_add_tail(&lqe->lqe_link,
+ list_add_tail(&lqe->lqe_link,
&qsd->qsd_adjust_list);
else
- cfs_list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
+ list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
added = true;
}
spin_unlock(&qsd->qsd_adjust_lock);
/* return true if there is pending writeback records or the pending
* adjust requests */
-static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
+static bool qsd_job_pending(struct qsd_instance *qsd, struct list_head *upd,
bool *uptodate)
{
bool job_pending = false;
int qtype;
- LASSERT(cfs_list_empty(upd));
+ LASSERT(list_empty(upd));
*uptodate = true;
spin_lock(&qsd->qsd_adjust_lock);
- if (!cfs_list_empty(&qsd->qsd_adjust_list)) {
+ if (!list_empty(&qsd->qsd_adjust_list)) {
struct lquota_entry *lqe;
- lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
+ lqe = list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
if (cfs_time_beforeq_64(lqe->lqe_adjust_time,
cfs_time_current_64()))
spin_unlock(&qsd->qsd_adjust_lock);
write_lock(&qsd->qsd_lock);
- if (!cfs_list_empty(&qsd->qsd_upd_list)) {
- cfs_list_splice_init(&qsd->qsd_upd_list, upd);
+ if (!list_empty(&qsd->qsd_upd_list)) {
+ list_splice_init(&qsd->qsd_upd_list, upd);
job_pending = true;
}
return job_pending;
}
- for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
+ for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++) {
struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];
if (!qsd_type_enabled(qsd, qtype))
struct qsd_instance *qsd = (struct qsd_instance *)arg;
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
struct l_wait_info lwi;
- cfs_list_t queue;
+ struct list_head queue;
struct qsd_upd_rec *upd, *n;
struct lu_env *env;
int qtype, rc = 0;
bool uptodate;
- struct lquota_entry *lqe, *tmp;
+ struct lquota_entry *lqe;
__u64 cur_time;
ENTRY;
rc = lu_env_init(env, LCT_DT_THREAD);
if (rc) {
- CERROR("%s: Fail to init env.", qsd->qsd_svname);
+ CERROR("%s: cannot init env: rc = %d\n", qsd->qsd_svname, rc);
OBD_FREE_PTR(env);
RETURN(rc);
}
thread_set_flags(thread, SVC_RUNNING);
wake_up(&thread->t_ctl_waitq);
- CFS_INIT_LIST_HEAD(&queue);
+ INIT_LIST_HEAD(&queue);
lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
while (1) {
l_wait_event(thread->t_ctl_waitq,
qsd_job_pending(qsd, &queue, &uptodate) ||
!thread_is_running(thread), &lwi);
- cfs_list_for_each_entry_safe(upd, n, &queue, qur_link) {
- cfs_list_del_init(&upd->qur_link);
+ list_for_each_entry_safe(upd, n, &queue, qur_link) {
+ list_del_init(&upd->qur_link);
qsd_process_upd(env, upd);
qsd_upd_free(upd);
}
spin_lock(&qsd->qsd_adjust_lock);
cur_time = cfs_time_current_64();
- cfs_list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list,
- lqe_link) {
+ while (!list_empty(&qsd->qsd_adjust_list)) {
+ lqe = list_entry(qsd->qsd_adjust_list.next,
+ struct lquota_entry, lqe_link);
/* deferred items are sorted by time */
if (!cfs_time_beforeq_64(lqe->lqe_adjust_time,
cur_time))
break;
- cfs_list_del_init(&lqe->lqe_link);
+ list_del_init(&lqe->lqe_link);
spin_unlock(&qsd->qsd_adjust_lock);
if (thread_is_running(thread) && uptodate) {
if (uptodate)
continue;
- for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++)
+ for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++)
qsd_start_reint_thread(qsd->qsd_type_array[qtype]);
}
lu_env_fini(env);
{
struct ptlrpc_thread *thread = &qsd->qsd_upd_thread;
struct l_wait_info lwi = { 0 };
- cfs_task_t *task;
+ struct task_struct *task;
ENTRY;
task = kthread_run(qsd_upd_thread, (void *)qsd,
"lquota_wb_%s", qsd->qsd_svname);
if (IS_ERR(task)) {
- CERROR("Fail to start quota update thread. rc: %ld\n",
+ CERROR("fail to start quota update thread: rc = %ld\n",
PTR_ERR(task));
thread_set_flags(thread, SVC_STOPPED);
RETURN(PTR_ERR(task));
{
int qtype;
- for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
+ for (qtype = USRQUOTA; qtype < LL_MAXQUOTAS; qtype++) {
struct qsd_upd_rec *upd, *tmp;
struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];
continue;
write_lock(&qsd->qsd_lock);
- cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
- qur_link) {
- CWARN("%s: Free global deferred upd: ID:"LPU64", "
- "ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
+ list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
+ qur_link) {
+ CWARN("%s: Free global deferred upd: ID:%llu, "
+ "ver:%llu/%llu\n", qsd->qsd_svname,
upd->qur_qid.qid_uid, upd->qur_ver,
qqi->qqi_glb_ver);
list_del_init(&upd->qur_link);
qsd_upd_free(upd);
}
- cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
- qur_link) {
- CWARN("%s: Free slave deferred upd: ID:"LPU64", "
- "ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
+ list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
+ qur_link) {
+ CWARN("%s: Free slave deferred upd: ID:%llu, "
+ "ver:%llu/%llu\n", qsd->qsd_svname,
upd->qur_qid.qid_uid, upd->qur_ver,
qqi->qqi_slv_ver);
list_del_init(&upd->qur_link);
struct lquota_entry *lqe;
spin_lock(&qsd->qsd_adjust_lock);
- while (!cfs_list_empty(&qsd->qsd_adjust_list)) {
- lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
- struct lquota_entry, lqe_link);
- cfs_list_del_init(&lqe->lqe_link);
+ while (!list_empty(&qsd->qsd_adjust_list)) {
+ lqe = list_entry(qsd->qsd_adjust_list.next,
+ struct lquota_entry, lqe_link);
+ list_del_init(&lqe->lqe_link);
lqe_putref(lqe);
}
spin_unlock(&qsd->qsd_adjust_lock);