Whamcloud - gitweb
LU-5710 all: second batch of corrected typos and grammar errors
[fs/lustre-release.git] / lustre / quota / qsd_writeback.c
index cafadd6..67e2d8f 100644 (file)
  * GPL HEADER END
  */
 /*
- * Copyright (c) 2011, 2012, Intel, Inc.
+ * Copyright (c) 2012, 2014, Intel Corporation.
  * Use is subject to license terms.
  *
  * Author: Johann Lombardi <johann.lombardi@intel.com>
  * Author: Niu    Yawei    <yawei.niu@intel.com>
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
-
 #define DEBUG_SUBSYSTEM S_LQUOTA
 
 #include "qsd_internal.h"
 
-extern cfs_mem_cache_t *upd_kmem;
+extern struct kmem_cache *upd_kmem;
 
 /*
  * Allocate and fill an qsd_upd_rec structure to be processed by the writeback
@@ -58,14 +54,13 @@ static struct qsd_upd_rec *qsd_upd_alloc(struct qsd_qtype_info *qqi,
 {
        struct qsd_upd_rec      *upd;
 
-       OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, CFS_ALLOC_IO);
+       OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, GFP_NOFS);
        if (upd == NULL) {
-               CERROR("Failed to allocate upd");
                return NULL;
        }
 
        /* fill it */
-       CFS_INIT_LIST_HEAD(&upd->qur_link);
+       INIT_LIST_HEAD(&upd->qur_link);
        upd->qur_qqi = qqi;
        upd->qur_lqe = lqe;
        if (lqe)
@@ -91,39 +86,58 @@ static void qsd_upd_add(struct qsd_instance *qsd, struct qsd_upd_rec *upd)
        if (!qsd->qsd_stopping) {
                list_add_tail(&upd->qur_link, &qsd->qsd_upd_list);
                /* wake up the upd thread */
-               cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
+               wake_up(&qsd->qsd_upd_thread.t_ctl_waitq);
        } else {
-               CWARN("%s: discard deferred update.\n", qsd->qsd_svname);
+               CWARN("%s: discard update.\n", qsd->qsd_svname);
                if (upd->qur_lqe)
-                       LQUOTA_WARN(upd->qur_lqe, "discard deferred update.");
+                       LQUOTA_WARN(upd->qur_lqe, "discard update.");
                qsd_upd_free(upd);
        }
 }
 
 /* must hold the qsd_lock */
-static void qsd_add_deferred(cfs_list_t *list, struct qsd_upd_rec *upd)
+static void qsd_add_deferred(struct qsd_instance *qsd, struct list_head *list,
+                            struct qsd_upd_rec *upd)
 {
-       struct qsd_upd_rec      *tmp;
-
-       /* Sort the updates in ascending order */
-       cfs_list_for_each_entry_reverse(tmp, list, qur_link) {
+       struct qsd_upd_rec      *tmp, *n;
 
-               LASSERTF(upd->qur_ver != tmp->qur_ver, "ver:"LPU64"\n",
-                        upd->qur_ver);
+       if (qsd->qsd_stopping) {
+               CWARN("%s: discard deferred update.\n", qsd->qsd_svname);
+               if (upd->qur_lqe)
+                       LQUOTA_WARN(upd->qur_lqe, "discard deferred update.");
+               qsd_upd_free(upd);
+               return;
+       }
 
-               if (upd->qur_ver < tmp->qur_ver) {
+       /* Sort the updates in ascending order */
+       list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
+
+               /* There could be some legacy records which have duplicated
+                * version. Imagine following scenario: slave received global
+                * glimpse and queued a record in the deferred list, then
+                * master crashes and rolls back to an earlier version, then the
+                * version of queued record will be conflicting with later
+                * updates. We should just delete the legacy record in such
+                * case. */
+               if (upd->qur_ver == tmp->qur_ver) {
+                       LASSERT(tmp->qur_lqe);
+                       LQUOTA_ERROR(tmp->qur_lqe, "Found a conflict record "
+                                    "with ver:"LPU64"", tmp->qur_ver);
+                       list_del_init(&tmp->qur_link);
+                       qsd_upd_free(tmp);
+               } else if (upd->qur_ver < tmp->qur_ver) {
                        continue;
                } else {
-                       cfs_list_add_tail(&upd->qur_link, &tmp->qur_link);
+                       list_add_tail(&upd->qur_link, &tmp->qur_link);
                        return;
                }
        }
-       cfs_list_add(&upd->qur_link, list);
+       list_add(&upd->qur_link, list);
 }
 
 /* must hold the qsd_lock */
-static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
-                                __u64 ver)
+static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi,
+                                struct list_head *list, __u64 ver)
 {
        struct qsd_upd_rec      *upd, *tmp;
        ENTRY;
@@ -131,10 +145,10 @@ static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
        /* Get the first update record in the list, which has the smallest
         * version, discard all records with versions smaller than the current
         * one */
-       cfs_list_for_each_entry_safe(upd, tmp, list, qur_link) {
+       list_for_each_entry_safe(upd, tmp, list, qur_link) {
                if (upd->qur_ver <= ver) {
                        /* drop this update */
-                       cfs_list_del_init(&upd->qur_link);
+                       list_del_init(&upd->qur_link);
                        CDEBUG(D_QUOTA, "%s: skipping deferred update ver:"
                               LPU64"/"LPU64", global:%d, qid:"LPU64"\n",
                               qqi->qqi_qsd->qsd_svname, upd->qur_ver, ver,
@@ -146,7 +160,7 @@ static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
        }
 
        /* No remaining deferred update */
-       if (cfs_list_empty(list))
+       if (list_empty(list))
                RETURN_EXIT;
 
        CDEBUG(D_QUOTA, "%s: found deferred update record. "
@@ -173,20 +187,20 @@ static void qsd_kickoff_deferred(struct qsd_qtype_info *qqi, cfs_list_t *list,
  */
 void qsd_bump_version(struct qsd_qtype_info *qqi, __u64 ver, bool global)
 {
-       cfs_list_t      *list;
-       __u64           *idx_ver;
+       struct list_head *list;
+       __u64            *idx_ver;
 
        idx_ver = global ? &qqi->qqi_glb_ver : &qqi->qqi_slv_ver;
        list    = global ? &qqi->qqi_deferred_glb : &qqi->qqi_deferred_slv;
 
-       cfs_write_lock(&qqi->qqi_qsd->qsd_lock);
+       write_lock(&qqi->qqi_qsd->qsd_lock);
        *idx_ver = ver;
        if (global)
                qqi->qqi_glb_uptodate = 1;
        else
                qqi->qqi_slv_uptodate = 1;
        qsd_kickoff_deferred(qqi, list, ver);
-       cfs_write_unlock(&qqi->qqi_qsd->qsd_lock);
+       write_unlock(&qqi->qqi_qsd->qsd_lock);
 }
 
 /*
@@ -197,7 +211,7 @@ void qsd_bump_version(struct qsd_qtype_info *qqi, __u64 ver, bool global)
  * \param  qid   - quota id
  * \param  rec   - global or slave record to be updated to disk
  * \param  ver   - new index file version
- * \param  global- ture : master record; false : slave record
+ * \param  global- true: master record; false: slave record
  */
 void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
                      union lquota_id *qid, union lquota_rec *rec, __u64 ver,
@@ -218,13 +232,13 @@ void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
        /* If we don't want update index version, no need to sort the
         * records in version order, just schedule the updates instantly. */
        if (ver == 0) {
-               cfs_write_lock(&qsd->qsd_lock);
+               write_lock(&qsd->qsd_lock);
                qsd_upd_add(qsd, upd);
-               cfs_write_unlock(&qsd->qsd_lock);
+               write_unlock(&qsd->qsd_lock);
                RETURN_EXIT;
        }
 
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
 
        cur_ver = global ? qqi->qqi_glb_ver : qqi->qqi_slv_ver;
 
@@ -247,12 +261,12 @@ void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
                /* Out of order update (the one with smaller version hasn't
                 * reached slave or hasn't been flushed to disk yet), or
                 * the reintegration is in progress. Defer the update. */
-               cfs_list_t *list = global ? &qqi->qqi_deferred_glb :
-                                           &qqi->qqi_deferred_slv;
-               qsd_add_deferred(list, upd);
+               struct list_head *list = global ? &qqi->qqi_deferred_glb :
+                                                 &qqi->qqi_deferred_slv;
+               qsd_add_deferred(qsd, list, upd);
        }
 
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        EXIT;
 }
@@ -276,12 +290,12 @@ static int qsd_process_upd(const struct lu_env *env, struct qsd_upd_rec *upd)
                rc = qsd_update_lqe(env, lqe, upd->qur_global, &upd->qur_rec);
                if (rc)
                        GOTO(out, rc);
+               /* refresh usage */
+               qsd_refresh_usage(env, lqe);
                /* Report usage asynchronously */
-               if (lqe->lqe_enforced &&
-                   !qsd_refresh_usage(env, lqe)) {
-                       rc = qsd_dqacq(env, lqe, QSD_REP);
-                       LQUOTA_DEBUG(lqe, "Report usage. rc:%d", rc);
-               }
+               rc = qsd_adjust(env, lqe);
+               if (rc)
+                       LQUOTA_ERROR(lqe, "failed to report usage, rc:%d", rc);
        }
 
        rc = qsd_update_index(env, qqi, &upd->qur_qid, upd->qur_global,
@@ -299,79 +313,96 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
        struct qsd_instance     *qsd = lqe2qqi(lqe)->qqi_qsd;
        bool                     added = false;
 
+       read_lock(&qsd->qsd_lock);
+       if (qsd->qsd_stopping) {
+               read_unlock(&qsd->qsd_lock);
+               return;
+       }
+       read_unlock(&qsd->qsd_lock);
+
        lqe_getref(lqe);
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
+       spin_lock(&qsd->qsd_adjust_lock);
 
        /* the lqe is being queued for the per-ID lock cancel, we should
         * cancel the lock cancel and re-add it for quota adjust */
-       if (!cfs_list_empty(&lqe->lqe_link) &&
+       if (!list_empty(&lqe->lqe_link) &&
            lqe->lqe_adjust_time == 0) {
-               cfs_list_del_init(&lqe->lqe_link);
+               list_del_init(&lqe->lqe_link);
                lqe_putref(lqe);
        }
 
-       if (cfs_list_empty(&lqe->lqe_link)) {
+       if (list_empty(&lqe->lqe_link)) {
                if (cancel)
                        lqe->lqe_adjust_time = 0;
                else
                        lqe->lqe_adjust_time = defer ?
                                cfs_time_shift_64(QSD_WB_INTERVAL) :
                                cfs_time_current_64();
-               /* lqe reference transfered to list */
+               /* lqe reference transferred to list */
                if (defer)
-                       cfs_list_add_tail(&lqe->lqe_link,
+                       list_add_tail(&lqe->lqe_link,
                                          &qsd->qsd_adjust_list);
                else
-                       cfs_list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
+                       list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
                added = true;
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 
        if (added)
-               cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
+               wake_up(&qsd->qsd_upd_thread.t_ctl_waitq);
        else
                lqe_putref(lqe);
 }
 
 /* return true if there is pending writeback records or the pending
  * adjust requests */
-static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
+static bool qsd_job_pending(struct qsd_instance *qsd, struct list_head *upd,
                            bool *uptodate)
 {
        bool    job_pending = false;
        int     qtype;
 
-       LASSERT(cfs_list_empty(upd));
+       LASSERT(list_empty(upd));
        *uptodate = true;
 
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
-       if (!cfs_list_empty(&qsd->qsd_adjust_list)) {
+       spin_lock(&qsd->qsd_adjust_lock);
+       if (!list_empty(&qsd->qsd_adjust_list)) {
                struct lquota_entry *lqe;
-               lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
+               lqe = list_entry(qsd->qsd_adjust_list.next,
                                     struct lquota_entry, lqe_link);
                if (cfs_time_beforeq_64(lqe->lqe_adjust_time,
                                        cfs_time_current_64()))
                        job_pending = true;
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 
-       cfs_write_lock(&qsd->qsd_lock);
-       if (!cfs_list_empty(&qsd->qsd_upd_list)) {
-               cfs_list_splice_init(&qsd->qsd_upd_list, upd);
+       write_lock(&qsd->qsd_lock);
+       if (!list_empty(&qsd->qsd_upd_list)) {
+               list_splice_init(&qsd->qsd_upd_list, upd);
                job_pending = true;
        }
 
+       if (qsd->qsd_acct_failed) {
+               /* don't bother kicking off reintegration if space accounting
+                * failed to be enabled */
+               write_unlock(&qsd->qsd_lock);
+               return job_pending;
+       }
+
        for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
                struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];
 
                if (!qsd_type_enabled(qsd, qtype))
                        continue;
 
-               if (!qqi->qqi_glb_uptodate || !qqi->qqi_slv_uptodate)
+               if ((!qqi->qqi_glb_uptodate || !qqi->qqi_slv_uptodate) &&
+                    !qqi->qqi_reint)
+                       /* global or slave index not up to date and reint
+                        * thread not running */
                        *uptodate = false;
        }
 
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
        return job_pending;
 }
 
@@ -380,13 +411,12 @@ static int qsd_upd_thread(void *arg)
        struct qsd_instance     *qsd = (struct qsd_instance *)arg;
        struct ptlrpc_thread    *thread = &qsd->qsd_upd_thread;
        struct l_wait_info       lwi;
-       cfs_list_t               queue;
+       struct list_head         queue;
        struct qsd_upd_rec      *upd, *n;
-       char                     pname[MTI_NAME_MAXLEN];
        struct lu_env           *env;
        int                      qtype, rc = 0;
        bool                     uptodate;
-       struct lquota_entry     *lqe, *tmp;
+       struct lquota_entry     *lqe;
        __u64                    cur_time;
        ENTRY;
 
@@ -396,54 +426,52 @@ static int qsd_upd_thread(void *arg)
 
        rc = lu_env_init(env, LCT_DT_THREAD);
        if (rc) {
-               CERROR("%s: Fail to init env.", qsd->qsd_svname);
+               CERROR("%s: cannot init env: rc = %d\n", qsd->qsd_svname, rc);
                OBD_FREE_PTR(env);
                RETURN(rc);
        }
 
-       snprintf(pname, MTI_NAME_MAXLEN, "lquota_wb_%s", qsd->qsd_svname);
-       cfs_daemonize(pname);
-
        thread_set_flags(thread, SVC_RUNNING);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
 
-       CFS_INIT_LIST_HEAD(&queue);
+       INIT_LIST_HEAD(&queue);
        lwi = LWI_TIMEOUT(cfs_time_seconds(QSD_WB_INTERVAL), NULL, NULL);
        while (1) {
                l_wait_event(thread->t_ctl_waitq,
                             qsd_job_pending(qsd, &queue, &uptodate) ||
                             !thread_is_running(thread), &lwi);
 
-               cfs_list_for_each_entry_safe(upd, n, &queue, qur_link) {
-                       cfs_list_del_init(&upd->qur_link);
+               list_for_each_entry_safe(upd, n, &queue, qur_link) {
+                       list_del_init(&upd->qur_link);
                        qsd_process_upd(env, upd);
                        qsd_upd_free(upd);
                }
 
-               cfs_spin_lock(&qsd->qsd_adjust_lock);
+               spin_lock(&qsd->qsd_adjust_lock);
                cur_time = cfs_time_current_64();
-               cfs_list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list,
-                                            lqe_link) {
+               while (!list_empty(&qsd->qsd_adjust_list)) {
+                       lqe = list_entry(qsd->qsd_adjust_list.next,
+                                        struct lquota_entry, lqe_link);
                        /* deferred items are sorted by time */
                        if (!cfs_time_beforeq_64(lqe->lqe_adjust_time,
                                                 cur_time))
                                break;
 
-                       cfs_list_del_init(&lqe->lqe_link);
-                       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+                       list_del_init(&lqe->lqe_link);
+                       spin_unlock(&qsd->qsd_adjust_lock);
 
                        if (thread_is_running(thread) && uptodate) {
                                qsd_refresh_usage(env, lqe);
                                if (lqe->lqe_adjust_time == 0)
                                        qsd_id_lock_cancel(env, lqe);
                                else
-                                       qsd_dqacq(env, lqe, QSD_ADJ);
+                                       qsd_adjust(env, lqe);
                        }
 
                        lqe_putref(lqe);
-                       cfs_spin_lock(&qsd->qsd_adjust_lock);
+                       spin_lock(&qsd->qsd_adjust_lock);
                }
-               cfs_spin_unlock(&qsd->qsd_adjust_lock);
+               spin_unlock(&qsd->qsd_adjust_lock);
 
                if (!thread_is_running(thread))
                        break;
@@ -457,7 +485,7 @@ static int qsd_upd_thread(void *arg)
        lu_env_fini(env);
        OBD_FREE_PTR(env);
        thread_set_flags(thread, SVC_STOPPED);
-       cfs_waitq_signal(&thread->t_ctl_waitq);
+       wake_up(&thread->t_ctl_waitq);
        RETURN(rc);
 }
 
@@ -465,14 +493,16 @@ int qsd_start_upd_thread(struct qsd_instance *qsd)
 {
        struct ptlrpc_thread    *thread = &qsd->qsd_upd_thread;
        struct l_wait_info       lwi = { 0 };
-       int                      rc;
+       struct task_struct              *task;
        ENTRY;
 
-       rc = cfs_create_thread(qsd_upd_thread, (void *)qsd, 0);
-       if (rc < 0) {
-               CERROR("Fail to start quota update thread. rc: %d\n", rc);
+       task = kthread_run(qsd_upd_thread, (void *)qsd,
+                          "lquota_wb_%s", qsd->qsd_svname);
+       if (IS_ERR(task)) {
+               CERROR("fail to start quota update thread: rc = %ld\n",
+                       PTR_ERR(task));
                thread_set_flags(thread, SVC_STOPPED);
-               RETURN(rc);
+               RETURN(PTR_ERR(task));
        }
 
        l_wait_event(thread->t_ctl_waitq,
@@ -492,9 +522,9 @@ static void qsd_cleanup_deferred(struct qsd_instance *qsd)
                if (qqi == NULL)
                        continue;
 
-               cfs_write_lock(&qsd->qsd_lock);
-               cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
-                                            qur_link) {
+               write_lock(&qsd->qsd_lock);
+               list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
+                                        qur_link) {
                        CWARN("%s: Free global deferred upd: ID:"LPU64", "
                              "ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
                              upd->qur_qid.qid_uid, upd->qur_ver,
@@ -502,8 +532,8 @@ static void qsd_cleanup_deferred(struct qsd_instance *qsd)
                        list_del_init(&upd->qur_link);
                        qsd_upd_free(upd);
                }
-               cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
-                                            qur_link) {
+               list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_slv,
+                                        qur_link) {
                        CWARN("%s: Free slave deferred upd: ID:"LPU64", "
                              "ver:"LPU64"/"LPU64"\n", qsd->qsd_svname,
                              upd->qur_qid.qid_uid, upd->qur_ver,
@@ -511,7 +541,7 @@ static void qsd_cleanup_deferred(struct qsd_instance *qsd)
                        list_del_init(&upd->qur_link);
                        qsd_upd_free(upd);
                }
-               cfs_write_unlock(&qsd->qsd_lock);
+               write_unlock(&qsd->qsd_lock);
        }
 }
 
@@ -519,14 +549,14 @@ static void qsd_cleanup_adjust(struct qsd_instance *qsd)
 {
        struct lquota_entry     *lqe;
 
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
-       while (!cfs_list_empty(&qsd->qsd_adjust_list)) {
-               lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
-                                    struct lquota_entry, lqe_link);
-               cfs_list_del_init(&lqe->lqe_link);
+       spin_lock(&qsd->qsd_adjust_lock);
+       while (!list_empty(&qsd->qsd_adjust_list)) {
+               lqe = list_entry(qsd->qsd_adjust_list.next,
+                                struct lquota_entry, lqe_link);
+               list_del_init(&lqe->lqe_link);
                lqe_putref(lqe);
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 }
 
 void qsd_stop_upd_thread(struct qsd_instance *qsd)
@@ -536,7 +566,7 @@ void qsd_stop_upd_thread(struct qsd_instance *qsd)
 
        if (!thread_is_stopped(thread)) {
                thread_set_flags(thread, SVC_STOPPING);
-               cfs_waitq_signal(&thread->t_ctl_waitq);
+               wake_up(&thread->t_ctl_waitq);
 
                l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
                             &lwi);