Whamcloud - gitweb
LU-2361 quota: allow upgraded fs to start w/o spc accounting
[fs/lustre-release.git] / lustre / quota / qsd_writeback.c
index 6eb8931..871a14e 100644 (file)
@@ -21,7 +21,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright (c) 2011, 2012, Intel, Inc.
+ * Copyright (c) 2012, Intel Corporation.
  * Use is subject to license terms.
  *
  * Author: Johann Lombardi <johann.lombardi@intel.com>
@@ -103,13 +103,25 @@ static void qsd_upd_add(struct qsd_instance *qsd, struct qsd_upd_rec *upd)
 /* must hold the qsd_lock */
 static void qsd_add_deferred(cfs_list_t *list, struct qsd_upd_rec *upd)
 {
-       struct qsd_upd_rec      *tmp;
+       struct qsd_upd_rec      *tmp, *n;
 
        /* Sort the updates in ascending order */
-       cfs_list_for_each_entry_reverse(tmp, list, qur_link) {
-
-               LASSERTF(upd->qur_ver != tmp->qur_ver, "ver:"LPU64"\n",
-                        upd->qur_ver);
+       cfs_list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
+
+               /* There could be some legacy records which have duplicated
+                * versions. Imagine the following scenario: the slave received
+                * a global glimpse and queued a record in the deferred list,
+                * then the master crashed and rolled back to an earlier
+                * version, so the version of the queued record conflicts with
+                * later updates. We should just delete the legacy record in
+                * such a case. */
+               if (upd->qur_ver == tmp->qur_ver) {
+                       LASSERT(tmp->qur_lqe);
+                       LQUOTA_ERROR(tmp->qur_lqe, "Found a conflict record "
+                                    "with ver:"LPU64"", tmp->qur_ver);
+                       cfs_list_del_init(&tmp->qur_link);
+                       qsd_upd_free(tmp);
+               }
 
                if (upd->qur_ver < tmp->qur_ver) {
                        continue;
@@ -179,14 +191,14 @@ void qsd_bump_version(struct qsd_qtype_info *qqi, __u64 ver, bool global)
        idx_ver = global ? &qqi->qqi_glb_ver : &qqi->qqi_slv_ver;
        list    = global ? &qqi->qqi_deferred_glb : &qqi->qqi_deferred_slv;
 
-       cfs_write_lock(&qqi->qqi_qsd->qsd_lock);
+       write_lock(&qqi->qqi_qsd->qsd_lock);
        *idx_ver = ver;
        if (global)
                qqi->qqi_glb_uptodate = 1;
        else
                qqi->qqi_slv_uptodate = 1;
        qsd_kickoff_deferred(qqi, list, ver);
-       cfs_write_unlock(&qqi->qqi_qsd->qsd_lock);
+       write_unlock(&qqi->qqi_qsd->qsd_lock);
 }
 
 /*
@@ -218,13 +230,13 @@ void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
        /* If we don't want update index version, no need to sort the
         * records in version order, just schedule the updates instantly. */
        if (ver == 0) {
-               cfs_write_lock(&qsd->qsd_lock);
+               write_lock(&qsd->qsd_lock);
                qsd_upd_add(qsd, upd);
-               cfs_write_unlock(&qsd->qsd_lock);
+               write_unlock(&qsd->qsd_lock);
                RETURN_EXIT;
        }
 
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
 
        cur_ver = global ? qqi->qqi_glb_ver : qqi->qqi_slv_ver;
 
@@ -252,7 +264,7 @@ void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
                qsd_add_deferred(list, upd);
        }
 
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        EXIT;
 }
@@ -276,12 +288,12 @@ static int qsd_process_upd(const struct lu_env *env, struct qsd_upd_rec *upd)
                rc = qsd_update_lqe(env, lqe, upd->qur_global, &upd->qur_rec);
                if (rc)
                        GOTO(out, rc);
+               /* refresh usage */
+               qsd_refresh_usage(env, lqe);
                /* Report usage asynchronously */
-               if (lqe->lqe_enforced &&
-                   !qsd_refresh_usage(env, lqe)) {
-                       rc = qsd_dqacq(env, lqe, QSD_REP);
-                       LQUOTA_DEBUG(lqe, "Report usage. rc:%d", rc);
-               }
+               rc = qsd_adjust(env, lqe);
+               if (rc)
+                       LQUOTA_ERROR(lqe, "failed to report usage, rc:%d", rc);
        }
 
        rc = qsd_update_index(env, qqi, &upd->qur_qid, upd->qur_global,
@@ -300,7 +312,7 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
        bool                     added = false;
 
        lqe_getref(lqe);
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
+       spin_lock(&qsd->qsd_adjust_lock);
 
        /* the lqe is being queued for the per-ID lock cancel, we should
         * cancel the lock cancel and re-add it for quota adjust */
@@ -325,7 +337,7 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
                        cfs_list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
                added = true;
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 
        if (added)
                cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
@@ -344,7 +356,7 @@ static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
        LASSERT(cfs_list_empty(upd));
        *uptodate = true;
 
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
+       spin_lock(&qsd->qsd_adjust_lock);
        if (!cfs_list_empty(&qsd->qsd_adjust_list)) {
                struct lquota_entry *lqe;
                lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
@@ -353,14 +365,21 @@ static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
                                        cfs_time_current_64()))
                        job_pending = true;
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
        if (!cfs_list_empty(&qsd->qsd_upd_list)) {
                cfs_list_splice_init(&qsd->qsd_upd_list, upd);
                job_pending = true;
        }
 
+       if (qsd->qsd_acct_failed) {
+               /* don't bother kicking off reintegration if space accounting
+                * failed to be enabled */
+               write_unlock(&qsd->qsd_lock);
+               return job_pending;
+       }
+
        for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
                struct qsd_qtype_info *qqi = qsd->qsd_type_array[qtype];
 
@@ -374,7 +393,7 @@ static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
                        *uptodate = false;
        }
 
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
        return job_pending;
 }
 
@@ -423,7 +442,7 @@ static int qsd_upd_thread(void *arg)
                        qsd_upd_free(upd);
                }
 
-               cfs_spin_lock(&qsd->qsd_adjust_lock);
+               spin_lock(&qsd->qsd_adjust_lock);
                cur_time = cfs_time_current_64();
                cfs_list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list,
                                             lqe_link) {
@@ -433,20 +452,20 @@ static int qsd_upd_thread(void *arg)
                                break;
 
                        cfs_list_del_init(&lqe->lqe_link);
-                       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+                       spin_unlock(&qsd->qsd_adjust_lock);
 
                        if (thread_is_running(thread) && uptodate) {
                                qsd_refresh_usage(env, lqe);
                                if (lqe->lqe_adjust_time == 0)
                                        qsd_id_lock_cancel(env, lqe);
                                else
-                                       qsd_dqacq(env, lqe, QSD_ADJ);
+                                       qsd_adjust(env, lqe);
                        }
 
                        lqe_putref(lqe);
-                       cfs_spin_lock(&qsd->qsd_adjust_lock);
+                       spin_lock(&qsd->qsd_adjust_lock);
                }
-               cfs_spin_unlock(&qsd->qsd_adjust_lock);
+               spin_unlock(&qsd->qsd_adjust_lock);
 
                if (!thread_is_running(thread))
                        break;
@@ -495,7 +514,7 @@ static void qsd_cleanup_deferred(struct qsd_instance *qsd)
                if (qqi == NULL)
                        continue;
 
-               cfs_write_lock(&qsd->qsd_lock);
+               write_lock(&qsd->qsd_lock);
                cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
                                             qur_link) {
                        CWARN("%s: Free global deferred upd: ID:"LPU64", "
@@ -514,7 +533,7 @@ static void qsd_cleanup_deferred(struct qsd_instance *qsd)
                        list_del_init(&upd->qur_link);
                        qsd_upd_free(upd);
                }
-               cfs_write_unlock(&qsd->qsd_lock);
+               write_unlock(&qsd->qsd_lock);
        }
 }
 
@@ -522,14 +541,14 @@ static void qsd_cleanup_adjust(struct qsd_instance *qsd)
 {
        struct lquota_entry     *lqe;
 
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
+       spin_lock(&qsd->qsd_adjust_lock);
        while (!cfs_list_empty(&qsd->qsd_adjust_list)) {
                lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
                                     struct lquota_entry, lqe_link);
                cfs_list_del_init(&lqe->lqe_link);
                lqe_putref(lqe);
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 }
 
 void qsd_stop_upd_thread(struct qsd_instance *qsd)