Whamcloud - gitweb
LU-3460 quota: don't schedule adjust when qsd stopped 31/6731/2
author: Niu Yawei <yawei.niu@intel.com>
Fri, 21 Jun 2013 08:05:25 +0000 (04:05 -0400)
committer: Oleg Drokin <oleg.drokin@intel.com>
Wed, 10 Jul 2013 02:50:55 +0000 (02:50 +0000)
When the qsd is stopped, we should not schedule quota adjust or
index update anymore, because the writeback thread has been stopped,
and those scheduled requests will never be processed.

Signed-off-by: Niu Yawei <yawei.niu@intel.com>
Change-Id: I33e0bbc7eb22be8a7edbc031f36ceff666575ebe
Reviewed-on: http://review.whamcloud.com/6731
Reviewed-by: Johann Lombardi <johann.lombardi@intel.com>
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Fan Yong <fan.yong@intel.com>
lustre/quota/qsd_writeback.c

index 828ff43..a2b01a4 100644 (file)
@@ -93,18 +93,27 @@ static void qsd_upd_add(struct qsd_instance *qsd, struct qsd_upd_rec *upd)
                /* wake up the upd thread */
                cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
        } else {
-               CWARN("%s: discard deferred update.\n", qsd->qsd_svname);
+               CWARN("%s: discard update.\n", qsd->qsd_svname);
                if (upd->qur_lqe)
-                       LQUOTA_WARN(upd->qur_lqe, "discard deferred update.");
+                       LQUOTA_WARN(upd->qur_lqe, "discard update.");
                qsd_upd_free(upd);
        }
 }
 
 /* must hold the qsd_lock */
-static void qsd_add_deferred(cfs_list_t *list, struct qsd_upd_rec *upd)
+static void qsd_add_deferred(struct qsd_instance *qsd, cfs_list_t *list,
+                            struct qsd_upd_rec *upd)
 {
        struct qsd_upd_rec      *tmp, *n;
 
+       if (qsd->qsd_stopping) {
+               CWARN("%s: discard deferred update.\n", qsd->qsd_svname);
+               if (upd->qur_lqe)
+                       LQUOTA_WARN(upd->qur_lqe, "discard deferred update.");
+               qsd_upd_free(upd);
+               return;
+       }
+
        /* Sort the updates in ascending order */
        cfs_list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
 
@@ -261,7 +270,7 @@ void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
                 * the reintegration is in progress. Defer the update. */
                cfs_list_t *list = global ? &qqi->qqi_deferred_glb :
                                            &qqi->qqi_deferred_slv;
-               qsd_add_deferred(list, upd);
+               qsd_add_deferred(qsd, list, upd);
        }
 
        write_unlock(&qsd->qsd_lock);
@@ -311,6 +320,13 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
        struct qsd_instance     *qsd = lqe2qqi(lqe)->qqi_qsd;
        bool                     added = false;
 
+       read_lock(&qsd->qsd_lock);
+       if (qsd->qsd_stopping) {
+               read_unlock(&qsd->qsd_lock);
+               return;
+       }
+       read_unlock(&qsd->qsd_lock);
+
        lqe_getref(lqe);
        spin_lock(&qsd->qsd_adjust_lock);