LU-3460 quota: don't schedule adjust when qsd stopped 69/8169/2
author Niu Yawei <yawei.niu@intel.com>
Fri, 21 Jun 2013 08:05:25 +0000 (04:05 -0400)
committer Oleg Drokin <oleg.drokin@intel.com>
Wed, 6 Nov 2013 15:12:34 +0000 (15:12 +0000)
When the qsd is stopped, we should no longer schedule quota adjusts
or index updates, because the writeback thread has already been
stopped and such scheduled requests would never be processed.
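
In essence, the fix adds one guard at each point that queues work for the
writeback thread. A condensed sketch of the pattern is shown below
(illustration only, merging the hunks that follow; qsd_add_deferred()
itself is called with qsd_lock already held, so it performs the same
check without the extra locking shown here):

        read_lock(&qsd->qsd_lock);
        if (qsd->qsd_stopping) {
                /* The writeback thread is already gone, so a queued record
                 * would never be processed; drop it now instead. */
                read_unlock(&qsd->qsd_lock);
                if (upd != NULL)
                        qsd_upd_free(upd);
                return;
        }
        read_unlock(&qsd->qsd_lock);
        /* ... otherwise queue the update/adjust request as before ... */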

Lustre-commit: 76ddf87488010a38a3e7b0b5923e0fe8e725326a
Lustre-change: http://review.whamcloud.com/6731

Signed-off-by: Bob Glossman <bob.glossman@intel.com>
Signed-off-by: Niu Yawei <yawei.niu@intel.com>
Reviewed-by: Johann Lombardi <johann.lombardi@intel.com>
Reviewed-by: Fan Yong <fan.yong@intel.com>
Change-Id: I7c10c79db4176b3af8c7741dd438a2095ddb9bb5
Reviewed-on: http://review.whamcloud.com/8169
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
lustre/quota/qsd_writeback.c

index fdc1ef5..451c3b4 100644
--- a/lustre/quota/qsd_writeback.c
+++ b/lustre/quota/qsd_writeback.c
@@ -93,18 +93,27 @@ static void qsd_upd_add(struct qsd_instance *qsd, struct qsd_upd_rec *upd)
                /* wake up the upd thread */
                cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
        } else {
-               CWARN("%s: discard deferred update.\n", qsd->qsd_svname);
+               CWARN("%s: discard update.\n", qsd->qsd_svname);
                if (upd->qur_lqe)
-                       LQUOTA_WARN(upd->qur_lqe, "discard deferred update.");
+                       LQUOTA_WARN(upd->qur_lqe, "discard update.");
                qsd_upd_free(upd);
        }
 }
 
 /* must hold the qsd_lock */
-static void qsd_add_deferred(cfs_list_t *list, struct qsd_upd_rec *upd)
+static void qsd_add_deferred(struct qsd_instance *qsd, cfs_list_t *list,
+                            struct qsd_upd_rec *upd)
 {
        struct qsd_upd_rec      *tmp, *n;
 
+       if (qsd->qsd_stopping) {
+               CWARN("%s: discard deferred update.\n", qsd->qsd_svname);
+               if (upd->qur_lqe)
+                       LQUOTA_WARN(upd->qur_lqe, "discard deferred update.");
+               qsd_upd_free(upd);
+               return;
+       }
+
        /* Sort the updates in ascending order */
        cfs_list_for_each_entry_safe_reverse(tmp, n, list, qur_link) {
 
@@ -261,7 +270,7 @@ void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
                 * the reintegration is in progress. Defer the update. */
                cfs_list_t *list = global ? &qqi->qqi_deferred_glb :
                                            &qqi->qqi_deferred_slv;
-               qsd_add_deferred(list, upd);
+               qsd_add_deferred(qsd, list, upd);
        }
 
        write_unlock(&qsd->qsd_lock);
@@ -311,6 +320,13 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
        struct qsd_instance     *qsd = lqe2qqi(lqe)->qqi_qsd;
        bool                     added = false;
 
+       read_lock(&qsd->qsd_lock);
+       if (qsd->qsd_stopping) {
+               read_unlock(&qsd->qsd_lock);
+               return;
+       }
+       read_unlock(&qsd->qsd_lock);
+
        lqe_getref(lqe);
        spin_lock(&qsd->qsd_adjust_lock);
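
For reference, the qsd_stopping flag tested above is assumed to be raised by
the qsd shutdown path (qsd_fini() in qsd_lib.c) under the write lock, roughly
as sketched below; this counterpart is not part of this patch:

        /* Assumed shutdown side (not in this change): mark the instance as
         * stopping so the checks added above start discarding new work. */
        write_lock(&qsd->qsd_lock);
        qsd->qsd_stopping = true;
        write_unlock(&qsd->qsd_lock);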