From fe87443dcf15226000366ae638e71889dc18db95 Mon Sep 17 00:00:00 2001 From: Niu Yawei Date: Wed, 9 Jul 2014 02:25:24 -0400 Subject: [PATCH] LU-4249 quota: race in qsd_upd_thread() qsd_upd_thread() uses list_for_each_entry_safe() to process list items one by one; however, it has to drop the lock while processing each item, which races with other threads that modify the list. The proper way is to re-check the list head each time after the lock is reacquired. Lustre-commit: f8f7c34a7bbcf22aeca7699ce76254e41e3e95b7 Lustre-change: http://review.whamcloud.com/10988 Signed-off-by: Niu Yawei Change-Id: I83e665db4209d52c1358505125abdcba75a0a6fa Reviewed-on: http://review.whamcloud.com/11020 Tested-by: Jenkins Reviewed-by: Johann Lombardi Reviewed-by: Fan Yong Tested-by: Maloo --- lustre/quota/qsd_writeback.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lustre/quota/qsd_writeback.c b/lustre/quota/qsd_writeback.c index ea90c39..cbed0fe 100644 --- a/lustre/quota/qsd_writeback.c +++ b/lustre/quota/qsd_writeback.c @@ -419,7 +419,7 @@ static int qsd_upd_thread(void *arg) struct lu_env *env; int qtype, rc = 0; bool uptodate; - struct lquota_entry *lqe, *tmp; + struct lquota_entry *lqe; __u64 cur_time; ENTRY; @@ -452,8 +452,9 @@ static int qsd_upd_thread(void *arg) spin_lock(&qsd->qsd_adjust_lock); cur_time = cfs_time_current_64(); - cfs_list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list, lqe_link) { + while (!list_empty(&qsd->qsd_adjust_list)) { + lqe = list_entry(qsd->qsd_adjust_list.next, struct lquota_entry, lqe_link); /* deferred items are sorted by time */ if (!cfs_time_beforeq_64(lqe->lqe_adjust_time, cur_time)) -- 1.8.3.1