In sub_trans_commit_cb(), the commit check should
be protected by a lock; otherwise, in some racy
scenarios, all_committed will never become true,
even though all sub transactions have been committed.
Signed-off-by: wang di <di.wang@intel.com>
Change-Id: I8f43ca8083753ab6eef4f2be56ef77bb8640bb79
Reviewed-on: http://review.whamcloud.com/15690
Reviewed-by: Niu Yawei <yawei.niu@intel.com>
Tested-by: Jenkins
Reviewed-by: Alex Zhuravlev <alexey.zhuravlev@intel.com>
Reviewed-by: James Simmons <uja.ornl@yahoo.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
atomic_t tmt_refcount;
/* Other sub transactions will be listed here. */
struct list_head tmt_sub_thandle_list;
+ spinlock_t tmt_sub_lock;
struct list_head tmt_commit_list;
/* All of update records will packed here */
ENTRY;
/* Check if all sub thandles are committed */
+ spin_lock(&tmt->tmt_sub_lock);
list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
if (st->st_sub_th == sub_th) {
st->st_committed = 1;
if (!st->st_committed)
all_committed = false;
}
+ spin_unlock(&tmt->tmt_sub_lock);
if (tmt->tmt_result == 0)
tmt->tmt_result = err;
INIT_LIST_HEAD(&tmt->tmt_sub_thandle_list);
INIT_LIST_HEAD(&tmt->tmt_commit_list);
atomic_set(&tmt->tmt_refcount, 1);
-
+ spin_lock_init(&tmt->tmt_sub_lock);
init_waitqueue_head(&tmt->tmt_stop_waitq);
+
top_th->tt_multiple_thandle = tmt;
return 0;