From ec1a5d4f1f5b52ee5031ddc11a862c82996541f7 Mon Sep 17 00:00:00 2001
From: Jinshan Xiong
Date: Wed, 2 Apr 2014 17:35:45 -0700
Subject: [PATCH] LU-4558 clio: Solve a race in cl_lock_put

Checking the last reference and the state of a cl_lock in
cl_lock_put() is not atomic. If the process is preempted between
atomic_dec_and_test() and the test of
(lock->cll_state == CLS_FREEING), a lock that is still in use can
be freed.

Solve this by making coh_locks hold a reference on every cached
lock. Then, once the lock's refcount reaches zero, nobody else can
have any chance to use it again.

Signed-off-by: Jinshan Xiong
Change-Id: I08b81bcd6a4040ea9608db97d60aa6706405ee8c
---
 lustre/obdclass/cl_lock.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/lustre/obdclass/cl_lock.c b/lustre/obdclass/cl_lock.c
index 8ac48d5..d37276c 100644
--- a/lustre/obdclass/cl_lock.c
+++ b/lustre/obdclass/cl_lock.c
@@ -556,8 +556,9 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
 	spin_lock(&head->coh_lock_guard);
 	ghost = cl_lock_lookup(env, obj, io, need);
 	if (ghost == NULL) {
-		cfs_list_add_tail(&lock->cll_linkage,
-				  &head->coh_locks);
+		cl_lock_get_trust(lock);
+		list_add_tail(&lock->cll_linkage,
+			      &head->coh_locks);
 		spin_unlock(&head->coh_lock_guard);
 		CS_LOCK_INC(obj, busy);
 	} else {
@@ -819,15 +820,22 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
 	ENTRY;
 
 	if (lock->cll_state < CLS_FREEING) {
+		bool in_cache;
+
 		LASSERT(lock->cll_state != CLS_INTRANSIT);
 		cl_lock_state_set(env, lock, CLS_FREEING);
 
 		head = cl_object_header(lock->cll_descr.cld_obj);
 
 		spin_lock(&head->coh_lock_guard);
-		cfs_list_del_init(&lock->cll_linkage);
+		in_cache = !list_empty(&lock->cll_linkage);
+		if (in_cache)
+			list_del_init(&lock->cll_linkage);
 		spin_unlock(&head->coh_lock_guard);
 
+		if (in_cache) /* coh_locks cache holds a refcount. */
+			cl_lock_put(env, lock);
+
 		/*
 		 * From now on, no new references to this lock can be acquired
 		 * by cl_lock_lookup().
--
1.8.3.1
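
For readers outside the Lustre tree, here is a minimal standalone sketch
of the race and of the invariant this patch establishes. All names
(xlock, cache_insert, cache_delete, put_racy) are hypothetical; it uses
C11 atomics and free() in place of the kernel primitives and compresses
cl_lock_find()/cl_lock_delete0() into toy functions, so it is an
illustration of the pattern, not the actual client-lock code.

/* Sketch only -- hypothetical names, not Lustre code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

enum xstate { ACTIVE, FREEING };

struct xlock {
	atomic_int	refcount;
	enum xstate	state;		/* guarded by a spinlock in real code */
	bool		in_cache;	/* linked into the object's lock cache? */
};

/*
 * Racy shape of the old cl_lock_put(): the decrement and the state test
 * are two separate steps. If the old code was preempted at the marked
 * point while the lock was still in the cache, a concurrent lookup could
 * take a new reference and start using a lock that was then freed.
 */
static void put_racy(struct xlock *lk)
{
	if (atomic_fetch_sub(&lk->refcount, 1) == 1) {
		/* <-- preemption window in the unfixed code */
		if (lk->state == FREEING)
			free(lk);
	}
}

/*
 * Shape of the fix: the cache takes its own reference when the lock is
 * inserted, and deletion drops that reference only after unlinking. The
 * refcount can therefore reach zero only once no lookup can still return
 * the lock, which is what makes put_racy()'s final check safe.
 */
static void cache_insert(struct xlock *lk)
{
	atomic_fetch_add(&lk->refcount, 1);	/* the cache's reference */
	lk->in_cache = true;
}

static void cache_delete(struct xlock *lk)
{
	lk->state = FREEING;
	if (lk->in_cache) {
		lk->in_cache = false;		/* unlink from the cache */
		put_racy(lk);			/* drop the cache's reference */
	}
}

int main(void)
{
	struct xlock *lk = calloc(1, sizeof(*lk));

	atomic_store(&lk->refcount, 1);		/* user's reference */
	cache_insert(lk);			/* refcount == 2 */
	cache_delete(lk);			/* refcount == 1 */
	put_racy(lk);				/* last reference: freed here */
	return 0;
}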