Whamcloud - gitweb
LU-4558 clio: Solve a race in cl_lock_put 87/9887/3
author: Jinshan Xiong <jinshan.xiong@intel.com>
Thu, 3 Apr 2014 00:35:45 +0000 (17:35 -0700)
committer: Oleg Drokin <oleg.drokin@intel.com>
Tue, 22 Apr 2014 02:03:15 +0000 (02:03 +0000)
Checking the last reference and the state of a cl_lock in
cl_lock_put() is not atomic. This can cause a lock that is still in
use to be freed, if the process is preempted between the
atomic_dec_and_test() call and the (lock->cll_state == CLS_FREEING)
check.

This problem can be solved by having coh_locks hold a refcount on the
lock. That way, once the lock's refcount reaches zero, it is certain
that nobody else can have any chance to use it again.

Lustre-commit: ec1a5d4f1f5b52ee5031ddc11a862c82996541f7
Lustre-change: http://review.whamcloud.com/9881

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Signed-off-by: Bob Glossman <bob.glossman@intel.com>
Change-Id: I5e16396156e1c7b8b86f7aa74b7b4735bb774a0f
Reviewed-on: http://review.whamcloud.com/9887
Tested-by: Jenkins
Reviewed-by: Bobi Jam <bobijam@gmail.com>
Reviewed-by: James Simmons <uja.ornl@gmail.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/obdclass/cl_lock.c

index 8efd357..5ca57fc 100644 (file)
@@ -556,8 +556,9 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
                        spin_lock(&head->coh_lock_guard);
                        ghost = cl_lock_lookup(env, obj, io, need);
                        if (ghost == NULL) {
+                               cl_lock_get_trust(lock);
                                cfs_list_add_tail(&lock->cll_linkage,
-                                                 &head->coh_locks);
+                                             &head->coh_locks);
                                spin_unlock(&head->coh_lock_guard);
                                CS_LOCK_INC(obj, busy);
                        } else {
@@ -819,15 +820,22 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
 
         ENTRY;
         if (lock->cll_state < CLS_FREEING) {
+               bool in_cache;
+
                 LASSERT(lock->cll_state != CLS_INTRANSIT);
                 cl_lock_state_set(env, lock, CLS_FREEING);
 
                 head = cl_object_header(lock->cll_descr.cld_obj);
 
                spin_lock(&head->coh_lock_guard);
-               cfs_list_del_init(&lock->cll_linkage);
+               in_cache = !cfs_list_empty(&lock->cll_linkage);
+               if (in_cache)
+                       cfs_list_del_init(&lock->cll_linkage);
                spin_unlock(&head->coh_lock_guard);
 
+               if (in_cache) /* coh_locks cache holds a refcount. */
+                       cl_lock_put(env, lock);
+
                 /*
                  * From now on, no new references to this lock can be acquired
                  * by cl_lock_lookup().