LU-4269 ldlm: Hold lock when clearing flag 72/8772/5
author	Li Xi <pkuelelixi@gmail.com>
	Wed, 8 Jan 2014 09:13:16 +0000 (17:13 +0800)
committer	Oleg Drokin <oleg.drokin@intel.com>
	Fri, 21 Feb 2014 03:46:47 +0000 (03:46 +0000)
This patch moves the clearing of a lock's skip flag from the
lru-delete code to the lru-add code, to prevent the flag from
being cleared without resource lock protection.
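
The skip flag, like the other bits in the lock's flag word, is cleared with
a plain read-modify-write rather than an atomic operation, which is why the
clear has to run under the same lock that serializes the other flag updates
(the resource lock on the lru-add path). The sketch below is not Lustre
code; "fake_lock", "FLAG_SKIPPED" and the pthread mutex standing in for the
resource lock are made-up names, used only to show why the unlocked variant
of the clear can race with a concurrent flag update.

/*
 * Illustrative sketch only -- not Lustre code.  A pthread mutex stands in
 * for the resource lock; the flag word is updated non-atomically, as with
 * plain bit operations on a flags field.
 */
#include <pthread.h>
#include <stdio.h>

#define FLAG_SKIPPED	(1ULL << 0)
#define FLAG_CANCELING	(1ULL << 1)

struct fake_lock {
	pthread_mutex_t		res_lock;	/* stands in for the resource lock */
	unsigned long long	flags;		/* plain flag word, like l_flags */
};

/* Safe: the read-modify-write is serialized with every other flag update. */
static void clear_skipped_locked(struct fake_lock *lk)
{
	pthread_mutex_lock(&lk->res_lock);
	lk->flags &= ~FLAG_SKIPPED;
	pthread_mutex_unlock(&lk->res_lock);
}

/*
 * Unsafe: the same read-modify-write without the lock can race with a
 * concurrent flag update and lose one of the writes.  Moving the clear to
 * the lru-add path, which already runs under the resource lock, avoids this.
 */
static void clear_skipped_unlocked(struct fake_lock *lk)
{
	lk->flags &= ~FLAG_SKIPPED;
}

int main(void)
{
	struct fake_lock lk = { .flags = FLAG_SKIPPED | FLAG_CANCELING };

	pthread_mutex_init(&lk.res_lock, NULL);
	clear_skipped_locked(&lk);
	printf("flags after locked clear:   %#llx\n", lk.flags);
	clear_skipped_unlocked(&lk);	/* compiles, but racy under concurrency */
	printf("flags after unlocked clear: %#llx\n", lk.flags);
	pthread_mutex_destroy(&lk.res_lock);
	return 0;
}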

Signed-off-by: Li Xi <lixi@ddn.com>
Signed-off-by: Bobi Jam <bobijam.xu@intel.com>
Change-Id: I5cce4699833c2a935e418bdd7181a2151612a8be
Reviewed-on: http://review.whamcloud.com/8772
Tested-by: Jenkins
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_request.c

diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 214ee8c..e7a5072 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -250,18 +250,17 @@ EXPORT_SYMBOL(ldlm_lock_put);
  */
 int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
 {
-        int rc = 0;
-        if (!cfs_list_empty(&lock->l_lru)) {
-                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
-
-                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
-                cfs_list_del_init(&lock->l_lru);
-               ldlm_clear_skipped(lock);
-                LASSERT(ns->ns_nr_unused > 0);
-                ns->ns_nr_unused--;
-                rc = 1;
-        }
-        return rc;
+       int rc = 0;
+       if (!cfs_list_empty(&lock->l_lru)) {
+               struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
+               LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
+               cfs_list_del_init(&lock->l_lru);
+               LASSERT(ns->ns_nr_unused > 0);
+               ns->ns_nr_unused--;
+               rc = 1;
+       }
+       return rc;
 }
 
 /**
@@ -290,14 +289,15 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
  */
 void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
 {
-        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+       struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
-        lock->l_last_used = cfs_time_current();
-        LASSERT(cfs_list_empty(&lock->l_lru));
-        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
-        cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
-        LASSERT(ns->ns_nr_unused >= 0);
-        ns->ns_nr_unused++;
+       lock->l_last_used = cfs_time_current();
+       LASSERT(cfs_list_empty(&lock->l_lru));
+       LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
+       cfs_list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+       ldlm_clear_skipped(lock);
+       LASSERT(ns->ns_nr_unused >= 0);
+       ns->ns_nr_unused++;
 }
 
 /**
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index e140e0f..a90d452 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -1665,20 +1665,20 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                         /* No locks which got blocking requests. */
                        LASSERT(!ldlm_is_bl_ast(lock));
 
-                        if (flags & LDLM_CANCEL_NO_WAIT &&
+                       if (flags & LDLM_CANCEL_NO_WAIT &&
                            ldlm_is_skipped(lock))
-                                /* already processed */
-                                continue;
+                               /* already processed */
+                               continue;
 
                        /* Somebody is already doing CANCEL. No need for this
                         * lock in LRU, do not traverse it again. */
                        if (!ldlm_is_canceling(lock))
                                 break;
 
-                        ldlm_lock_remove_from_lru_nolock(lock);
-                }
-                if (&lock->l_lru == &ns->ns_unused_list)
-                        break;
+                       ldlm_lock_remove_from_lru_nolock(lock);
+               }
+               if (&lock->l_lru == &ns->ns_unused_list)
+                       break;
 
                LDLM_LOCK_GET(lock);
                spin_unlock(&ns->ns_lock);