Whamcloud - gitweb
LU-9230 ldlm: speed up preparation for list of lock cancel 27/26327/31
author: Yang Sheng <yang.sheng@intel.com>
Mon, 25 Sep 2017 13:01:02 +0000 (21:01 +0800)
committer: Oleg Drokin <oleg.drokin@intel.com>
Thu, 17 May 2018 02:30:39 +0000 (02:30 +0000)
Keeping the skipped locks in the LRU list causes serious
contention on ns_lock, since we have to traverse all of them
every time in ldlm_prepare_lru_list(). Instead, use a cursor
to record the position of the last accessed lock in the LRU
list, so scanning can resume from there.

Change-Id: Ibda36a90e54076cb785a65910b34300639b3e140
Signed-off-by: Yang Sheng <yang.sheng@intel.com>
Signed-off-by: Sergey Cheremencev <c17829@cray.com>
Reviewed-on: https://review.whamcloud.com/26327
Reviewed-by: Fan Yong <fan.yong@intel.com>
Tested-by: Jenkins
Reviewed-by: Vitaly Fertman <c17818@cray.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/include/lustre_dlm.h
lustre/include/lustre_dlm_flags.h
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c

index 5fc4c5d..1a7c900 100644 (file)
@@ -418,6 +418,7 @@ struct ldlm_namespace {
        struct list_head        ns_unused_list;
        /** Number of locks in the LRU list above */
        int                     ns_nr_unused;
+       struct list_head        *ns_last_pos;
 
        /**
         * Maximum number of locks permitted in the LRU. If 0, means locks
index 865fdee..9d5ec33 100644 (file)
 #define ldlm_set_fail_loc(_l)           LDLM_SET_FLAG((  _l), 1ULL << 32)
 #define ldlm_clear_fail_loc(_l)         LDLM_CLEAR_FLAG((_l), 1ULL << 32)
 
-/**
- * Used while processing the unused list to know that we have already
- * handled this lock and decided to skip it. */
-#define LDLM_FL_SKIPPED                 0x0000000200000000ULL // bit  33
-#define ldlm_is_skipped(_l)             LDLM_TEST_FLAG(( _l), 1ULL << 33)
-#define ldlm_set_skipped(_l)            LDLM_SET_FLAG((  _l), 1ULL << 33)
-#define ldlm_clear_skipped(_l)          LDLM_CLEAR_FLAG((_l), 1ULL << 33)
-
 /** this lock is being destroyed */
 #define LDLM_FL_CBPENDING               0x0000000400000000ULL // bit  34
 #define ldlm_is_cbpending(_l)           LDLM_TEST_FLAG(( _l), 1ULL << 34)
index 0b75a03..41f4907 100644 (file)
@@ -236,6 +236,8 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
                struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
                LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
+               if (ns->ns_last_pos == &lock->l_lru)
+                       ns->ns_last_pos = lock->l_lru.prev;
                list_del_init(&lock->l_lru);
                LASSERT(ns->ns_nr_unused > 0);
                ns->ns_nr_unused--;
@@ -286,7 +288,6 @@ void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
        LASSERT(list_empty(&lock->l_lru));
        LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
        list_add_tail(&lock->l_lru, &ns->ns_unused_list);
-       ldlm_clear_skipped(lock);
        LASSERT(ns->ns_nr_unused >= 0);
        ns->ns_nr_unused++;
 }
index 61be33c..55b395c 100644 (file)
@@ -1611,9 +1611,6 @@ ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                                break;
                default:
                        result = LDLM_POLICY_SKIP_LOCK;
-                       lock_res_and_lock(lock);
-                       ldlm_set_skipped(lock);
-                       unlock_res_and_lock(lock);
                        break;
        }
 
@@ -1834,54 +1831,48 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
                                 enum ldlm_lru_flags lru_flags)
 {
        ldlm_cancel_lru_policy_t pf;
-       struct ldlm_lock *lock, *next;
-       int added = 0, unused, remained;
+       int added = 0;
        int no_wait = lru_flags & LDLM_LRU_FLAG_NO_WAIT;
-       ENTRY;
 
-       spin_lock(&ns->ns_lock);
-       unused = ns->ns_nr_unused;
-       remained = unused;
+       ENTRY;
 
        if (!ns_connect_lru_resize(ns))
-               count += unused - ns->ns_max_unused;
+               count += ns->ns_nr_unused - ns->ns_max_unused;
 
        pf = ldlm_cancel_lru_policy(ns, lru_flags);
        LASSERT(pf != NULL);
 
-       while (!list_empty(&ns->ns_unused_list)) {
+       /* For any flags, stop scanning if @max is reached. */
+       while (!list_empty(&ns->ns_unused_list) && (max == 0 || added < max)) {
+               struct ldlm_lock *lock;
+               struct list_head *item, *next;
                enum ldlm_policy_res result;
                ktime_t last_use = ktime_set(0, 0);
 
-               /* all unused locks */
-               if (remained-- <= 0)
-                       break;
-
-               /* For any flags, stop scanning if @max is reached. */
-               if (max && added >= max)
-                       break;
+               spin_lock(&ns->ns_lock);
+               item = no_wait ? ns->ns_last_pos : &ns->ns_unused_list;
+               for (item = item->next, next = item->next;
+                    item != &ns->ns_unused_list;
+                    item = next, next = item->next) {
+                       lock = list_entry(item, struct ldlm_lock, l_lru);
 
-               list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
-                                        l_lru) {
                        /* No locks which got blocking requests. */
                        LASSERT(!ldlm_is_bl_ast(lock));
 
-                       if (no_wait && ldlm_is_skipped(lock))
-                               /* already processed */
-                               continue;
-
-                       last_use = lock->l_last_used;
-
-                       /* Somebody is already doing CANCEL. No need for this
-                        * lock in LRU, do not traverse it again. */
                        if (!ldlm_is_canceling(lock) ||
                            !ldlm_is_converting(lock))
                                break;
 
+                       /* Somebody is already doing CANCEL. No need for this
+                        * lock in LRU, do not traverse it again. */
                        ldlm_lock_remove_from_lru_nolock(lock);
                }
-               if (&lock->l_lru == &ns->ns_unused_list)
+               if (item == &ns->ns_unused_list) {
+                       spin_unlock(&ns->ns_lock);
                        break;
+               }
+
+               last_use = lock->l_last_used;
 
                LDLM_LOCK_GET(lock);
                spin_unlock(&ns->ns_lock);
@@ -1900,19 +1891,23 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
                 * old locks, but additionally choose them by
                 * their weight. Big extent locks will stay in
                 * the cache. */
-               result = pf(ns, lock, unused, added, count);
+               result = pf(ns, lock, ns->ns_nr_unused, added, count);
                if (result == LDLM_POLICY_KEEP_LOCK) {
-                       lu_ref_del(&lock->l_reference,
-                                  __FUNCTION__, current);
+                       lu_ref_del(&lock->l_reference, __func__, current);
                        LDLM_LOCK_RELEASE(lock);
-                       spin_lock(&ns->ns_lock);
                        break;
                }
+
                if (result == LDLM_POLICY_SKIP_LOCK) {
-                       lu_ref_del(&lock->l_reference,
-                                  __func__, current);
+                       lu_ref_del(&lock->l_reference, __func__, current);
                        LDLM_LOCK_RELEASE(lock);
-                       spin_lock(&ns->ns_lock);
+                       if (no_wait) {
+                               spin_lock(&ns->ns_lock);
+                               if (!list_empty(&lock->l_lru) &&
+                                   lock->l_lru.prev == ns->ns_last_pos)
+                                       ns->ns_last_pos = &lock->l_lru;
+                               spin_unlock(&ns->ns_lock);
+                       }
                        continue;
                }
 
@@ -1929,7 +1924,6 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
                        unlock_res_and_lock(lock);
                        lu_ref_del(&lock->l_reference, __FUNCTION__, current);
                        LDLM_LOCK_RELEASE(lock);
-                       spin_lock(&ns->ns_lock);
                        continue;
                }
                LASSERT(!lock->l_readers && !lock->l_writers);
@@ -1964,11 +1958,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
                list_add(&lock->l_bl_ast, cancels);
                unlock_res_and_lock(lock);
                lu_ref_del(&lock->l_reference, __FUNCTION__, current);
-               spin_lock(&ns->ns_lock);
                added++;
-               unused--;
        }
-       spin_unlock(&ns->ns_lock);
        RETURN(added);
 }
 
index 0bf6af5..cbbb074 100644 (file)
@@ -993,6 +993,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
         ns->ns_connect_flags      = 0;
         ns->ns_stopping           = 0;
        ns->ns_reclaim_start      = 0;
+       ns->ns_last_pos           = &ns->ns_unused_list;
 
        rc = ldlm_namespace_sysfs_register(ns);
        if (rc) {