LU-2683 lov: release all locks in closure to release sublock
[fs/lustre-release.git] / lustre/obdclass/cl_lock.c
index 84ee04f..a3f15b7 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 #include <obd_support.h>
 #include <lustre_fid.h>
 #include <libcfs/list.h>
-/* lu_time_global_{init,fini}() */
-#include <lu_time.h>
-
 #include <cl_object.h>
 #include "cl_internal.h"
 
 /** Lock class of cl_lock::cll_guard */
-static cfs_lock_class_key_t cl_lock_guard_class;
+static struct lock_class_key cl_lock_guard_class;
 static cfs_mem_cache_t *cl_lock_kmem;
 
 static struct lu_kmem_descr cl_lock_caches[] = {
@@ -65,6 +62,22 @@ static struct lu_kmem_descr cl_lock_caches[] = {
         }
 };
 
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CS_LOCK_INC(o, item) \
+       cfs_atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+#define CS_LOCK_DEC(o, item) \
+       cfs_atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+#define CS_LOCKSTATE_INC(o, state) \
+        cfs_atomic_inc(&cl_object_site(o)->cs_locks_state[state])
+#define CS_LOCKSTATE_DEC(o, state) \
+        cfs_atomic_dec(&cl_object_site(o)->cs_locks_state[state])
+#else
+#define CS_LOCK_INC(o, item)
+#define CS_LOCK_DEC(o, item)
+#define CS_LOCKSTATE_INC(o, state)
+#define CS_LOCKSTATE_DEC(o, state)
+#endif
+
 /**
  * Basic lock invariant that is maintained at all times. Caller either has a
  * reference to \a lock, or somehow assures that \a lock cannot be freed.
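
The CS_LOCK_* and CS_LOCKSTATE_* macros introduced above wrap the per-site lock statistics so that the atomic counters compile away completely when CONFIG_DEBUG_PAGESTATE_TRACKING is not set. A minimal userspace sketch of the same pattern, under invented names (demo_stats, DEMO_STAT_INC; this is not Lustre API). Build with -DDEMO_STATS to enable the counters:

/* Hypothetical illustration of the compile-time-optional stats pattern. */
#include <stdatomic.h>
#include <stdio.h>

struct demo_stats {
        atomic_int lookup;
        atomic_int hit;
};

#ifdef DEMO_STATS
#define DEMO_STAT_INC(s, item)  atomic_fetch_add(&(s)->item, 1)
#else
#define DEMO_STAT_INC(s, item)  ((void)0)
#endif

int main(void)
{
        struct demo_stats st = { 0 };

        DEMO_STAT_INC(&st, lookup);     /* no-op unless built with -DDEMO_STATS */
        DEMO_STAT_INC(&st, hit);
        printf("lookup=%d hit=%d\n",
               atomic_load(&st.lookup), atomic_load(&st.hit));
        return 0;
}

The disabled variants in the patch expand to nothing, which is fine in statement position; the ((void)0) form used here additionally stays legal in expression position.
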
@@ -142,7 +155,7 @@ static void cl_lock_trace0(int level, const struct lu_env *env,
 #define RETIP ((unsigned long)__builtin_return_address(0))
 
 #ifdef CONFIG_LOCKDEP
-static cfs_lock_class_key_t cl_lock_key;
+static struct lock_class_key cl_lock_key;
 
 static void cl_lock_lockdep_init(struct cl_lock *lock)
 {
@@ -269,13 +282,13 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
                 cfs_list_del_init(lock->cll_layers.next);
                 slice->cls_ops->clo_fini(env, slice);
         }
-        cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
-        cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
+       CS_LOCK_DEC(obj, total);
+       CS_LOCKSTATE_DEC(obj, lock->cll_state);
         lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
         cl_object_put(env, obj);
         lu_ref_fini(&lock->cll_reference);
         lu_ref_fini(&lock->cll_holders);
-        cfs_mutex_destroy(&lock->cll_guard);
+       mutex_destroy(&lock->cll_guard);
         OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
         EXIT;
 }
@@ -308,7 +321,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
                         LASSERT(cfs_list_empty(&lock->cll_linkage));
                         cl_lock_free(env, lock);
                 }
-                cfs_atomic_dec(&site->cs_locks.cs_busy);
+               CS_LOCK_DEC(obj, busy);
         }
         EXIT;
 }
@@ -342,12 +355,10 @@ EXPORT_SYMBOL(cl_lock_get);
  */
 void cl_lock_get_trust(struct cl_lock *lock)
 {
-        struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
-
         CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
                cfs_atomic_read(&lock->cll_ref), lock, RETIP);
         if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
-                cfs_atomic_inc(&site->cs_locks.cs_busy);
+               CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
 }
 EXPORT_SYMBOL(cl_lock_get_trust);
 
@@ -373,7 +384,6 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
 {
         struct cl_lock          *lock;
         struct lu_object_header *head;
-        struct cl_site          *site = cl_object_site(obj);
 
         ENTRY;
         OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
@@ -389,13 +399,13 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
                 lu_ref_init(&lock->cll_reference);
                 lu_ref_init(&lock->cll_holders);
-                cfs_mutex_init(&lock->cll_guard);
-                cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
+               mutex_init(&lock->cll_guard);
+               lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
                 cfs_waitq_init(&lock->cll_wq);
                 head = obj->co_lu.lo_header;
-                cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
-                cfs_atomic_inc(&site->cs_locks.cs_total);
-                cfs_atomic_inc(&site->cs_locks.cs_created);
+               CS_LOCKSTATE_INC(obj, CLS_NEW);
+               CS_LOCK_INC(obj, total);
+               CS_LOCK_INC(obj, create);
                 cl_lock_lockdep_init(lock);
                 cfs_list_for_each_entry(obj, &head->loh_layers,
                                         co_lu.lo_linkage) {
@@ -493,14 +503,12 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
 {
         struct cl_lock          *lock;
         struct cl_object_header *head;
-        struct cl_site          *site;
 
         ENTRY;
 
         head = cl_object_header(obj);
-        site = cl_object_site(obj);
         LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
-        cfs_atomic_inc(&site->cs_locks.cs_lookup);
+       CS_LOCK_INC(obj, lookup);
         cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                 int matched;
 
@@ -514,7 +522,7 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
                        matched);
                 if (matched) {
                         cl_lock_get_trust(lock);
-                        cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
+                       CS_LOCK_INC(obj, hit);
                         RETURN(lock);
                 }
         }
@@ -538,32 +546,30 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
         struct cl_object_header *head;
         struct cl_object        *obj;
         struct cl_lock          *lock;
-        struct cl_site          *site;
 
         ENTRY;
 
         obj  = need->cld_obj;
         head = cl_object_header(obj);
-        site = cl_object_site(obj);
 
-        cfs_spin_lock(&head->coh_lock_guard);
-        lock = cl_lock_lookup(env, obj, io, need);
-        cfs_spin_unlock(&head->coh_lock_guard);
-
-        if (lock == NULL) {
-                lock = cl_lock_alloc(env, obj, io, need);
-                if (!IS_ERR(lock)) {
-                        struct cl_lock *ghost;
-
-                        cfs_spin_lock(&head->coh_lock_guard);
-                        ghost = cl_lock_lookup(env, obj, io, need);
-                        if (ghost == NULL) {
-                                cfs_list_add_tail(&lock->cll_linkage,
-                                                  &head->coh_locks);
-                                cfs_spin_unlock(&head->coh_lock_guard);
-                                cfs_atomic_inc(&site->cs_locks.cs_busy);
-                        } else {
-                                cfs_spin_unlock(&head->coh_lock_guard);
+       spin_lock(&head->coh_lock_guard);
+       lock = cl_lock_lookup(env, obj, io, need);
+       spin_unlock(&head->coh_lock_guard);
+
+       if (lock == NULL) {
+               lock = cl_lock_alloc(env, obj, io, need);
+               if (!IS_ERR(lock)) {
+                       struct cl_lock *ghost;
+
+                       spin_lock(&head->coh_lock_guard);
+                       ghost = cl_lock_lookup(env, obj, io, need);
+                       if (ghost == NULL) {
+                               cfs_list_add_tail(&lock->cll_linkage,
+                                                 &head->coh_locks);
+                               spin_unlock(&head->coh_lock_guard);
+                               CS_LOCK_INC(obj, busy);
+                       } else {
+                               spin_unlock(&head->coh_lock_guard);
                                 /*
                                  * Other threads can acquire references to the
                                  * top-lock through its sub-locks. Hence, it
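
The rewritten cl_lock_find() above keeps the classic optimistic-allocation shape: look up under the spinlock, allocate outside it (allocation may block), then re-run the lookup and, if a concurrent thread inserted a "ghost" first, hand back the caller's copy. The comment cut off by the hunk boundary is explaining why that losing copy cannot simply be freed on the spot. A hedged userspace sketch of the shape, with a pthread mutex standing in for coh_lock_guard and invented names (find_or_create, lookup_locked):

#include <pthread.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *table;

static struct node *lookup_locked(int key)
{
        struct node *n;

        for (n = table; n != NULL; n = n->next)
                if (n->key == key)
                        return n;
        return NULL;
}

struct node *find_or_create(int key)
{
        struct node *n, *ghost;

        pthread_mutex_lock(&table_lock);
        n = lookup_locked(key);
        pthread_mutex_unlock(&table_lock);
        if (n != NULL)
                return n;

        n = malloc(sizeof(*n));         /* may sleep; lock not held */
        if (n == NULL)
                return NULL;
        n->key = key;

        pthread_mutex_lock(&table_lock);
        ghost = lookup_locked(key);     /* did someone beat us to it? */
        if (ghost == NULL) {
                n->next = table;        /* we won: publish our copy */
                table = n;
        }
        pthread_mutex_unlock(&table_lock);

        if (ghost != NULL) {
                free(n);                /* lost the race; discard ours */
                n = ghost;
        }
        return n;
}
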
@@ -593,16 +599,24 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
         obj  = need->cld_obj;
         head = cl_object_header(obj);
 
-        cfs_spin_lock(&head->coh_lock_guard);
-        lock = cl_lock_lookup(env, obj, io, need);
-        cfs_spin_unlock(&head->coh_lock_guard);
-
-        if (lock == NULL)
-                return NULL;
+       do {
+               spin_lock(&head->coh_lock_guard);
+               lock = cl_lock_lookup(env, obj, io, need);
+               spin_unlock(&head->coh_lock_guard);
+               if (lock == NULL)
+                       return NULL;
+
+               cl_lock_mutex_get(env, lock);
+               if (lock->cll_state == CLS_INTRANSIT)
+                       /* Don't care return value. */
+                       cl_lock_state_wait(env, lock);
+               if (lock->cll_state == CLS_FREEING) {
+                       cl_lock_mutex_put(env, lock);
+                       cl_lock_put(env, lock);
+                       lock = NULL;
+               }
+       } while (lock == NULL);
 
-        cl_lock_mutex_get(env, lock);
-        if (lock->cll_state == CLS_INTRANSIT)
-                cl_lock_state_wait(env, lock); /* Don't care return value. */
        cl_lock_hold_add(env, lock, scope, source);
        cl_lock_user_add(env, lock);
        if (lock->cll_state == CLS_CACHED)
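
The new do/while above is the behavioral change in this hunk: after waiting out CLS_INTRANSIT, the looked-up lock may have moved to CLS_FREEING, and the old code would proceed to cl_lock_hold_add() with it anyway; the loop now drops the doomed lock and repeats the lookup. A single-threaded sketch of the retry shape (the INTRANSIT wait is elided, all names are illustrative, and the canned lookup() simulates a racing thread freeing the first match):

#include <stddef.h>
#include <stdio.h>

enum state { NEW, INTRANSIT, FREEING, CACHED };

struct demo_lock { enum state state; };

/* Canned lookup: first call returns a FREEING lock, second a CACHED one. */
static struct demo_lock *lookup(void)
{
        static struct demo_lock locks[] = { { FREEING }, { CACHED } };
        static int calls;

        return calls < 2 ? &locks[calls++] : NULL;
}

static struct demo_lock *peek(void)
{
        struct demo_lock *lk;

        do {
                lk = lookup();
                if (lk == NULL)
                        return NULL;    /* nothing cached at all */
                if (lk->state == FREEING)
                        lk = NULL;      /* doomed lock: retry the lookup */
        } while (lk == NULL);
        return lk;
}

int main(void)
{
        struct demo_lock *lk = peek();

        printf("peeked state=%d (CACHED=%d)\n", lk ? (int)lk->state : -1, CACHED);
        return 0;
}
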
@@ -686,7 +700,7 @@ void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
                 info = cl_env_info(env);
                 for (i = 0; i < hdr->coh_nesting; ++i)
                         LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
-                cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
+               mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
                 lock->cll_guarder = cfs_current();
                 LINVRNT(lock->cll_depth == 0);
         }
@@ -716,7 +730,7 @@ int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
         if (lock->cll_guarder == cfs_current()) {
                 LINVRNT(lock->cll_depth > 0);
                 cl_lock_mutex_tail(env, lock);
-        } else if (cfs_mutex_trylock(&lock->cll_guard)) {
+       } else if (mutex_trylock(&lock->cll_guard)) {
                 LINVRNT(lock->cll_depth == 0);
                 lock->cll_guarder = cfs_current();
                 cl_lock_mutex_tail(env, lock);
@@ -750,7 +764,7 @@ void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
         counters->ctc_nr_locks_locked--;
         if (--lock->cll_depth == 0) {
                 lock->cll_guarder = NULL;
-                cfs_mutex_unlock(&lock->cll_guard);
+               mutex_unlock(&lock->cll_guard);
         }
 }
 EXPORT_SYMBOL(cl_lock_mutex_put);
@@ -818,10 +832,10 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
 
                 head = cl_object_header(lock->cll_descr.cld_obj);
 
-                cfs_spin_lock(&head->coh_lock_guard);
-                cfs_list_del_init(&lock->cll_linkage);
+               spin_lock(&head->coh_lock_guard);
+               cfs_list_del_init(&lock->cll_linkage);
+               spin_unlock(&head->coh_lock_guard);
 
-                cfs_spin_unlock(&head->coh_lock_guard);
                 /*
                  * From now on, no new references to this lock can be acquired
                  * by cl_lock_lookup().
@@ -1028,8 +1042,6 @@ EXPORT_SYMBOL(cl_lock_signal);
 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
                        enum cl_lock_state state)
 {
-        struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
-
         ENTRY;
         LASSERT(lock->cll_state <= state ||
                 (lock->cll_state == CLS_CACHED &&
@@ -1040,8 +1052,8 @@ void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
                 lock->cll_state == CLS_INTRANSIT);
 
         if (lock->cll_state != state) {
-                cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
-                cfs_atomic_inc(&site->cs_locks_state[state]);
+               CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
+               CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
 
                 cl_lock_state_signal(env, lock, state);
                 lock->cll_state = state;
@@ -1605,10 +1617,10 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
          * now. If locks were indexed according to their extent and/or mode,
          * that index would have to be updated here.
          */
-        cfs_spin_lock(&hdr->coh_lock_guard);
-        lock->cll_descr = *desc;
-        cfs_spin_unlock(&hdr->coh_lock_guard);
-        RETURN(0);
+       spin_lock(&hdr->coh_lock_guard);
+       lock->cll_descr = *desc;
+       spin_unlock(&hdr->coh_lock_guard);
+       RETURN(0);
 }
 EXPORT_SYMBOL(cl_lock_modify);
 
@@ -1859,7 +1871,7 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
        need->cld_start = need->cld_end = index;
         need->cld_enq_flags = 0;
 
-        cfs_spin_lock(&head->coh_lock_guard);
+       spin_lock(&head->coh_lock_guard);
         /* It is fine to match any group lock since there could be only one
          * with a uniq gid and it conflicts with all other lock modes too */
         cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
@@ -1882,8 +1894,8 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
                         break;
                 }
         }
-        cfs_spin_unlock(&head->coh_lock_guard);
-        RETURN(lock);
+       spin_unlock(&head->coh_lock_guard);
+       RETURN(lock);
 }
 EXPORT_SYMBOL(cl_lock_at_pgoff);
 
@@ -1915,9 +1927,9 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
         if (index >= info->clt_fn_index) {
                 struct cl_lock *tmp;
 
-                /* refresh non-overlapped index */
-                tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, page, lock,
-                                      1, 0);
+               /* refresh non-overlapped index */
+               tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
+                                       lock, 1, 0);
                 if (tmp != NULL) {
                         /* Cache the first-non-overlapped index so as to skip
                          * all pages within [index, clt_fn_index). This
@@ -2032,12 +2044,12 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
         LASSERT(ergo(!cancel,
                      head->coh_tree.rnode == NULL && head->coh_pages == 0));
 
-        cfs_spin_lock(&head->coh_lock_guard);
-        while (!cfs_list_empty(&head->coh_locks)) {
-                lock = container_of(head->coh_locks.next,
-                                    struct cl_lock, cll_linkage);
-                cl_lock_get_trust(lock);
-                cfs_spin_unlock(&head->coh_lock_guard);
+       spin_lock(&head->coh_lock_guard);
+       while (!cfs_list_empty(&head->coh_locks)) {
+               lock = container_of(head->coh_locks.next,
+                                   struct cl_lock, cll_linkage);
+               cl_lock_get_trust(lock);
+               spin_unlock(&head->coh_lock_guard);
                 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
 
 again:
@@ -2061,10 +2073,10 @@ again:
                 cl_lock_mutex_put(env, lock);
                 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
                 cl_lock_put(env, lock);
-                cfs_spin_lock(&head->coh_lock_guard);
-        }
-        cfs_spin_unlock(&head->coh_lock_guard);
-        EXIT;
+               spin_lock(&head->coh_lock_guard);
+       }
+       spin_unlock(&head->coh_lock_guard);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_locks_prune);
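
The loop in cl_locks_prune() above follows a standard drain pattern: take the list spinlock, grab a private reference to the head element, drop the spinlock for the blocking per-lock work, then retake it before testing the list again. A minimal pthread sketch of the pattern (hypothetical names; the element is unlinked immediately here, whereas the real code takes a trusted reference and lets the later teardown unlink it):

#include <pthread.h>
#include <stdlib.h>

struct item { struct item *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *head;

static void process(struct item *it)    /* may block; list lock not held */
{
        free(it);
}

void drain(void)
{
        pthread_mutex_lock(&list_lock);
        while (head != NULL) {
                struct item *it = head;

                head = it->next;                /* unlink under the lock */
                pthread_mutex_unlock(&list_lock);
                process(it);                    /* blocking work, unlocked */
                pthread_mutex_lock(&list_lock);
        }
        pthread_mutex_unlock(&list_lock);
}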