LU-3963 obdclass: convert to linux list api
[fs/lustre-release.git] / lustre / obdclass / cl_lock.c
index a2a1624..6aaf676 100644
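In brief: this patch drops the libcfs compatibility wrappers from cl_lock.c in favor of the native Linux primitives (cfs_atomic_* becomes atomic_*, cfs_list_* becomes list_*, CFS_INIT_LIST_HEAD becomes INIT_LIST_HEAD) and retabs the touched lines to kernel style. A few hunks change behavior as well: the lock slab is allocated with GFP_NOFS instead of bare __GFP_IO, a spin_is_locked() check becomes assert_spin_locked(), the coh_locks cache now holds its own reference on each cached lock (taken in cl_lock_find(), dropped in cl_lock_delete0()), cl_lock_hold_add() takes its reference before bumping the hold count, cl_lock_free() no longer calls might_sleep(), and cl_lock_mode_name() replaces a runtime range check with a compile-time CLASSERT.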
@@ -41,6 +41,7 @@
 
 #define DEBUG_SUBSYSTEM S_CLASS
 
+#include <libcfs/libcfs.h>
 #include <obd_class.h>
 #include <obd_support.h>
 #include <lustre_fid.h>
@@ -65,13 +66,13 @@ static struct lu_kmem_descr cl_lock_caches[] = {
 
 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
 #define CS_LOCK_INC(o, item) \
-       cfs_atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+       atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
 #define CS_LOCK_DEC(o, item) \
-       cfs_atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+       atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
 #define CS_LOCKSTATE_INC(o, state) \
-        cfs_atomic_inc(&cl_object_site(o)->cs_locks_state[state])
+       atomic_inc(&cl_object_site(o)->cs_locks_state[state])
 #define CS_LOCKSTATE_DEC(o, state) \
-        cfs_atomic_dec(&cl_object_site(o)->cs_locks_state[state])
+       atomic_dec(&cl_object_site(o)->cs_locks_state[state])
 #else
 #define CS_LOCK_INC(o, item)
 #define CS_LOCK_DEC(o, item)
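For readers outside the tree: with CONFIG_DEBUG_PAGESTATE_TRACKING set, these macros compile to plain atomic counter bumps, and otherwise to nothing. A minimal sketch of the pattern, with hypothetical names (my_stats and MY_STAT_INC are stand-ins, not Lustre API):

        #include <linux/atomic.h>

        struct my_stats {
                atomic_t counters[8];            /* one slot per tracked event */
        };

        #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
        #define MY_STAT_INC(s, i)       atomic_inc(&(s)->counters[i])
        #else
        #define MY_STAT_INC(s, i)       /* compiles away entirely */
        #endif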
@@ -89,7 +90,7 @@ static int cl_lock_invariant_trusted(const struct lu_env *env,
                                      const struct cl_lock *lock)
 {
         return  ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
-                cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
+               atomic_read(&lock->cll_ref) >= lock->cll_holds &&
                 lock->cll_holds >= lock->cll_users &&
                 lock->cll_holds >= 0 &&
                 lock->cll_users >= 0 &&
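The invariant leans on libcfs's ergo() helper, which is plain logical implication:

        /* ergo(a, b) reads "a implies b"; libcfs defines it as: */
        #define ergo(a, b) (!(a) || (b))

So the first conjunct above says: once a lock reaches CLS_FREEING, nobody may still hold it.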
@@ -106,11 +107,11 @@ static int cl_lock_invariant(const struct lu_env *env,
 {
         int result;
 
-        result = cfs_atomic_read(&lock->cll_ref) > 0 &&
+       result = atomic_read(&lock->cll_ref) > 0 &&
                 cl_lock_invariant_trusted(env, lock);
-        if (!result && env != NULL)
-                CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
-        return result;
+       if (!result && env != NULL)
+               CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
+       return result;
 }
 
 /**
@@ -143,7 +144,7 @@ static void cl_lock_trace0(int level, const struct lu_env *env,
         struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
         CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
                       "(%p/%d/%d) at %s():%d\n",
-               prefix, lock, cfs_atomic_read(&lock->cll_ref),
+              prefix, lock, atomic_read(&lock->cll_ref),
                lock->cll_guarder, lock->cll_depth,
                lock->cll_state, lock->cll_error, lock->cll_holds,
                lock->cll_users, lock->cll_flags,
@@ -203,12 +204,12 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
                        struct cl_object *obj,
                        const struct cl_lock_operations *ops)
 {
-        ENTRY;
-        slice->cls_lock = lock;
-        cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
-        slice->cls_obj = obj;
-        slice->cls_ops = ops;
-        EXIT;
+       ENTRY;
+       slice->cls_lock = lock;
+       list_add_tail(&slice->cls_linkage, &lock->cll_layers);
+       slice->cls_obj = obj;
+       slice->cls_ops = ops;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_lock_slice_add);
 
@@ -268,13 +269,12 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
 
        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
-       might_sleep();
-       while (!cfs_list_empty(&lock->cll_layers)) {
+       while (!list_empty(&lock->cll_layers)) {
                struct cl_lock_slice *slice;
 
-               slice = cfs_list_entry(lock->cll_layers.next,
-                                      struct cl_lock_slice, cls_linkage);
-               cfs_list_del_init(lock->cll_layers.next);
+               slice = list_entry(lock->cll_layers.next,
+                                  struct cl_lock_slice, cls_linkage);
+               list_del_init(lock->cll_layers.next);
                slice->cls_ops->clo_fini(env, slice);
        }
        CS_LOCK_DEC(obj, total);
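The freeing loop is the standard drain-a-list idiom: pop the head entry, unlink it, then let the per-layer clo_fini() release it. The same shape in isolation, with hypothetical types (struct item and item_fini() are illustrative only):

        #include <linux/list.h>

        while (!list_empty(&head)) {
                struct item *it = list_entry(head.next, struct item, linkage);

                list_del_init(&it->linkage);    /* unlink first ...       */
                item_fini(it);                  /* ... this may free 'it' */
        }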
@@ -307,11 +307,11 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
         LINVRNT(obj != NULL);
 
         CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
-               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
+              atomic_read(&lock->cll_ref), lock, RETIP);
 
-        if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
+       if (atomic_dec_and_test(&lock->cll_ref)) {
                 if (lock->cll_state == CLS_FREEING) {
-                        LASSERT(cfs_list_empty(&lock->cll_linkage));
+                       LASSERT(list_empty(&lock->cll_linkage));
                         cl_lock_free(env, lock);
                 }
                CS_LOCK_DEC(obj, busy);
@@ -332,8 +332,8 @@ void cl_lock_get(struct cl_lock *lock)
 {
         LINVRNT(cl_lock_invariant(NULL, lock));
         CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
-               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
-        cfs_atomic_inc(&lock->cll_ref);
+              atomic_read(&lock->cll_ref), lock, RETIP);
+       atomic_inc(&lock->cll_ref);
 }
 EXPORT_SYMBOL(cl_lock_get);
 
@@ -349,8 +349,8 @@ EXPORT_SYMBOL(cl_lock_get);
 void cl_lock_get_trust(struct cl_lock *lock)
 {
         CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
-               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
-        if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
+              atomic_read(&lock->cll_ref), lock, RETIP);
+       if (atomic_inc_return(&lock->cll_ref) == 1)
                CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
 }
 EXPORT_SYMBOL(cl_lock_get_trust);
@@ -371,25 +371,25 @@ static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
 }
 
 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
-                                     struct cl_object *obj,
-                                     const struct cl_io *io,
-                                     const struct cl_lock_descr *descr)
+                                    struct cl_object *obj,
+                                    const struct cl_io *io,
+                                    const struct cl_lock_descr *descr)
 {
-        struct cl_lock          *lock;
-        struct lu_object_header *head;
+       struct cl_lock          *lock;
+       struct lu_object_header *head;
 
-        ENTRY;
-       OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
-        if (lock != NULL) {
-                cfs_atomic_set(&lock->cll_ref, 1);
-                lock->cll_descr = *descr;
-                lock->cll_state = CLS_NEW;
-                cl_object_get(obj);
+       ENTRY;
+       OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, GFP_NOFS);
+       if (lock != NULL) {
+               atomic_set(&lock->cll_ref, 1);
+               lock->cll_descr = *descr;
+               lock->cll_state = CLS_NEW;
+               cl_object_get(obj);
                lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
                                     lock);
-               CFS_INIT_LIST_HEAD(&lock->cll_layers);
-               CFS_INIT_LIST_HEAD(&lock->cll_linkage);
-               CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
+               INIT_LIST_HEAD(&lock->cll_layers);
+               INIT_LIST_HEAD(&lock->cll_linkage);
+               INIT_LIST_HEAD(&lock->cll_inclosure);
                lu_ref_init(&lock->cll_reference);
                lu_ref_init(&lock->cll_holders);
                mutex_init(&lock->cll_guard);
@@ -400,20 +400,19 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                CS_LOCK_INC(obj, total);
                CS_LOCK_INC(obj, create);
                cl_lock_lockdep_init(lock);
-               cfs_list_for_each_entry(obj, &head->loh_layers,
-                                       co_lu.lo_linkage) {
-                        int err;
-
-                        err = obj->co_ops->coo_lock_init(env, obj, lock, io);
-                        if (err != 0) {
-                                cl_lock_finish(env, lock);
-                                lock = ERR_PTR(err);
-                                break;
-                        }
-                }
-        } else
-                lock = ERR_PTR(-ENOMEM);
-        RETURN(lock);
+               list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
+                       int err;
+
+                       err = obj->co_ops->coo_lock_init(env, obj, lock, io);
+                       if (err != 0) {
+                               cl_lock_finish(env, lock);
+                               lock = ERR_PTR(err);
+                               break;
+                       }
+               }
+       } else
+               lock = ERR_PTR(-ENOMEM);
+       RETURN(lock);
 }
 
 /**
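Note the allocation-flag change: GFP_NOFS (roughly __GFP_WAIT | __GFP_IO in kernels of this vintage) lets the allocator sleep and start block I/O to reclaim memory while forbidding re-entry into filesystem code, whereas the bare __GFP_IO it replaces could not block for reclaim at all. Since cl_lock_alloc() sits on a filesystem locking path, recursing back into Lustre from reclaim here could deadlock. In plain kernel terms the effect is roughly:

        /* OBD_SLAB_ALLOC_PTR_GFP() wraps a zeroing slab allocation, so: */
        lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS);
        /* may sleep, may start I/O, must not re-enter the FS */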
@@ -481,7 +480,7 @@ static int cl_lock_fits_into(const struct lu_env *env,
 
         LINVRNT(cl_lock_invariant_trusted(env, lock));
         ENTRY;
-        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+       list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_fits_into != NULL &&
                     !slice->cls_ops->clo_fits_into(env, slice, need, io))
                         RETURN(0);
@@ -500,9 +499,9 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
         ENTRY;
 
        head = cl_object_header(obj);
-       LINVRNT(spin_is_locked(&head->coh_lock_guard));
+       assert_spin_locked(&head->coh_lock_guard);
        CS_LOCK_INC(obj, lookup);
-       cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
+       list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                int matched;
 
                 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
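Swapping LINVRNT(spin_is_locked(...)) for assert_spin_locked() is a correctness fix, not just a rename: with CONFIG_SMP disabled, spinlocks compile away and spin_is_locked() is hardwired to return 0, so the old assertion would trip on every uniprocessor build. assert_spin_locked() is the kernel's sanctioned form and degrades to a no-op where the lock state cannot be observed:

        /* Correct way to assert "the caller holds this lock": */
        assert_spin_locked(&head->coh_lock_guard);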
@@ -557,8 +556,9 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
                        spin_lock(&head->coh_lock_guard);
                        ghost = cl_lock_lookup(env, obj, io, need);
                        if (ghost == NULL) {
-                               cfs_list_add_tail(&lock->cll_linkage,
-                                                 &head->coh_locks);
+                               cl_lock_get_trust(lock);
+                               list_add_tail(&lock->cll_linkage,
+                                             &head->coh_locks);
                                spin_unlock(&head->coh_lock_guard);
                                CS_LOCK_INC(obj, busy);
                        } else {
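This hunk is one of the two real behavioral changes in the patch: before the new lock is published on head->coh_locks, the cache takes its own reference via cl_lock_get_trust(), so a concurrent lookup can no longer return a lock whose last reference is already on its way out. The general shape, with hypothetical cache/object names:

        spin_lock(&cache->guard);
        if (cache_lookup(cache, key) == NULL) {
                object_get(obj);                /* reference owned by cache */
                list_add_tail(&obj->linkage, &cache->list);
        }
        spin_unlock(&cache->guard);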
@@ -644,7 +644,7 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
         LINVRNT(cl_lock_invariant_trusted(NULL, lock));
         ENTRY;
 
-        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+       list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
                         RETURN(slice);
         }
@@ -801,8 +801,8 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
                 const struct cl_lock_slice *slice;
 
                 lock->cll_flags |= CLF_CANCELLED;
-                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
-                                                cls_linkage) {
+               list_for_each_entry_reverse(slice, &lock->cll_layers,
+                                           cls_linkage) {
                         if (slice->cls_ops->clo_cancel != NULL)
                                 slice->cls_ops->clo_cancel(env, slice);
                 }
@@ -820,35 +820,42 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
 
         ENTRY;
         if (lock->cll_state < CLS_FREEING) {
+               bool in_cache;
+
                 LASSERT(lock->cll_state != CLS_INTRANSIT);
                 cl_lock_state_set(env, lock, CLS_FREEING);
 
                 head = cl_object_header(lock->cll_descr.cld_obj);
 
                spin_lock(&head->coh_lock_guard);
-               cfs_list_del_init(&lock->cll_linkage);
+               in_cache = !list_empty(&lock->cll_linkage);
+               if (in_cache)
+                       list_del_init(&lock->cll_linkage);
                spin_unlock(&head->coh_lock_guard);
 
-                /*
-                 * From now on, no new references to this lock can be acquired
-                 * by cl_lock_lookup().
-                 */
-                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
-                                                cls_linkage) {
-                        if (slice->cls_ops->clo_delete != NULL)
-                                slice->cls_ops->clo_delete(env, slice);
-                }
-                /*
-                 * From now on, no new references to this lock can be acquired
-                 * by layer-specific means (like a pointer from struct
-                 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
-                 * lov).
-                 *
-                 * Lock will be finally freed in cl_lock_put() when last of
-                 * existing references goes away.
-                 */
-        }
-        EXIT;
+               if (in_cache) /* coh_locks cache holds a refcount. */
+                       cl_lock_put(env, lock);
+
+               /*
+                * From now on, no new references to this lock can be acquired
+                * by cl_lock_lookup().
+                */
+               list_for_each_entry_reverse(slice, &lock->cll_layers,
+                                           cls_linkage) {
+                       if (slice->cls_ops->clo_delete != NULL)
+                               slice->cls_ops->clo_delete(env, slice);
+               }
+               /*
+                * From now on, no new references to this lock can be acquired
+                * by layer-specific means (like a pointer from struct
+                * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
+                * lov).
+                *
+                * Lock will be finally freed in cl_lock_put() when last of
+                * existing references goes away.
+                */
+       }
+       EXIT;
 }
 
 /**
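And here is the counterpart to the cl_lock_find() change: deletion now records whether the lock was still linked on the cache list and, only if so, unlinks it and drops the cache's reference once the guard is released. Using list_del_init() rather than list_del() keeps cll_linkage self-pointing, so the list_empty() test stays meaningful if deletion races with cl_locks_prune(). Sketched with the same hypothetical names as above:

        bool cached;

        spin_lock(&cache->guard);
        cached = !list_empty(&obj->linkage);    /* still published?   */
        if (cached)
                list_del_init(&obj->linkage);   /* re-inits the entry */
        spin_unlock(&cache->guard);

        if (cached)
                object_put(obj);                /* drop the cache's ref */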
@@ -1001,7 +1008,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
 
-       cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
+       list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
                if (slice->cls_ops->clo_state != NULL)
                        slice->cls_ops->clo_state(env, slice, state);
        wake_up_all(&lock->cll_wq);
@@ -1059,29 +1066,29 @@ EXPORT_SYMBOL(cl_lock_state_set);
 
 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
 {
-        const struct cl_lock_slice *slice;
-        int result;
-
-        do {
-                result = 0;
+       const struct cl_lock_slice *slice;
+       int result;
 
-                LINVRNT(cl_lock_is_mutexed(lock));
-                LINVRNT(cl_lock_invariant(env, lock));
-                LASSERT(lock->cll_state == CLS_INTRANSIT);
+       do {
+               result = 0;
 
-                result = -ENOSYS;
-                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
-                                                cls_linkage) {
-                        if (slice->cls_ops->clo_unuse != NULL) {
-                                result = slice->cls_ops->clo_unuse(env, slice);
-                                if (result != 0)
-                                        break;
-                        }
-                }
-                LASSERT(result != -ENOSYS);
-        } while (result == CLO_REPEAT);
+               LINVRNT(cl_lock_is_mutexed(lock));
+               LINVRNT(cl_lock_invariant(env, lock));
+               LASSERT(lock->cll_state == CLS_INTRANSIT);
+
+               result = -ENOSYS;
+               list_for_each_entry_reverse(slice, &lock->cll_layers,
+                                           cls_linkage) {
+                       if (slice->cls_ops->clo_unuse != NULL) {
+                               result = slice->cls_ops->clo_unuse(env, slice);
+                               if (result != 0)
+                                       break;
+                       }
+               }
+               LASSERT(result != -ENOSYS);
+       } while (result == CLO_REPEAT);
 
-        return result;
+       return result;
 }
 
 /**
@@ -1105,7 +1112,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
 
         result = -ENOSYS;
         state = cl_lock_intransit(env, lock);
-        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+       list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_use != NULL) {
                         result = slice->cls_ops->clo_use(env, slice);
                         if (result != 0)
@@ -1158,7 +1165,7 @@ static int cl_enqueue_kick(const struct lu_env *env,
 
         ENTRY;
         result = -ENOSYS;
-        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+       list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_enqueue != NULL) {
                         result = slice->cls_ops->clo_enqueue(env,
                                                              slice, io, flags);
@@ -1488,7 +1495,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
                         break;
 
                 result = -ENOSYS;
-                cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+               list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                         if (slice->cls_ops->clo_wait != NULL) {
                                 result = slice->cls_ops->clo_wait(env, slice);
                                 if (result != 0)
@@ -1561,7 +1568,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
         LINVRNT(cl_lock_invariant(env, lock));
 
         pound = 0;
-        cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+       list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_weigh != NULL) {
                         ounce = slice->cls_ops->clo_weigh(env, slice);
                         pound += ounce;
@@ -1598,7 +1605,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
         LINVRNT(cl_lock_is_mutexed(lock));
         LINVRNT(cl_lock_invariant(env, lock));
 
-        cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+       list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                 if (slice->cls_ops->clo_modify != NULL) {
                         result = slice->cls_ops->clo_modify(env, slice, desc);
                         if (result != 0)
@@ -1631,7 +1638,7 @@ void cl_lock_closure_init(const struct lu_env *env,
         LINVRNT(cl_lock_is_mutexed(origin));
         LINVRNT(cl_lock_invariant(env, origin));
 
-        CFS_INIT_LIST_HEAD(&closure->clc_list);
+       INIT_LIST_HEAD(&closure->clc_list);
         closure->clc_origin = origin;
         closure->clc_wait   = wait;
         closure->clc_nr     = 0;
@@ -1660,7 +1667,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
 
         result = cl_lock_enclosure(env, lock, closure);
         if (result == 0) {
-                cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+               list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                         if (slice->cls_ops->clo_closure != NULL) {
                                 result = slice->cls_ops->clo_closure(env, slice,
                                                                      closure);
@@ -1693,10 +1700,10 @@ int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
                  * If lock->cll_inclosure is not empty, lock is already in
                  * this closure.
                  */
-                if (cfs_list_empty(&lock->cll_inclosure)) {
+               if (list_empty(&lock->cll_inclosure)) {
                         cl_lock_get_trust(lock);
                         lu_ref_add(&lock->cll_reference, "closure", closure);
-                        cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
+                       list_add(&lock->cll_inclosure, &closure->clc_list);
                         closure->clc_nr++;
                 } else
                         cl_lock_mutex_put(env, lock);
@@ -1726,19 +1733,19 @@ EXPORT_SYMBOL(cl_lock_enclosure);
 void cl_lock_disclosure(const struct lu_env *env,
                         struct cl_lock_closure *closure)
 {
-        struct cl_lock *scan;
-        struct cl_lock *temp;
-
-        cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
-        cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
-                                     cll_inclosure){
-                cfs_list_del_init(&scan->cll_inclosure);
-                cl_lock_mutex_put(env, scan);
-                lu_ref_del(&scan->cll_reference, "closure", closure);
-                cl_lock_put(env, scan);
-                closure->clc_nr--;
-        }
-        LASSERT(closure->clc_nr == 0);
+       struct cl_lock *scan;
+       struct cl_lock *temp;
+
+       cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
+       list_for_each_entry_safe(scan, temp, &closure->clc_list,
+                                cll_inclosure){
+               list_del_init(&scan->cll_inclosure);
+               cl_lock_mutex_put(env, scan);
+               lu_ref_del(&scan->cll_reference, "closure", closure);
+               cl_lock_put(env, scan);
+               closure->clc_nr--;
+       }
+       LASSERT(closure->clc_nr == 0);
 }
 EXPORT_SYMBOL(cl_lock_disclosure);
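cl_lock_disclosure() unlinks every lock while walking the closure, which is exactly what list_for_each_entry_safe() is for: the extra cursor ('temp' here) caches the next entry before the body runs, so the body may unlink or even free the current one. The idiom in isolation, with a hypothetical item type:

        struct item *scan, *temp;

        list_for_each_entry_safe(scan, temp, &some_list, linkage) {
                list_del_init(&scan->linkage);
                item_put(scan);         /* may free scan; 'temp' is safe */
        }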
 
@@ -1746,7 +1753,7 @@ EXPORT_SYMBOL(cl_lock_disclosure);
 void cl_lock_closure_fini(struct cl_lock_closure *closure)
 {
         LASSERT(closure->clc_nr == 0);
-        LASSERT(cfs_list_empty(&closure->clc_list));
+       LASSERT(list_empty(&closure->clc_list));
 }
 EXPORT_SYMBOL(cl_lock_closure_fini);
 
@@ -1869,7 +1876,7 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
        spin_lock(&head->coh_lock_guard);
         /* It is fine to match any group lock since there could be only one
          * with a uniq gid and it conflicts with all other lock modes too */
-        cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+       list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
                 if (scan != except &&
                     (scan->cll_descr.cld_mode == CLM_GROUP ||
                     cl_lock_ext_match(&scan->cll_descr, need)) &&
@@ -1911,7 +1918,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
        head = cl_object_header(obj);
 
        spin_lock(&head->coh_lock_guard);
-       while (!cfs_list_empty(&head->coh_locks)) {
+       while (!list_empty(&head->coh_locks)) {
                lock = container_of(head->coh_locks.next,
                                    struct cl_lock, cll_linkage);
                cl_lock_get_trust(lock);
@@ -2054,8 +2061,8 @@ void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
         LASSERT(lock->cll_state != CLS_FREEING);
 
         ENTRY;
-        cl_lock_hold_mod(env, lock, +1);
         cl_lock_get(lock);
+       cl_lock_hold_mod(env, lock, +1);
         lu_ref_add(&lock->cll_holders, scope, source);
         lu_ref_add(&lock->cll_reference, scope, source);
         EXIT;
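The reordering above is small but meaningful: taking the reference before bumping the hold count keeps atomic_read(&cll_ref) >= cll_holds, the invariant checked by cl_lock_invariant_trusted(), true at every intermediate step rather than only before and after the pair, which is presumably the point of the swap:

        cl_lock_get(lock);                      /* cll_ref++ first ...  */
        cl_lock_hold_mod(env, lock, +1);        /* ... then cll_holds++ */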
@@ -2129,10 +2136,8 @@ const char *cl_lock_mode_name(const enum cl_lock_mode mode)
                 [CLM_WRITE]   = "W",
                 [CLM_GROUP]   = "G"
         };
-        if (0 <= mode && mode < ARRAY_SIZE(names))
-                return names[mode];
-        else
-                return "U";
+       CLASSERT(CLM_MAX == ARRAY_SIZE(names));
+       return names[mode];
 }
 EXPORT_SYMBOL(cl_lock_mode_name);
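The mode-name cleanup trades a runtime range check for a compile-time one. CLASSERT is libcfs's build-time assertion, in the spirit of the kernel's BUILD_BUG_ON(): adding a new CLM_* mode without extending names[] now breaks the build instead of silently printing "U". The flip side is that callers must pass a valid mode, since out-of-range values now index past the array. A simplified sketch of the duplicate-case trick behind such macros (MY_BUILD_ASSERT is illustrative, not the libcfs definition verbatim):

        /* A false condition yields two 'case 0:' labels -> compile error */
        #define MY_BUILD_ASSERT(cond) \
                do { switch (0) { case 0: case (cond): break; } } while (0)

        MY_BUILD_ASSERT(CLM_MAX == ARRAY_SIZE(names));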
 
@@ -2158,13 +2163,13 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
 {
         const struct cl_lock_slice *slice;
         (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
-                   lock, cfs_atomic_read(&lock->cll_ref),
+                  lock, atomic_read(&lock->cll_ref),
                    lock->cll_state, lock->cll_error, lock->cll_holds,
                    lock->cll_users, lock->cll_flags);
         cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
         (*printer)(env, cookie, " {\n");
 
-        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+       list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                 (*printer)(env, cookie, "    %s@%p: ",
                            slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
                            slice);