Whamcloud gitweb — fs/lustre-release.git
LU-3963 libcfs: convert md[d/t]/mg[c/s] to linux atomic primitives

diff --git a/libcfs/libcfs/libcfs_lock.c b/libcfs/libcfs/libcfs_lock.c
index b9e2f25..fa8c652 100644
--- a/libcfs/libcfs/libcfs_lock.c
+++ b/libcfs/libcfs/libcfs_lock.c
@@ -21,7 +21,7 @@
  * GPL HEADER END
  */
 /* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -30,9 +30,6 @@
  * Author: liang@whamcloud.com
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_LNET
 
 #include <libcfs/libcfs.h>
@@ -49,7 +46,7 @@ cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
        cfs_percpt_free(pcl->pcl_locks);
        LIBCFS_FREE(pcl, sizeof(*pcl));
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_lock_free);
+EXPORT_SYMBOL(cfs_percpt_lock_free);
 
 /**
  * create cpu-partition lock, see libcfs_private.h for more detail.
@@ -62,7 +59,7 @@ struct cfs_percpt_lock *
 cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
 {
        struct cfs_percpt_lock  *pcl;
-       cfs_spinlock_t          *lock;
+       spinlock_t              *lock;
        int                     i;
 
        /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
@@ -78,11 +75,11 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
        }
 
        cfs_percpt_for_each(lock, i, pcl->pcl_locks)
-               cfs_spin_lock_init(lock);
+               spin_lock_init(lock);
 
        return pcl;
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_lock_alloc);
+EXPORT_SYMBOL(cfs_percpt_lock_alloc);
 
 /**
  * lock a CPU partition
@@ -109,13 +106,13 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
        }
 
        if (likely(index != CFS_PERCPT_LOCK_EX)) {
-               cfs_spin_lock(pcl->pcl_locks[index]);
+               spin_lock(pcl->pcl_locks[index]);
                return;
        }
 
        /* exclusive lock request */
        for (i = 0; i < ncpt; i++) {
-               cfs_spin_lock(pcl->pcl_locks[i]);
+               spin_lock(pcl->pcl_locks[i]);
                if (i == 0) {
                        LASSERT(!pcl->pcl_locked);
                        /* nobody should take private lock after this
@@ -124,7 +121,7 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
                }
        }
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_lock);
+EXPORT_SYMBOL(cfs_percpt_lock);
 
 /** unlock a CPU partition */
 void
@@ -136,7 +133,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
        index = ncpt == 1 ? 0 : index;
 
        if (likely(index != CFS_PERCPT_LOCK_EX)) {
-               cfs_spin_unlock(pcl->pcl_locks[index]);
+               spin_unlock(pcl->pcl_locks[index]);
                return;
        }
 
@@ -145,10 +142,10 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
                        LASSERT(pcl->pcl_locked);
                        pcl->pcl_locked = 0;
                }
-               cfs_spin_unlock(pcl->pcl_locks[i]);
+               spin_unlock(pcl->pcl_locks[i]);
        }
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_unlock);
+EXPORT_SYMBOL(cfs_percpt_unlock);
 
 #else /* !__KERNEL__ */
 # ifdef HAVE_LIBPTHREAD
@@ -189,25 +186,25 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int lock)
 struct cfs_percpt_lock *
 cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
 {
-       return (struct cfs_percpt_lock *)CFS_PERCPT_LOCK_MAGIC;
+       return ((struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
 }
 
 void
 cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
 {
-       LASSERT(pcl == (struct cfs_percpt_lock *)CFS_PERCPT_LOCK_MAGIC);
+       LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
 }
 
 void
 cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
 {
-       LASSERT(pcl == (struct cfs_percpt_lock *)CFS_PERCPT_LOCK_MAGIC);
+       LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
 }
 
 void
 cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
 {
-       LASSERT(pcl == (struct cfs_percpt_lock *)CFS_PERCPT_LOCK_MAGIC);
+       LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
 }
 
 # endif /* HAVE_LIBPTHREAD */
@@ -215,18 +212,18 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
 
 /** free cpu-partition refcount */
 void
-cfs_percpt_atomic_free(cfs_atomic_t **refs)
+cfs_percpt_atomic_free(atomic_t **refs)
 {
        cfs_percpt_free(refs);
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_atomic_free);
+EXPORT_SYMBOL(cfs_percpt_atomic_free);
 
 /** allocate cpu-partition refcount with initial value @init_val */
-cfs_atomic_t **
+atomic_t **
 cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
 {
-       cfs_atomic_t    **refs;
-       cfs_atomic_t    *ref;
+       atomic_t        **refs;
+       atomic_t        *ref;
        int             i;
 
        refs = cfs_percpt_alloc(cptab, sizeof(*ref));
@@ -234,22 +231,22 @@ cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
                return NULL;
 
        cfs_percpt_for_each(ref, i, refs)
-               cfs_atomic_set(ref, init_val);
+               atomic_set(ref, init_val);
        return refs;
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
+EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
 
 /** return sum of cpu-partition refs */
 int
-cfs_percpt_atomic_summary(cfs_atomic_t **refs)
+cfs_percpt_atomic_summary(atomic_t **refs)
 {
-       cfs_atomic_t    *ref;
+       atomic_t        *ref;
        int             i;
        int             val = 0;
 
        cfs_percpt_for_each(ref, i, refs)
-               val += cfs_atomic_read(ref);
+               val += atomic_read(ref);
 
        return val;
 }
-CFS_EXPORT_SYMBOL(cfs_percpt_atomic_summary);
+EXPORT_SYMBOL(cfs_percpt_atomic_summary);