Whamcloud - gitweb
LU-1346 libcfs: cleanup libcfs atomic primitives 59/6959/7
authorPeng Tao <tao.peng@emc.com>
Wed, 11 Sep 2013 17:17:33 +0000 (01:17 +0800)
committerOleg Drokin <oleg.drokin@intel.com>
Fri, 27 Sep 2013 07:12:09 +0000 (07:12 +0000)
Only the libcfs/ directory is converted so far. Other directories
are left for later patches.

Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: Ic379ac493ca4ead1ee6aa8aeb52b4017ee65c93c
Reviewed-on: http://review.whamcloud.com/6959
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Keith Mannthey <keith.mannthey@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
26 files changed:
contrib/scripts/libcfs_cleanup.sed
libcfs/include/libcfs/libcfs.h
libcfs/include/libcfs/libcfs_hash.h
libcfs/include/libcfs/libcfs_private.h
libcfs/include/libcfs/linux/libcfs.h
libcfs/include/libcfs/linux/linux-prim.h
libcfs/include/libcfs/lucache.h
libcfs/include/libcfs/user-lock.h
libcfs/include/libcfs/winnt/winnt-fs.h
libcfs/include/libcfs/winnt/winnt-lock.h
libcfs/include/libcfs/winnt/winnt-mem.h
libcfs/include/libcfs/winnt/winnt-prim.h
libcfs/include/libcfs/winnt/winnt-tcpip.h
libcfs/libcfs/debug.c
libcfs/libcfs/fail.c
libcfs/libcfs/hash.c
libcfs/libcfs/libcfs_lock.c
libcfs/libcfs/module.c
libcfs/libcfs/tracefile.c
libcfs/libcfs/tracefile.h
libcfs/libcfs/upcall_cache.c
libcfs/libcfs/winnt/winnt-fs.c
libcfs/libcfs/winnt/winnt-lock.c
libcfs/libcfs/winnt/winnt-mem.c
libcfs/libcfs/winnt/winnt-sync.c
libcfs/libcfs/winnt/winnt-tcpip.c

index 0b79a07..556892c 100644 (file)
@@ -593,3 +593,41 @@ s/\bcfs_groups_alloc\b/groups_alloc/g
 # Random bytes
 s/\bcfs_get_random_bytes_prim\b/get_random_bytes/g
 /#[ \t]*define[ \t]*\bget_random_bytes\b *( *\w* *, *\w* *)[ \t]*\bget_random_bytes\b *( *\w* *, *\w* *)/d
+
+# atomic
+s/\bcfs_atomic_t\b/atomic_t/g
+/typedef[ \t]*\batomic_t\b[ \t]*\batomic_t\b/d
+s/\bcfs_atomic_read\b/atomic_read/g
+/#[ \t]*define[ \t]*\batomic_read\b *( *\w* *)[ \t]*\batomic_read\b *( *\w* *)/d
+s/\bcfs_atomic_add_unless\b/atomic_add_unless/g
+s/\bcfs_atomic_cmpxchg\b/atomic_cmpxchg/g
+s/\bcfs_atomic_inc\b/atomic_inc/g
+/#[ \t]*define[ \t]*\batomic_inc\b *( *\w* *)[ \t]*\batomic_inc\b *( *\w* *)/d
+s/\bcfs_atomic_inc_and_test\b/atomic_inc_and_test/g
+/#[ \t]*define[ \t]*\batomic_inc_and_test\b *( *\w* *)[ \t]*\batomic_inc_and_test\b *( *\w* *)/d
+s/\bcfs_atomic_inc_return\b/atomic_inc_return/g
+/#[ \t]*define[ \t]*\batomic_inc_return\b *( *\w* *)[ \t]*\batomic_inc_return\b *( *\w* *)/d
+s/\bcfs_atomic_inc_not_zero\b/atomic_inc_not_zero/g
+/#[ \t]*define[ \t]*\batomic_inc_not_zero\b *( *\w* *)[ \t]*\batomic_inc_not_zero\b *( *\w* *)/d
+s/\bcfs_atomic_dec\b/atomic_dec/g
+/#[ \t]*define[ \t]*\batomic_dec\b *( *\w* *)[ \t]*\batomic_dec\b *( *\w* *)/d
+s/\bcfs_atomic_dec_and_test\b/atomic_dec_and_test/g
+/#[ \t]*define[ \t]*\batomic_dec_and_test\b *( *\w* *)[ \t]*\batomic_dec_and_test\b *( *\w* *)/d
+s/\bcfs_atomic_dec_return\b/atomic_dec_return/g
+/#[ \t]*define[ \t]*\batomic_dec_return\b *( *\w* *)[ \t]*\batomic_dec_return\b *( *\w* *)/d
+s/\bcfs_atomic_dec_and_lock\b/atomic_dec_and_lock/g
+/#[ \t]*define[ \t]*\batomic_dec_and_lock\b *( *\w* *, *\w* *)[ \t]*\batomic_dec_and_lock\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_set\b/atomic_set/g
+/#[ \t]*define[ \t]*\batomic_set\b *( *\w* *, *\w* *)[ \t]*\batomic_set\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_add\b/atomic_add/g
+/#[ \t]*define[ \t]*\batomic_add\b *( *\w* *, *\w* *)[ \t]*\batomic_add\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_add_return\b/atomic_add_return/g
+/#[ \t]*define[ \t]*\batomic_add_return\b *( *\w* *, *\w* *)[ \t]*\batomic_add_return\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_sub\b/atomic_sub/g
+/#[ \t]*define[ \t]*\batomic_sub\b *( *\w* *, *\w* *)[ \t]*\batomic_sub\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_sub_and_test\b/atomic_sub_and_test/g
+/#[ \t]*define[ \t]*\batomic_sub_and_test\b *( *\w* *, *\w* *)[ \t]*\batomic_sub_and_test\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_sub_return\b/atomic_sub_return/g
+/#[ \t]*define[ \t]*\batomic_sub_return\b *( *\w* *, *\w* *)[ \t]*\batomic_sub_return\b *( *\w* *, *\w* *)/d
+s/\bCFS_ATOMIC_INIT\b/ATOMIC_INIT/g
+/#[ \t]*define[ \t]*\bATOMIC_INIT\b *( *\w* *)[ \t]*\bATOMIC_INIT\b *( *\w* *)/d
index 8136fde..72c2cc6 100644 (file)
@@ -92,6 +92,31 @@ static inline int __is_po2(unsigned long long val)
 
 
 /*
+ * atomic
+ */
+
+typedef atomic_t cfs_atomic_t;
+
+#define cfs_atomic_read(atom)                atomic_read(atom)
+#define cfs_atomic_inc(atom)                 atomic_inc(atom)
+#define cfs_atomic_inc_and_test(atom)        atomic_inc_and_test(atom)
+#define cfs_atomic_inc_return(atom)          atomic_inc_return(atom)
+#define cfs_atomic_inc_not_zero(atom)        atomic_inc_not_zero(atom)
+#define cfs_atomic_add_unless(atom, a, u)    atomic_add_unless(atom, a, u)
+#define cfs_atomic_dec(atom)                 atomic_dec(atom)
+#define cfs_atomic_dec_and_test(atom)        atomic_dec_and_test(atom)
+#define cfs_atomic_dec_and_lock(atom, lock)  atomic_dec_and_lock(atom, lock)
+#define cfs_atomic_dec_return(atom)          atomic_dec_return(atom)
+#define cfs_atomic_set(atom, value)          atomic_set(atom, value)
+#define cfs_atomic_add(value, atom)          atomic_add(value, atom)
+#define cfs_atomic_add_return(value, atom)   atomic_add_return(value, atom)
+#define cfs_atomic_sub(value, atom)          atomic_sub(value, atom)
+#define cfs_atomic_sub_and_test(value, atom) atomic_sub_and_test(value, atom)
+#define cfs_atomic_sub_return(value, atom)   atomic_sub_return(value, atom)
+#define cfs_atomic_cmpxchg(atom, old, nv)    atomic_cmpxchg(atom, old, nv)
+#define CFS_ATOMIC_INIT(i)                   ATOMIC_INIT(i)
+
+/*
  * Some (nomina odiosa sunt) platforms define NULL as naked 0. This confuses
  * Lustre RETURN(NULL) macro.
  */
index babf3eb..5e7c9de 100644 (file)
@@ -272,12 +272,12 @@ typedef struct cfs_hash {
         /** hash list operations */
         struct cfs_hash_hlist_ops  *hs_hops;
         /** hash buckets-table */
-        cfs_hash_bucket_t         **hs_buckets;
-        /** total number of items on this hash-table */
-        cfs_atomic_t                hs_count;
-        /** hash flags, see cfs_hash_tag for detail */
-        __u16                       hs_flags;
-        /** # of extra-bytes for bucket, for user saving extended attributes */
+       cfs_hash_bucket_t         **hs_buckets;
+       /** total number of items on this hash-table */
+       atomic_t                hs_count;
+       /** hash flags, see cfs_hash_tag for detail */
+       __u16                       hs_flags;
+       /** # of extra-bytes for bucket, for user saving extended attributes */
         __u16                       hs_extra_bytes;
         /** wants to iterate */
         __u8                        hs_iterating;
@@ -301,12 +301,12 @@ typedef struct cfs_hash {
         __u32                       hs_rehash_count;
         /** # of iterators (caller of cfs_hash_for_each_*) */
         __u32                       hs_iterators;
-        /** rehash workitem */
-        cfs_workitem_t              hs_rehash_wi;
-        /** refcount on this hash table */
-        cfs_atomic_t                hs_refcount;
-        /** rehash buckets-table */
-        cfs_hash_bucket_t         **hs_rehash_buckets;
+       /** rehash workitem */
+       cfs_workitem_t              hs_rehash_wi;
+       /** refcount on this hash table */
+       atomic_t                    hs_refcount;
+       /** rehash buckets-table */
+       cfs_hash_bucket_t         **hs_rehash_buckets;
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
         /** serialize debug members */
        spinlock_t                      hs_dep_lock;
@@ -583,10 +583,10 @@ static inline void cfs_hash_unlock(cfs_hash_t *hs, int excl)
 }
 
 static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
-                                        cfs_atomic_t *condition)
+                                       atomic_t *condition)
 {
-        LASSERT(cfs_hash_with_no_bktlock(hs));
-        return cfs_atomic_dec_and_lock(condition, &hs->hs_lock.spin);
+       LASSERT(cfs_hash_with_no_bktlock(hs));
+       return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
 }
 
 static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
@@ -673,11 +673,10 @@ void cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
                              cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode);
 
 static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                                           cfs_atomic_t *condition)
+                                          atomic_t *condition)
 {
-        LASSERT(cfs_hash_with_spin_bktlock(hs));
-        return cfs_atomic_dec_and_lock(condition,
-                                       &bd->bd_bucket->hsb_lock.spin);
+       LASSERT(cfs_hash_with_spin_bktlock(hs));
+       return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
 }
 
 static inline cfs_hlist_head_t *cfs_hash_bd_hhead(cfs_hash_t *hs,
@@ -831,8 +830,8 @@ static inline int __cfs_hash_theta_frac(int theta)
 
 static inline int __cfs_hash_theta(cfs_hash_t *hs)
 {
-        return (cfs_atomic_read(&hs->hs_count) <<
-                CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
+       return (atomic_read(&hs->hs_count) <<
+               CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
 }
 
 static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
index 9c9f3c2..42f1bc5 100644 (file)
@@ -128,7 +128,7 @@ do {                                                                    \
         lbug_with_loc(&msgdata);                                        \
 } while(0)
 
-extern cfs_atomic_t libcfs_kmemory;
+extern atomic_t libcfs_kmemory;
 /*
  * Memory
  */
@@ -136,16 +136,16 @@ extern cfs_atomic_t libcfs_kmemory;
 
 # define libcfs_kmem_inc(ptr, size)            \
 do {                                           \
-       cfs_atomic_add(size, &libcfs_kmemory);  \
+       atomic_add(size, &libcfs_kmemory);      \
 } while (0)
 
 # define libcfs_kmem_dec(ptr, size)            \
 do {                                           \
-       cfs_atomic_sub(size, &libcfs_kmemory);  \
+       atomic_sub(size, &libcfs_kmemory);      \
 } while (0)
 
 # define libcfs_kmem_read()                    \
-       cfs_atomic_read(&libcfs_kmemory)
+       atomic_read(&libcfs_kmemory)
 
 #else
 # define libcfs_kmem_inc(ptr, size) do {} while (0)
@@ -353,71 +353,71 @@ void  cfs_array_free(void *vars);
 /** assert value of @a is equal to @v */
 #define LASSERT_ATOMIC_EQ(a, v)                                 \
 do {                                                            \
-        LASSERTF(cfs_atomic_read(a) == v,                       \
-                 "value: %d\n", cfs_atomic_read((a)));          \
+       LASSERTF(atomic_read(a) == v,                       \
+                "value: %d\n", atomic_read((a)));          \
 } while (0)
 
 /** assert value of @a is unequal to @v */
 #define LASSERT_ATOMIC_NE(a, v)                                 \
 do {                                                            \
-        LASSERTF(cfs_atomic_read(a) != v,                       \
-                 "value: %d\n", cfs_atomic_read((a)));          \
+       LASSERTF(atomic_read(a) != v,                       \
+                "value: %d\n", atomic_read((a)));          \
 } while (0)
 
 /** assert value of @a is little than @v */
 #define LASSERT_ATOMIC_LT(a, v)                                 \
 do {                                                            \
-        LASSERTF(cfs_atomic_read(a) < v,                        \
-                 "value: %d\n", cfs_atomic_read((a)));          \
+       LASSERTF(atomic_read(a) < v,                        \
+                "value: %d\n", atomic_read((a)));          \
 } while (0)
 
 /** assert value of @a is little/equal to @v */
 #define LASSERT_ATOMIC_LE(a, v)                                 \
 do {                                                            \
-        LASSERTF(cfs_atomic_read(a) <= v,                       \
-                 "value: %d\n", cfs_atomic_read((a)));          \
+       LASSERTF(atomic_read(a) <= v,                       \
+                "value: %d\n", atomic_read((a)));          \
 } while (0)
 
 /** assert value of @a is great than @v */
 #define LASSERT_ATOMIC_GT(a, v)                                 \
 do {                                                            \
-        LASSERTF(cfs_atomic_read(a) > v,                        \
-                 "value: %d\n", cfs_atomic_read((a)));          \
+       LASSERTF(atomic_read(a) > v,                        \
+                "value: %d\n", atomic_read((a)));          \
 } while (0)
 
 /** assert value of @a is great/equal to @v */
 #define LASSERT_ATOMIC_GE(a, v)                                 \
 do {                                                            \
-        LASSERTF(cfs_atomic_read(a) >= v,                       \
-                 "value: %d\n", cfs_atomic_read((a)));          \
+       LASSERTF(atomic_read(a) >= v,                       \
+                "value: %d\n", atomic_read((a)));          \
 } while (0)
 
 /** assert value of @a is great than @v1 and little than @v2 */
 #define LASSERT_ATOMIC_GT_LT(a, v1, v2)                         \
 do {                                                            \
-        int __v = cfs_atomic_read(a);                           \
-        LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v);     \
+       int __v = atomic_read(a);                           \
+       LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v);     \
 } while (0)
 
 /** assert value of @a is great than @v1 and little/equal to @v2 */
 #define LASSERT_ATOMIC_GT_LE(a, v1, v2)                         \
 do {                                                            \
-        int __v = cfs_atomic_read(a);                           \
-        LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v);    \
+       int __v = atomic_read(a);                           \
+       LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v);    \
 } while (0)
 
 /** assert value of @a is great/equal to @v1 and little than @v2 */
 #define LASSERT_ATOMIC_GE_LT(a, v1, v2)                         \
 do {                                                            \
-        int __v = cfs_atomic_read(a);                           \
-        LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v);    \
+       int __v = atomic_read(a);                           \
+       LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v);    \
 } while (0)
 
 /** assert value of @a is great/equal to @v1 and little/equal to @v2 */
 #define LASSERT_ATOMIC_GE_LE(a, v1, v2)                         \
 do {                                                            \
-        int __v = cfs_atomic_read(a);                           \
-        LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v);   \
+       int __v = atomic_read(a);                           \
+       LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v);   \
 } while (0)
 
 #else /* !LASSERT_ATOMIC_ENABLED */
@@ -513,11 +513,11 @@ void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
 /* unlock private lock \a index of \a pcl */
 void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
 /* create percpt (atomic) refcount based on @cptab */
-cfs_atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
+atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
 /* destroy percpt refcount */
-void cfs_percpt_atomic_free(cfs_atomic_t **refs);
+void cfs_percpt_atomic_free(atomic_t **refs);
 /* return sum of all percpu refs */
-int cfs_percpt_atomic_summary(cfs_atomic_t **refs);
+int cfs_percpt_atomic_summary(atomic_t **refs);
 
 
 /** Compile-time assertion.
index 2ee0c09..13bcd3a 100644 (file)
@@ -65,7 +65,6 @@
 #include <linux/sched.h> /* THREAD_SIZE */
 #include <linux/rbtree.h>
 
-
 #if !defined(__x86_64__)
 # ifdef  __ia64__
 #  define CDEBUG_STACK() (THREAD_SIZE -                                 \
index bcc8629..b59865b 100644 (file)
@@ -174,29 +174,4 @@ static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
         module_init(init);                    \
         module_exit(fini)
 
-/*
- * atomic
- */
-
-typedef atomic_t cfs_atomic_t;
-
-#define cfs_atomic_read(atom)                atomic_read(atom)
-#define cfs_atomic_inc(atom)                 atomic_inc(atom)
-#define cfs_atomic_inc_and_test(atom)        atomic_inc_and_test(atom)
-#define cfs_atomic_inc_return(atom)          atomic_inc_return(atom)
-#define cfs_atomic_inc_not_zero(atom)        atomic_inc_not_zero(atom)
-#define cfs_atomic_add_unless(atom, a, u)    atomic_add_unless(atom, a, u)
-#define cfs_atomic_dec(atom)                 atomic_dec(atom)
-#define cfs_atomic_dec_and_test(atom)        atomic_dec_and_test(atom)
-#define cfs_atomic_dec_and_lock(atom, lock)  atomic_dec_and_lock(atom, lock)
-#define cfs_atomic_dec_return(atom)          atomic_dec_return(atom)
-#define cfs_atomic_set(atom, value)          atomic_set(atom, value)
-#define cfs_atomic_add(value, atom)          atomic_add(value, atom)
-#define cfs_atomic_add_return(value, atom)   atomic_add_return(value, atom)
-#define cfs_atomic_sub(value, atom)          atomic_sub(value, atom)
-#define cfs_atomic_sub_and_test(value, atom) atomic_sub_and_test(value, atom)
-#define cfs_atomic_sub_return(value, atom)   atomic_sub_return(value, atom)
-#define cfs_atomic_cmpxchg(atom, old, nv)    atomic_cmpxchg(atom, old, nv)
-#define CFS_ATOMIC_INIT(i)                   ATOMIC_INIT(i)
-
 #endif
index 754beda..3446378 100644 (file)
@@ -85,7 +85,7 @@ struct md_identity {
 struct upcall_cache_entry {
        cfs_list_t              ue_hash;
        __u64                   ue_key;
-       cfs_atomic_t            ue_refcount;
+       atomic_t                ue_refcount;
        int                     ue_flags;
        wait_queue_head_t       ue_waitq;
        cfs_time_t              ue_acquire_expire;
index 0cac240..d0ff4ff 100644 (file)
@@ -251,26 +251,26 @@ static inline void read_unlock_irqrestore(rwlock_t *l, unsigned long f)
 /*
  * Atomic for single-threaded user-space
  */
-typedef struct { volatile int counter; } cfs_atomic_t;
-
-#define CFS_ATOMIC_INIT(i) { (i) }
-
-#define cfs_atomic_read(a) ((a)->counter)
-#define cfs_atomic_set(a,b) do {(a)->counter = b; } while (0)
-#define cfs_atomic_dec_and_test(a) ((--((a)->counter)) == 0)
-#define cfs_atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
-#define cfs_atomic_inc(a)  (((a)->counter)++)
-#define cfs_atomic_dec(a)  do { (a)->counter--; } while (0)
-#define cfs_atomic_add(b,a)  do {(a)->counter += b;} while (0)
-#define cfs_atomic_add_return(n,a) ((a)->counter += n)
-#define cfs_atomic_inc_return(a) cfs_atomic_add_return(1,a)
-#define cfs_atomic_sub(b,a)  do {(a)->counter -= b;} while (0)
-#define cfs_atomic_sub_return(n,a) ((a)->counter -= n)
-#define cfs_atomic_dec_return(a)  cfs_atomic_sub_return(1,a)
-#define cfs_atomic_add_unless(v, a, u) \
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(a) ((a)->counter)
+#define atomic_set(a,b) do {(a)->counter = b; } while (0)
+#define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
+#define atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
+#define atomic_inc(a)  (((a)->counter)++)
+#define atomic_dec(a)  do { (a)->counter--; } while (0)
+#define atomic_add(b,a)  do {(a)->counter += b;} while (0)
+#define atomic_add_return(n,a) ((a)->counter += n)
+#define atomic_inc_return(a) atomic_add_return(1,a)
+#define atomic_sub(b,a)  do {(a)->counter -= b;} while (0)
+#define atomic_sub_return(n,a) ((a)->counter -= n)
+#define atomic_dec_return(a)  atomic_sub_return(1,a)
+#define atomic_add_unless(v, a, u) \
         ((v)->counter != u ? (v)->counter += a : 0)
-#define cfs_atomic_inc_not_zero(v) cfs_atomic_add_unless((v), 1, 0)
-#define cfs_atomic_cmpxchg(v, ov, nv) \
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic_cmpxchg(v, ov, nv) \
        ((v)->counter == ov ? ((v)->counter = nv, ov) : (v)->counter)
 
 #ifdef HAVE_LIBPTHREAD
index 36809aa..41b207c 100644 (file)
@@ -247,13 +247,13 @@ struct inode {
 #define I_FREEING       0x0001
 
 struct dentry {
-        cfs_atomic_t    d_count;
-        struct {
-            int         len;
-            char *      name;
-        } d_name;
-        struct inode *  d_inode;
-        struct dentry*  d_parent;
+       atomic_t    d_count;
+       struct {
+           int         len;
+           char *      name;
+       } d_name;
+       struct inode *  d_inode;
+       struct dentry*  d_parent;
 };
 
 extern struct dentry *dget(struct dentry *de);
index 11371cc..fc39dd8 100644 (file)
@@ -64,31 +64,31 @@ typedef struct spin_lock spinlock_t;
 
 /* atomic */
 
-typedef struct { volatile int counter; } cfs_atomic_t;
+typedef struct { volatile int counter; } atomic_t;
 
-#define CFS_ATOMIC_INIT(i)     { i }
+#define ATOMIC_INIT(i) { i }
 
-#define cfs_atomic_read(v)     ((v)->counter)
-#define cfs_atomic_set(v,i)    (((v)->counter) = (i))
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i)        (((v)->counter) = (i))
 
-void FASTCALL cfs_atomic_add(int i, cfs_atomic_t *v);
-void FASTCALL cfs_atomic_sub(int i, cfs_atomic_t *v);
+void FASTCALL atomic_add(int i, atomic_t *v);
+void FASTCALL atomic_sub(int i, atomic_t *v);
 
-int FASTCALL cfs_atomic_sub_and_test(int i, cfs_atomic_t *v);
+int FASTCALL atomic_sub_and_test(int i, atomic_t *v);
 
-void FASTCALL cfs_atomic_inc(cfs_atomic_t *v);
-void FASTCALL cfs_atomic_dec(cfs_atomic_t *v);
+void FASTCALL atomic_inc(atomic_t *v);
+void FASTCALL atomic_dec(atomic_t *v);
 
-int FASTCALL cfs_atomic_dec_and_test(cfs_atomic_t *v);
-int FASTCALL cfs_atomic_inc_and_test(cfs_atomic_t *v);
+int FASTCALL atomic_dec_and_test(atomic_t *v);
+int FASTCALL atomic_inc_and_test(atomic_t *v);
 
-int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v);
-int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);
+int FASTCALL atomic_add_return(int i, atomic_t *v);
+int FASTCALL atomic_sub_return(int i, atomic_t *v);
 
-#define cfs_atomic_inc_return(v)  cfs_atomic_add_return(1, v)
-#define cfs_atomic_dec_return(v)  cfs_atomic_sub_return(1, v)
+#define atomic_inc_return(v)  atomic_add_return(1, v)
+#define atomic_dec_return(v)  atomic_sub_return(1, v)
 
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock);
+int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock);
 
 /* event */
 
index 668537c..205fc2c 100644 (file)
@@ -63,7 +63,7 @@
 
 struct page {
     void *          addr;
-    cfs_atomic_t    count;
+    atomic_t    count;
     void *          private;
     void *          mapping;
     __u32           index;
@@ -206,17 +206,17 @@ static inline void kunmap(struct page *page)
 
 static inline void get_page(struct page *page)
 {
-    cfs_atomic_inc(&page->count);
+    atomic_inc(&page->count);
 }
 
 static inline void cfs_put_page(struct page *page)
 {
-    cfs_atomic_dec(&page->count);
+    atomic_dec(&page->count);
 }
 
 static inline int page_count(struct page *page)
 {
-    return cfs_atomic_read(&page->count);
+    return atomic_read(&page->count);
 }
 
 #define page_index(p)       ((p)->index)
index 64238e2..153a9a8 100644 (file)
@@ -412,7 +412,7 @@ struct cfs_waitlink {
     unsigned int            magic;
     int                     flags;
     event_t  *              event;
-    cfs_atomic_t *          hits;
+    atomic_t *          hits;
 
     cfs_waitlink_channel_t  waitq[CFS_WAITQ_CHANNELS];
 };
@@ -456,18 +456,18 @@ typedef struct _cfs_thread_context {
 #define NGROUPS_PER_BLOCK       ((int)(PAGE_SIZE / sizeof(gid_t)))
 struct group_info {
        int ngroups;
-       cfs_atomic_t usage;
+       atomic_t usage;
        gid_t small_block[NGROUPS_SMALL];
        int nblocks;
        gid_t *blocks[0];
 };
 
 #define get_group_info(group_info) do { \
-        cfs_atomic_inc(&(group_info)->usage); \
+       atomic_inc(&(group_info)->usage); \
 } while (0)
 
 #define put_group_info(group_info) do { \
-        if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
+       if (atomic_dec_and_test(&(group_info)->usage)) \
                groups_free(group_info); \
 } while (0)
 
@@ -608,8 +608,8 @@ typedef struct _TASK_SLOT {
     HANDLE          Tid;        /* Thread id */
     PETHREAD        Tet;        /* Pointer to ethread */
 
-    cfs_atomic_t    count;      /* refer count */
-    cfs_atomic_t    hits;       /* times of waken event singaled */
+    atomic_t    count;      /* refer count */
+    atomic_t    hits;       /* times of waken event singaled */
 
     KIRQL           irql;       /* irql for rwlock ... */
 
index 85c5695..8da448a 100644 (file)
@@ -431,13 +431,13 @@ struct socket {
         ks_tconn_type_t             kstc_type;          /* tdi connection Type */
         ks_tconn_state_t            kstc_state;      /* tdi connection state flag */
 
-        ks_unicode_name_t           kstc_dev;        /* tcp transport device name */
+       ks_unicode_name_t           kstc_dev;        /* tcp transport device name */
 
-        ks_tdi_addr_t               kstc_addr;       /* local address handlers / Objects */
+       ks_tdi_addr_t               kstc_addr;       /* local address handlers / Objects */
 
-        cfs_atomic_t                kstc_refcount;   /* reference count of ks_tconn_t */
+       atomic_t                    kstc_refcount;   /* reference count of ks_tconn_t */
 
-        cfs_list_t                  kstc_list;       /* linked to global ksocknal_data */
+       cfs_list_t                  kstc_list;       /* linked to global ksocknal_data */
 
         union {
 
index b3fe843..687c476 100644 (file)
@@ -107,7 +107,7 @@ CFS_MODULE_PARM(libcfs_panic_on_lbug, "i", uint, 0644,
                 "Lustre kernel panic on LBUG");
 EXPORT_SYMBOL(libcfs_panic_on_lbug);
 
-cfs_atomic_t libcfs_kmemory = CFS_ATOMIC_INIT(0);
+atomic_t libcfs_kmemory = ATOMIC_INIT(0);
 EXPORT_SYMBOL(libcfs_kmemory);
 
 static wait_queue_head_t debug_ctlwq;
index e11caff..509c3c2 100644 (file)
@@ -51,75 +51,75 @@ EXPORT_SYMBOL(cfs_race_state);
 
 int __cfs_fail_check_set(__u32 id, __u32 value, int set)
 {
-        static cfs_atomic_t cfs_fail_count = CFS_ATOMIC_INIT(0);
-
-        LASSERT(!(id & CFS_FAIL_ONCE));
-
-        if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
-            (CFS_FAILED | CFS_FAIL_ONCE)) {
-                cfs_atomic_set(&cfs_fail_count, 0); /* paranoia */
-                return 0;
-        }
-
-        /* Fail 1/cfs_fail_val times */
-        if (cfs_fail_loc & CFS_FAIL_RAND) {
-                if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
-                        return 0;
-        }
-
-        /* Skip the first cfs_fail_val, then fail */
-        if (cfs_fail_loc & CFS_FAIL_SKIP) {
-                if (cfs_atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
-                        return 0;
-        }
-
-        /* check cfs_fail_val... */
-        if (set == CFS_FAIL_LOC_VALUE) {
-                if (cfs_fail_val != -1 && cfs_fail_val != value)
-                        return 0;
-        }
-
-        /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
-        if (cfs_fail_loc & CFS_FAIL_SOME &&
-            (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
-                int count = cfs_atomic_inc_return(&cfs_fail_count);
-
-                if (count >= cfs_fail_val) {
+       static atomic_t cfs_fail_count = ATOMIC_INIT(0);
+
+       LASSERT(!(id & CFS_FAIL_ONCE));
+
+       if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
+           (CFS_FAILED | CFS_FAIL_ONCE)) {
+               atomic_set(&cfs_fail_count, 0); /* paranoia */
+               return 0;
+       }
+
+       /* Fail 1/cfs_fail_val times */
+       if (cfs_fail_loc & CFS_FAIL_RAND) {
+               if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
+                       return 0;
+       }
+
+       /* Skip the first cfs_fail_val, then fail */
+       if (cfs_fail_loc & CFS_FAIL_SKIP) {
+               if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
+                       return 0;
+       }
+
+       /* check cfs_fail_val... */
+       if (set == CFS_FAIL_LOC_VALUE) {
+               if (cfs_fail_val != -1 && cfs_fail_val != value)
+                       return 0;
+       }
+
+       /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
+       if (cfs_fail_loc & CFS_FAIL_SOME &&
+           (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
+               int count = atomic_inc_return(&cfs_fail_count);
+
+               if (count >= cfs_fail_val) {
                        set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
-                        cfs_atomic_set(&cfs_fail_count, 0);
-                        /* we are lost race to increase  */
-                        if (count > cfs_fail_val)
-                                return 0;
-                }
-        }
-
-        if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
-            (value & CFS_FAIL_ONCE))
+                       atomic_set(&cfs_fail_count, 0);
+                       /* we are lost race to increase  */
+                       if (count > cfs_fail_val)
+                               return 0;
+               }
+       }
+
+       if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
+           (value & CFS_FAIL_ONCE))
                set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
-        /* Lost race to set CFS_FAILED_BIT. */
+       /* Lost race to set CFS_FAILED_BIT. */
        if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
-                /* If CFS_FAIL_ONCE is valid, only one process can fail,
-                 * otherwise multi-process can fail at the same time. */
-                if (cfs_fail_loc & CFS_FAIL_ONCE)
-                        return 0;
-        }
-
-        switch (set) {
-                case CFS_FAIL_LOC_NOSET:
-                case CFS_FAIL_LOC_VALUE:
-                        break;
-                case CFS_FAIL_LOC_ORSET:
-                        cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
-                        break;
-                case CFS_FAIL_LOC_RESET:
-                        cfs_fail_loc = value;
-                        break;
-                default:
-                        LASSERTF(0, "called with bad set %u\n", set);
-                        break;
-        }
-
-        return 1;
+               /* If CFS_FAIL_ONCE is valid, only one process can fail,
+                * otherwise multi-process can fail at the same time. */
+               if (cfs_fail_loc & CFS_FAIL_ONCE)
+                       return 0;
+       }
+
+       switch (set) {
+               case CFS_FAIL_LOC_NOSET:
+               case CFS_FAIL_LOC_VALUE:
+                       break;
+               case CFS_FAIL_LOC_ORSET:
+                       cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
+                       break;
+               case CFS_FAIL_LOC_RESET:
+                       cfs_fail_loc = value;
+                       break;
+               default:
+                       LASSERTF(0, "called with bad set %u\n", set);
+                       break;
+       }
+
+       return 1;
 }
 EXPORT_SYMBOL(__cfs_fail_check_set);
 
index b8aaa12..bb3ab35 100644 (file)
@@ -519,42 +519,42 @@ cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
 
 void
 cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                       cfs_hlist_node_t *hnode)
+                      cfs_hlist_node_t *hnode)
 {
-        int                rc;
+       int                rc;
 
-        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
-        cfs_hash_bd_dep_record(hs, bd, rc);
-        bd->bd_bucket->hsb_version++;
-        if (unlikely(bd->bd_bucket->hsb_version == 0))
-                bd->bd_bucket->hsb_version++;
-        bd->bd_bucket->hsb_count++;
+       rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
+       cfs_hash_bd_dep_record(hs, bd, rc);
+       bd->bd_bucket->hsb_version++;
+       if (unlikely(bd->bd_bucket->hsb_version == 0))
+               bd->bd_bucket->hsb_version++;
+       bd->bd_bucket->hsb_count++;
 
-        if (cfs_hash_with_counter(hs))
-                cfs_atomic_inc(&hs->hs_count);
-        if (!cfs_hash_with_no_itemref(hs))
-                cfs_hash_get(hs, hnode);
+       if (cfs_hash_with_counter(hs))
+               atomic_inc(&hs->hs_count);
+       if (!cfs_hash_with_no_itemref(hs))
+               cfs_hash_get(hs, hnode);
 }
 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
 
 void
 cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                       cfs_hlist_node_t *hnode)
+                      cfs_hlist_node_t *hnode)
 {
-        hs->hs_hops->hop_hnode_del(hs, bd, hnode);
+       hs->hs_hops->hop_hnode_del(hs, bd, hnode);
 
-        LASSERT(bd->bd_bucket->hsb_count > 0);
-        bd->bd_bucket->hsb_count--;
-        bd->bd_bucket->hsb_version++;
-        if (unlikely(bd->bd_bucket->hsb_version == 0))
-                bd->bd_bucket->hsb_version++;
+       LASSERT(bd->bd_bucket->hsb_count > 0);
+       bd->bd_bucket->hsb_count--;
+       bd->bd_bucket->hsb_version++;
+       if (unlikely(bd->bd_bucket->hsb_version == 0))
+               bd->bd_bucket->hsb_version++;
 
-        if (cfs_hash_with_counter(hs)) {
-                LASSERT(cfs_atomic_read(&hs->hs_count) > 0);
-                cfs_atomic_dec(&hs->hs_count);
-        }
-        if (!cfs_hash_with_no_itemref(hs))
-                cfs_hash_put_locked(hs, hnode);
+       if (cfs_hash_with_counter(hs)) {
+               LASSERT(atomic_read(&hs->hs_count) > 0);
+               atomic_dec(&hs->hs_count);
+       }
+       if (!cfs_hash_with_no_itemref(hs))
+               cfs_hash_put_locked(hs, hnode);
 }
 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
 
@@ -1056,15 +1056,15 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
         if (hs == NULL)
                 RETURN(NULL);
 
-        strncpy(hs->hs_name, name, len);
-        hs->hs_name[len - 1] = '\0';
-        hs->hs_flags = flags;
+       strncpy(hs->hs_name, name, len);
+       hs->hs_name[len - 1] = '\0';
+       hs->hs_flags = flags;
 
-        cfs_atomic_set(&hs->hs_refcount, 1);
-        cfs_atomic_set(&hs->hs_count, 0);
+       atomic_set(&hs->hs_refcount, 1);
+       atomic_set(&hs->hs_count, 0);
 
-        cfs_hash_lock_setup(hs);
-        cfs_hash_hlist_setup(hs);
+       cfs_hash_lock_setup(hs);
+       cfs_hash_hlist_setup(hs);
 
         hs->hs_cur_bits = (__u8)cur_bits;
         hs->hs_min_bits = (__u8)cur_bits;
@@ -1144,29 +1144,29 @@ cfs_hash_destroy(cfs_hash_t *hs)
                cond_resched();
        }
 
-        LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
+       LASSERT(atomic_read(&hs->hs_count) == 0);
 
-        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
-                              0, CFS_HASH_NBKT(hs));
-        i = cfs_hash_with_bigname(hs) ?
-            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
-        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
+       cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
+                             0, CFS_HASH_NBKT(hs));
+       i = cfs_hash_with_bigname(hs) ?
+           CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
+       LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
 
-        EXIT;
+       EXIT;
 }
 
 cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
 {
-        if (cfs_atomic_inc_not_zero(&hs->hs_refcount))
-                return hs;
-        return NULL;
+       if (atomic_inc_not_zero(&hs->hs_refcount))
+               return hs;
+       return NULL;
 }
 EXPORT_SYMBOL(cfs_hash_getref);
 
 void cfs_hash_putref(cfs_hash_t *hs)
 {
-        if (cfs_atomic_dec_and_test(&hs->hs_refcount))
-                cfs_hash_destroy(hs);
+       if (atomic_dec_and_test(&hs->hs_refcount))
+               cfs_hash_destroy(hs);
 }
 EXPORT_SYMBOL(cfs_hash_putref);
 
@@ -1210,8 +1210,8 @@ cfs_hash_rehash_bits(cfs_hash_t *hs)
 static inline int
 cfs_hash_rehash_inline(cfs_hash_t *hs)
 {
-        return !cfs_hash_with_nblk_change(hs) &&
-               cfs_atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
+       return !cfs_hash_with_nblk_change(hs) &&
+              atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
 }
 
 /**
@@ -1409,22 +1409,22 @@ cfs_hash_for_each_enter(cfs_hash_t *hs)
 static void
 cfs_hash_for_each_exit(cfs_hash_t *hs)
 {
-        int remained;
-        int bits;
+       int remained;
+       int bits;
 
-        if (!cfs_hash_with_rehash(hs))
-                return;
-        cfs_hash_lock(hs, 1);
-        remained = --hs->hs_iterators;
-        bits = cfs_hash_rehash_bits(hs);
-        cfs_hash_unlock(hs, 1);
-        /* NB: it's race on cfs_has_t::hs_iterating, see above */
-        if (remained == 0)
-                hs->hs_iterating = 0;
-        if (bits > 0) {
-                cfs_hash_rehash(hs, cfs_atomic_read(&hs->hs_count) <
-                                    CFS_HASH_LOOP_HOG);
-        }
+       if (!cfs_hash_with_rehash(hs))
+               return;
+       cfs_hash_lock(hs, 1);
+       remained = --hs->hs_iterators;
+       bits = cfs_hash_rehash_bits(hs);
+       cfs_hash_unlock(hs, 1);
+       /* NB: it's race on cfs_hash_t::hs_iterating, see above */
+       if (remained == 0)
+               hs->hs_iterating = 0;
+       if (bits > 0) {
+               cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
+                                   CFS_HASH_LOOP_HOG);
+       }
 }
 
 /**
@@ -1561,9 +1561,9 @@ EXPORT_SYMBOL(cfs_hash_is_empty);
 __u64
 cfs_hash_size_get(cfs_hash_t *hs)
 {
-        return cfs_hash_with_counter(hs) ?
-               cfs_atomic_read(&hs->hs_count) :
-               cfs_hash_for_each_tight(hs, NULL, NULL, 0);
+       return cfs_hash_with_counter(hs) ?
+              atomic_read(&hs->hs_count) :
+              cfs_hash_for_each_tight(hs, NULL, NULL, 0);
 }
 EXPORT_SYMBOL(cfs_hash_size_get);
 
index 799f91d..fa8c652 100644 (file)
@@ -212,18 +212,18 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
 
 /** free cpu-partition refcount */
 void
-cfs_percpt_atomic_free(cfs_atomic_t **refs)
+cfs_percpt_atomic_free(atomic_t **refs)
 {
        cfs_percpt_free(refs);
 }
 EXPORT_SYMBOL(cfs_percpt_atomic_free);
 
 /** allocate cpu-partition refcount with initial value @init_val */
-cfs_atomic_t **
+atomic_t **
 cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
 {
-       cfs_atomic_t    **refs;
-       cfs_atomic_t    *ref;
+       atomic_t        **refs;
+       atomic_t        *ref;
        int             i;
 
        refs = cfs_percpt_alloc(cptab, sizeof(*ref));
@@ -231,21 +231,21 @@ cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
                return NULL;
 
        cfs_percpt_for_each(ref, i, refs)
-               cfs_atomic_set(ref, init_val);
+               atomic_set(ref, init_val);
        return refs;
 }
 EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
 
 /** return sum of cpu-partition refs */
 int
-cfs_percpt_atomic_summary(cfs_atomic_t **refs)
+cfs_percpt_atomic_summary(atomic_t **refs)
 {
-       cfs_atomic_t    *ref;
+       atomic_t        *ref;
        int             i;
        int             val = 0;
 
        cfs_percpt_for_each(ref, i, refs)
-               val += cfs_atomic_read(ref);
+               val += atomic_read(ref);
 
        return val;
 }
index 04c9ea5..20c63be 100644 (file)
@@ -462,7 +462,7 @@ static void exit_libcfs_module(void)
        remove_proc();
 
        CDEBUG(D_MALLOC, "before Portals cleanup: kmem %d\n",
-              cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
        if (cfs_sched_rehash != NULL) {
                cfs_wi_sched_destroy(cfs_sched_rehash);
@@ -481,9 +481,9 @@ static void exit_libcfs_module(void)
 #endif
        cfs_cpu_fini();
 
-       if (cfs_atomic_read(&libcfs_kmemory) != 0)
+       if (atomic_read(&libcfs_kmemory) != 0)
                CERROR("Portals memory leaked: %d bytes\n",
-                      cfs_atomic_read(&libcfs_kmemory));
+                      atomic_read(&libcfs_kmemory));
 
        rc = libcfs_debug_cleanup();
        if (rc)
index 29a65ef..34565c1 100644 (file)
@@ -55,7 +55,7 @@ static struct tracefiled_ctl trace_tctl;
 struct mutex cfs_trace_thread_mutex;
 static int thread_running = 0;
 
-cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
+atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
 
 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                          struct cfs_trace_cpu_data *tcd);
@@ -91,7 +91,7 @@ static struct cfs_trace_page *cfs_tage_alloc(int gfp)
        }
 
        tage->page = page;
-       cfs_atomic_inc(&cfs_tage_allocated);
+       atomic_inc(&cfs_tage_allocated);
        return tage;
 }
 
@@ -102,7 +102,7 @@ static void cfs_tage_free(struct cfs_trace_page *tage)
 
        __free_page(tage->page);
        kfree(tage);
-       cfs_atomic_dec(&cfs_tage_allocated);
+       atomic_dec(&cfs_tage_allocated);
 }
 
 static void cfs_tage_to_tail(struct cfs_trace_page *tage,
@@ -1072,17 +1072,17 @@ static int tracefiled(void *arg)
                                       "%d\n", ++i, tage->cpu);
                        printk(KERN_ERR "There are %d pages unwritten\n",
                               i);
-                }
-                __LASSERT(cfs_list_empty(&pc.pc_pages));
+               }
+               __LASSERT(cfs_list_empty(&pc.pc_pages));
 end_loop:
-                if (cfs_atomic_read(&tctl->tctl_shutdown)) {
-                        if (last_loop == 0) {
-                                last_loop = 1;
-                                continue;
-                        } else {
-                                break;
-                        }
-                }
+               if (atomic_read(&tctl->tctl_shutdown)) {
+                       if (last_loop == 0) {
+                               last_loop = 1;
+                               continue;
+                       } else {
+                               break;
+                       }
+               }
                init_waitqueue_entry_current(&__wait);
                add_wait_queue(&tctl->tctl_waitq, &__wait);
                set_current_state(TASK_INTERRUPTIBLE);
@@ -1106,7 +1106,7 @@ int cfs_trace_start_thread(void)
        init_completion(&tctl->tctl_start);
        init_completion(&tctl->tctl_stop);
        init_waitqueue_head(&tctl->tctl_waitq);
-       cfs_atomic_set(&tctl->tctl_shutdown, 0);
+       atomic_set(&tctl->tctl_shutdown, 0);
 
        if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
                rc = -ECHILD;
@@ -1128,7 +1128,7 @@ void cfs_trace_stop_thread(void)
        if (thread_running) {
                printk(KERN_INFO
                       "Lustre: shutting down debug daemon thread...\n");
-               cfs_atomic_set(&tctl->tctl_shutdown, 1);
+               atomic_set(&tctl->tctl_shutdown, 1);
                wait_for_completion(&tctl->tctl_stop);
                thread_running = 0;
        }
index d9dfb9f..b109e8c 100644 (file)
@@ -227,7 +227,7 @@ struct tracefiled_ctl {
        struct completion       tctl_stop;
        wait_queue_head_t       tctl_waitq;
        pid_t                   tctl_pid;
-       cfs_atomic_t            tctl_shutdown;
+       atomic_t                tctl_shutdown;
 };
 
 /*
index 43014e2..2a3137e 100644 (file)
 static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                               __u64 key, void *args)
 {
-        struct upcall_cache_entry *entry;
+       struct upcall_cache_entry *entry;
 
-        LIBCFS_ALLOC(entry, sizeof(*entry));
-        if (!entry)
-                return NULL;
+       LIBCFS_ALLOC(entry, sizeof(*entry));
+       if (!entry)
+               return NULL;
 
        UC_CACHE_SET_NEW(entry);
        CFS_INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
-       cfs_atomic_set(&entry->ue_refcount, 0);
+       atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
@@ -101,30 +101,30 @@ static inline int downcall_compare(struct upcall_cache *cache,
 
 static inline void get_entry(struct upcall_cache_entry *entry)
 {
-        cfs_atomic_inc(&entry->ue_refcount);
+       atomic_inc(&entry->ue_refcount);
 }
 
 static inline void put_entry(struct upcall_cache *cache,
-                             struct upcall_cache_entry *entry)
+                            struct upcall_cache_entry *entry)
 {
-        if (cfs_atomic_dec_and_test(&entry->ue_refcount) &&
-            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
-                free_entry(cache, entry);
-        }
+       if (atomic_dec_and_test(&entry->ue_refcount) &&
+           (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
+               free_entry(cache, entry);
+       }
 }
 
 static int check_unlink_entry(struct upcall_cache *cache,
-                              struct upcall_cache_entry *entry)
+                             struct upcall_cache_entry *entry)
 {
-        if (UC_CACHE_IS_VALID(entry) &&
-            cfs_time_before(cfs_time_current(), entry->ue_expire))
-                return 0;
+       if (UC_CACHE_IS_VALID(entry) &&
+           cfs_time_before(cfs_time_current(), entry->ue_expire))
+               return 0;
 
-        if (UC_CACHE_IS_ACQUIRING(entry)) {
-                if (entry->ue_acquire_expire == 0 ||
-                    cfs_time_before(cfs_time_current(),
-                                    entry->ue_acquire_expire))
-                        return 0;
+       if (UC_CACHE_IS_ACQUIRING(entry)) {
+               if (entry->ue_acquire_expire == 0 ||
+                   cfs_time_before(cfs_time_current(),
+                                   entry->ue_acquire_expire))
+                       return 0;
 
                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
@@ -132,10 +132,10 @@ static int check_unlink_entry(struct upcall_cache *cache,
                UC_CACHE_SET_EXPIRED(entry);
        }
 
-        cfs_list_del_init(&entry->ue_hash);
-        if (!cfs_atomic_read(&entry->ue_refcount))
-                free_entry(cache, entry);
-        return 1;
+       cfs_list_del_init(&entry->ue_hash);
+       if (!atomic_read(&entry->ue_refcount))
+               free_entry(cache, entry);
+       return 1;
 }
 
 static inline int refresh_entry(struct upcall_cache *cache,
@@ -280,7 +280,7 @@ void upcall_cache_put_entry(struct upcall_cache *cache,
                return;
        }
 
-       LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
+       LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
@@ -367,17 +367,17 @@ static void cache_flush(struct upcall_cache *cache, int force)
        ENTRY;
 
        spin_lock(&cache->uc_lock);
-        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
-                cfs_list_for_each_entry_safe(entry, next,
-                                         &cache->uc_hashtable[i], ue_hash) {
-                        if (!force && cfs_atomic_read(&entry->ue_refcount)) {
-                                UC_CACHE_SET_EXPIRED(entry);
-                                continue;
-                        }
-                        LASSERT(!cfs_atomic_read(&entry->ue_refcount));
-                        free_entry(cache, entry);
-                }
-        }
+       for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
+               cfs_list_for_each_entry_safe(entry, next,
+                                        &cache->uc_hashtable[i], ue_hash) {
+                       if (!force && atomic_read(&entry->ue_refcount)) {
+                               UC_CACHE_SET_EXPIRED(entry);
+                               continue;
+                       }
+                       LASSERT(!atomic_read(&entry->ue_refcount));
+                       free_entry(cache, entry);
+               }
+       }
        spin_unlock(&cache->uc_lock);
        EXIT;
 }
@@ -396,32 +396,32 @@ EXPORT_SYMBOL(upcall_cache_flush_all);
 
 void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
 {
-        cfs_list_t *head;
-        struct upcall_cache_entry *entry;
-        int found = 0;
-        ENTRY;
+       cfs_list_t *head;
+       struct upcall_cache_entry *entry;
+       int found = 0;
+       ENTRY;
 
-        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
+       head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 
        spin_lock(&cache->uc_lock);
-        cfs_list_for_each_entry(entry, head, ue_hash) {
-                if (upcall_compare(cache, entry, key, args) == 0) {
-                        found = 1;
-                        break;
-                }
-        }
+       cfs_list_for_each_entry(entry, head, ue_hash) {
+               if (upcall_compare(cache, entry, key, args) == 0) {
+                       found = 1;
+                       break;
+               }
+       }
 
-        if (found) {
-                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
-                      "cur %lu, ex %ld/%ld\n",
-                      cache->uc_name, entry, entry->ue_key,
-                      cfs_atomic_read(&entry->ue_refcount), entry->ue_flags,
-                      cfs_time_current_sec(), entry->ue_acquire_expire,
-                      entry->ue_expire);
-                UC_CACHE_SET_EXPIRED(entry);
-                if (!cfs_atomic_read(&entry->ue_refcount))
-                        free_entry(cache, entry);
-        }
+       if (found) {
+               CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
+                     "cur %lu, ex %ld/%ld\n",
+                     cache->uc_name, entry, entry->ue_key,
+                     atomic_read(&entry->ue_refcount), entry->ue_flags,
+                     cfs_time_current_sec(), entry->ue_acquire_expire,
+                     entry->ue_expire);
+               UC_CACHE_SET_EXPIRED(entry);
+               if (!atomic_read(&entry->ue_refcount))
+                       free_entry(cache, entry);
+       }
        spin_unlock(&cache->uc_lock);
 }
 EXPORT_SYMBOL(upcall_cache_flush_one);
index b9b1027..4465acd 100644 (file)
@@ -671,18 +671,18 @@ int file_count(struct file *fp)
 
 struct dentry *dget(struct dentry *de)
 {
-    if (de) {
-        cfs_atomic_inc(&de->d_count);
-    }
-    return de;
+       if (de) {
+               atomic_inc(&de->d_count);
+       }
+       return de;
 }
 
 void dput(struct dentry *de)
 {
-    if (!de || cfs_atomic_read(&de->d_count) == 0) {
+    if (!de || atomic_read(&de->d_count) == 0) {
         return;
     }
-    if (cfs_atomic_dec_and_test(&de->d_count)) {
+    if (atomic_dec_and_test(&de->d_count)) {
        kfree(de);
     }
 }
index af32726..2c1797b 100644 (file)
@@ -43,9 +43,9 @@
 #if defined(_X86_)
 
 void __declspec (naked) FASTCALL
-cfs_atomic_add(
+atomic_add(
     int i,
-    cfs_atomic_t *v
+    atomic_t *v
     )
 {
     // ECX = i
@@ -58,9 +58,9 @@ cfs_atomic_add(
 }
 
 void __declspec (naked) FASTCALL
-cfs_atomic_sub(
+atomic_sub(
     int i,
-    cfs_atomic_t *v
+    atomic_t *v
    ) 
 {
     // ECX = i
@@ -73,8 +73,8 @@ cfs_atomic_sub(
 }
 
 void __declspec (naked) FASTCALL
-cfs_atomic_inc(
-    cfs_atomic_t *v
+atomic_inc(
+    atomic_t *v
     )
 {
     //InterlockedIncrement((PULONG)(&((v)->counter)));
@@ -88,8 +88,8 @@ cfs_atomic_inc(
 }
 
 void __declspec (naked) FASTCALL
-cfs_atomic_dec(
-    cfs_atomic_t *v
+atomic_dec(
+    atomic_t *v
     )
 {
     // ECX = v ; [ECX][0] = v->counter
@@ -101,9 +101,9 @@ cfs_atomic_dec(
 }
 
 int __declspec (naked) FASTCALL 
-cfs_atomic_sub_and_test(
+atomic_sub_and_test(
     int i,
-    cfs_atomic_t *v
+    atomic_t *v
     )
 {
 
@@ -119,8 +119,8 @@ cfs_atomic_sub_and_test(
 }
 
 int __declspec (naked) FASTCALL
-cfs_atomic_inc_and_test(
-    cfs_atomic_t *v
+atomic_inc_and_test(
+    atomic_t *v
     )
 {
     // ECX = v ; [ECX][0] = v->counter
@@ -134,8 +134,8 @@ cfs_atomic_inc_and_test(
 }
 
 int __declspec (naked) FASTCALL
-cfs_atomic_dec_and_test(
-    cfs_atomic_t *v
+atomic_dec_and_test(
+    atomic_t *v
     )
 {
     // ECX = v ; [ECX][0] = v->counter
@@ -151,43 +151,43 @@ cfs_atomic_dec_and_test(
 #elif defined(_AMD64_)
 
 void FASTCALL
-cfs_atomic_add(
+atomic_add(
     int i,
-    cfs_atomic_t *v
+    atomic_t *v
     )
 {
     InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (i));
 }
 
 void FASTCALL
-cfs_atomic_sub(
+atomic_sub(
     int i,
-    cfs_atomic_t *v
+    atomic_t *v
    ) 
 {
     InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (-1*i));
 }
 
 void FASTCALL
-cfs_atomic_inc(
-    cfs_atomic_t *v
+atomic_inc(
+    atomic_t *v
     )
 {
    InterlockedIncrement((PULONG)(&((v)->counter)));
 }
 
 void FASTCALL
-cfs_atomic_dec(
-    cfs_atomic_t *v
+atomic_dec(
+    atomic_t *v
     )
 {
     InterlockedDecrement((PULONG)(&((v)->counter)));
 }
 
 int FASTCALL 
-cfs_atomic_sub_and_test(
+atomic_sub_and_test(
     int i,
-    cfs_atomic_t *v
+    atomic_t *v
     )
 {
     int counter, result;
@@ -206,8 +206,8 @@ cfs_atomic_sub_and_test(
 }
 
 int FASTCALL
-cfs_atomic_inc_and_test(
-    cfs_atomic_t *v
+atomic_inc_and_test(
+    atomic_t *v
     )
 {
     int counter, result;
@@ -226,8 +226,8 @@ cfs_atomic_inc_and_test(
 }
 
 int FASTCALL
-cfs_atomic_dec_and_test(
-    cfs_atomic_t *v
+atomic_dec_and_test(
+    atomic_t *v
     )
 {
     int counter, result;
@@ -258,7 +258,7 @@ cfs_atomic_dec_and_test(
  *
  * Atomically adds \a i to \a v and returns \a i + \a v
  */
-int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v)
+int FASTCALL atomic_add_return(int i, atomic_t *v)
 {
     int counter, result;
 
@@ -283,18 +283,18 @@ int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v)
  *
  * Atomically subtracts \a i from \a v and returns \a v - \a i
  */
-int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v)
+int FASTCALL atomic_sub_return(int i, atomic_t *v)
 {
-       return cfs_atomic_add_return(-i, v);
+       return atomic_add_return(-i, v);
 }
 
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock)
+int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock)
 {
-       if (cfs_atomic_read(v) != 1)
+       if (atomic_read(v) != 1)
                return 0;
 
        spin_lock(lock);
-       if (cfs_atomic_dec_and_test(v))
+       if (atomic_dec_and_test(v))
                return 1;
        spin_unlock(lock);
        return 0;
index 1c91828..21dce7e 100644 (file)
@@ -55,7 +55,7 @@ struct page *virt_to_page(void *addr)
        memset(pg, 0, sizeof(struct page));
        pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
        pg->mapping = addr;
-       cfs_atomic_set(&pg->count, 1);
+       atomic_set(&pg->count, 1);
        set_bit(PG_virt, &(pg->flags));
        cfs_enter_debugger();
        return pg;
@@ -76,7 +76,7 @@ struct page *virt_to_page(void *addr)
  *   N/A
  */
 
-cfs_atomic_t libcfs_total_pages;
+atomic_t libcfs_total_pages;
 
 struct page *alloc_page(int flags)
 {
@@ -90,12 +90,12 @@ struct page *alloc_page(int flags)
 
        memset(pg, 0, sizeof(struct page));
        pg->addr = kmem_cache_alloc(cfs_page_p_slab, 0);
-       cfs_atomic_set(&pg->count, 1);
+       atomic_set(&pg->count, 1);
 
        if (pg->addr) {
                if (cfs_is_flag_set(flags, __GFP_ZERO))
                        memset(pg->addr, 0, PAGE_CACHE_SIZE);
-               cfs_atomic_inc(&libcfs_total_pages);
+               atomic_inc(&libcfs_total_pages);
        } else {
                cfs_enter_debugger();
                kmem_cache_free(cfs_page_t_slab, pg);
@@ -122,11 +122,11 @@ void __free_page(struct page *pg)
 {
        ASSERT(pg != NULL);
        ASSERT(pg->addr  != NULL);
-       ASSERT(cfs_atomic_read(&pg->count) <= 1);
+       ASSERT(atomic_read(&pg->count) <= 1);
 
        if (!test_bit(PG_virt, &pg->flags)) {
                kmem_cache_free(cfs_page_p_slab, pg->addr);
-               cfs_atomic_dec(&libcfs_total_pages);
+               atomic_dec(&libcfs_total_pages);
        } else {
                cfs_enter_debugger();
        }
index 696e8ae..a2b3e1d 100644 (file)
@@ -101,7 +101,7 @@ void init_waitqueue_entry_current(wait_queue_t *link)
     link->event = &(slot->Event);
     link->hits  = &(slot->hits);
 
-    cfs_atomic_inc(&slot->count);
+    atomic_inc(&slot->count);
 
     CFS_INIT_LIST_HEAD(&(link->waitq[0].link));
     CFS_INIT_LIST_HEAD(&(link->waitq[1].link));
@@ -141,7 +141,7 @@ void cfs_waitlink_fini(wait_queue_t *link)
     cfs_assert(link->waitq[0].waitq == NULL);
     cfs_assert(link->waitq[1].waitq == NULL);
 
-    cfs_atomic_dec(&slot->count);
+    atomic_dec(&slot->count);
 }
 
 
@@ -332,7 +332,7 @@ void wake_up_nr(wait_queue_head_t *waitq, int nr)
         LASSERT( result == FALSE || result == TRUE );
 
         if (result) {
-            cfs_atomic_inc(waitl->hits);
+           atomic_inc(waitl->hits);
         }
 
         if ((waitl->flags & CFS_WAITQ_EXCLUSIVE) && --nr == 0)
@@ -404,9 +404,9 @@ void waitq_wait(wait_queue_t *link, long state)
     LASSERT(link != NULL);
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
 
-    if (cfs_atomic_read(link->hits) > 0) {
-        cfs_atomic_dec(link->hits);
-        LASSERT((__u32)cfs_atomic_read(link->hits) < (__u32)0xFFFFFF00);
+    if (atomic_read(link->hits) > 0) {
+       atomic_dec(link->hits);
+       LASSERT((__u32)atomic_read(link->hits) < (__u32)0xFFFFFF00);
     } else {
         cfs_wait_event_internal(link->event, 0);
     }
@@ -434,9 +434,9 @@ int64_t waitq_timedwait( wait_queue_t *link,
                              int64_t timeout)
 { 
 
-    if (cfs_atomic_read(link->hits) > 0) {
-        cfs_atomic_dec(link->hits);
-        LASSERT((__u32)cfs_atomic_read(link->hits) < (__u32)0xFFFFFF00);
+    if (atomic_read(link->hits) > 0) {
+       atomic_dec(link->hits);
+       LASSERT((__u32)atomic_read(link->hits) < (__u32)0xFFFFFF00);
         return (int64_t)TRUE;
     }
 
index 6edddcc..f68d05e 100644 (file)
@@ -4368,7 +4368,7 @@ ks_create_tconn()
 void
 ks_free_tconn(ks_tconn_t * tconn)
 {
-    LASSERT(cfs_atomic_read(&(tconn->kstc_refcount)) == 0);
+    LASSERT(atomic_read(&(tconn->kstc_refcount)) == 0);
 
        spin_lock(&(ks_data.ksnd_tconn_lock));
 
@@ -4511,7 +4511,7 @@ ks_get_tconn(
     ks_tconn_t * tconn
     )
 {
-    cfs_atomic_inc(&(tconn->kstc_refcount));
+    atomic_inc(&(tconn->kstc_refcount));
 }
 
 /*
@@ -4534,7 +4534,7 @@ ks_put_tconn(
     ks_tconn_t *tconn
     )
 {
-    if (cfs_atomic_dec_and_test(&(tconn->kstc_refcount))) {
+    if (atomic_dec_and_test(&(tconn->kstc_refcount))) {
 
        spin_lock(&(tconn->kstc_lock));