# Random bytes
s/\bcfs_get_random_bytes_prim\b/get_random_bytes/g
/#[ \t]*define[ \t]*\bget_random_bytes\b *( *\w* *, *\w* *)[ \t]*\bget_random_bytes\b *( *\w* *, *\w* *)/d
+
+# atomic
+s/\bcfs_atomic_t\b/atomic_t/g
+/typedef[ \t]*\batomic_t\b[ \t]*\batomic_t\b/d
+s/\bcfs_atomic_read\b/atomic_read/g
+/#[ \t]*define[ \t]*\batomic_read\b *( *\w* *)[ \t]*\batomic_read\b *( *\w* *)/d
+s/\bcfs_atomic_add_unless\b/atomic_add_unless/g
+s/\bcfs_atomic_cmpxchg\b/atomic_cmpxchg/g
+s/\bcfs_atomic_inc\b/atomic_inc/g
+/#[ \t]*define[ \t]*\batomic_inc\b *( *\w* *)[ \t]*\batomic_inc\b *( *\w* *)/d
+s/\bcfs_atomic_inc_and_test\b/atomic_inc_and_test/g
+/#[ \t]*define[ \t]*\batomic_inc_and_test\b *( *\w* *)[ \t]*\batomic_inc_and_test\b *( *\w* *)/d
+s/\bcfs_atomic_inc_return\b/atomic_inc_return/g
+/#[ \t]*define[ \t]*\batomic_inc_return\b *( *\w* *)[ \t]*\batomic_inc_return\b *( *\w* *)/d
+s/\bcfs_atomic_inc_not_zero\b/atomic_inc_not_zero/g
+/#[ \t]*define[ \t]*\batomic_inc_not_zero\b *( *\w* *)[ \t]*\batomic_inc_not_zero\b *( *\w* *)/d
+s/\bcfs_atomic_dec\b/atomic_dec/g
+/#[ \t]*define[ \t]*\batomic_dec\b *( *\w* *)[ \t]*\batomic_dec\b *( *\w* *)/d
+s/\bcfs_atomic_dec_and_test\b/atomic_dec_and_test/g
+/#[ \t]*define[ \t]*\batomic_dec_and_test\b *( *\w* *)[ \t]*\batomic_dec_and_test\b *( *\w* *)/d
+s/\bcfs_atomic_dec_return\b/atomic_dec_return/g
+/#[ \t]*define[ \t]*\batomic_dec_return\b *( *\w* *)[ \t]*\batomic_dec_return\b *( *\w* *)/d
+s/\bcfs_atomic_dec_and_lock\b/atomic_dec_and_lock/g
+/#[ \t]*define[ \t]*\batomic_dec_and_lock\b *( *\w* *, *\w* *)[ \t]*\batomic_dec_and_lock\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_set\b/atomic_set/g
+/#[ \t]*define[ \t]*\batomic_set\b *( *\w* *, *\w* *)[ \t]*\batomic_set\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_add\b/atomic_add/g
+/#[ \t]*define[ \t]*\batomic_add\b *( *\w* *, *\w* *)[ \t]*\batomic_add\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_add_return\b/atomic_add_return/g
+/#[ \t]*define[ \t]*\batomic_add_return\b *( *\w* *, *\w* *)[ \t]*\batomic_add_return\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_sub\b/atomic_sub/g
+/#[ \t]*define[ \t]*\batomic_sub\b *( *\w* *, *\w* *)[ \t]*\batomic_sub\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_sub_and_test\b/atomic_sub_and_test/g
+/#[ \t]*define[ \t]*\batomic_sub_and_test\b *( *\w* *, *\w* *)[ \t]*\batomic_sub_and_test\b *( *\w* *, *\w* *)/d
+s/\bcfs_atomic_sub_return\b/atomic_sub_return/g
+/#[ \t]*define[ \t]*\batomic_sub_return\b *( *\w* *, *\w* *)[ \t]*\batomic_sub_return\b *( *\w* *, *\w* *)/d
+s/\bCFS_ATOMIC_INIT\b/ATOMIC_INIT/g
+/#[ \t]*define[ \t]*\bATOMIC_INIT\b *( *\w* *)[ \t]*\bATOMIC_INIT\b *( *\w* *)/d
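+#
+# Each substitution above is paired with a delete expression, e.g. the compat
+# wrapper "#define cfs_atomic_read(atom) atomic_read(atom)" is first rewritten
+# by s/\bcfs_atomic_read\b/atomic_read/g into the self-referential
+# "#define atomic_read(atom) atomic_read(atom)", and the matching /.../d rule
+# then drops that degenerate line; likewise "typedef atomic_t cfs_atomic_t;"
+# becomes "typedef atomic_t atomic_t;" and is deleted.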
/*
+ * atomic
+ */
+
+typedef atomic_t cfs_atomic_t;
+
+#define cfs_atomic_read(atom) atomic_read(atom)
+#define cfs_atomic_inc(atom) atomic_inc(atom)
+#define cfs_atomic_inc_and_test(atom) atomic_inc_and_test(atom)
+#define cfs_atomic_inc_return(atom) atomic_inc_return(atom)
+#define cfs_atomic_inc_not_zero(atom) atomic_inc_not_zero(atom)
+#define cfs_atomic_add_unless(atom, a, u) atomic_add_unless(atom, a, u)
+#define cfs_atomic_dec(atom) atomic_dec(atom)
+#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
+#define cfs_atomic_dec_and_lock(atom, lock) atomic_dec_and_lock(atom, lock)
+#define cfs_atomic_dec_return(atom) atomic_dec_return(atom)
+#define cfs_atomic_set(atom, value) atomic_set(atom, value)
+#define cfs_atomic_add(value, atom) atomic_add(value, atom)
+#define cfs_atomic_add_return(value, atom) atomic_add_return(value, atom)
+#define cfs_atomic_sub(value, atom) atomic_sub(value, atom)
+#define cfs_atomic_sub_and_test(value, atom) atomic_sub_and_test(value, atom)
+#define cfs_atomic_sub_return(value, atom) atomic_sub_return(value, atom)
+#define cfs_atomic_cmpxchg(atom, old, nv) atomic_cmpxchg(atom, old, nv)
+#define CFS_ATOMIC_INIT(i) ATOMIC_INIT(i)
+
+/*
* Some (nomina odiosa sunt) platforms define NULL as naked 0. This confuses
* Lustre RETURN(NULL) macro.
*/
/** hash list operations */
struct cfs_hash_hlist_ops *hs_hops;
/** hash buckets-table */
- cfs_hash_bucket_t **hs_buckets;
- /** total number of items on this hash-table */
- cfs_atomic_t hs_count;
- /** hash flags, see cfs_hash_tag for detail */
- __u16 hs_flags;
- /** # of extra-bytes for bucket, for user saving extended attributes */
+ cfs_hash_bucket_t **hs_buckets;
+ /** total number of items on this hash-table */
+ atomic_t hs_count;
+ /** hash flags, see cfs_hash_tag for detail */
+ __u16 hs_flags;
+ /** # of extra-bytes for bucket, for user saving extended attributes */
__u16 hs_extra_bytes;
/** wants to iterate */
__u8 hs_iterating;
__u32 hs_rehash_count;
/** # of iterators (caller of cfs_hash_for_each_*) */
__u32 hs_iterators;
- /** rehash workitem */
- cfs_workitem_t hs_rehash_wi;
- /** refcount on this hash table */
- cfs_atomic_t hs_refcount;
- /** rehash buckets-table */
- cfs_hash_bucket_t **hs_rehash_buckets;
+ /** rehash workitem */
+ cfs_workitem_t hs_rehash_wi;
+ /** refcount on this hash table */
+ atomic_t hs_refcount;
+ /** rehash buckets-table */
+ cfs_hash_bucket_t **hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
/** serialize debug members */
spinlock_t hs_dep_lock;
}
static inline int cfs_hash_dec_and_lock(cfs_hash_t *hs,
- cfs_atomic_t *condition)
+ atomic_t *condition)
{
- LASSERT(cfs_hash_with_no_bktlock(hs));
- return cfs_atomic_dec_and_lock(condition, &hs->hs_lock.spin);
+ LASSERT(cfs_hash_with_no_bktlock(hs));
+ return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
}
static inline void cfs_hash_bd_lock(cfs_hash_t *hs,
cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode);
static inline int cfs_hash_bd_dec_and_lock(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_atomic_t *condition)
+ atomic_t *condition)
{
- LASSERT(cfs_hash_with_spin_bktlock(hs));
- return cfs_atomic_dec_and_lock(condition,
- &bd->bd_bucket->hsb_lock.spin);
+ LASSERT(cfs_hash_with_spin_bktlock(hs));
+ return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
}
static inline cfs_hlist_head_t *cfs_hash_bd_hhead(cfs_hash_t *hs,
static inline int __cfs_hash_theta(cfs_hash_t *hs)
{
- return (cfs_atomic_read(&hs->hs_count) <<
- CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
+ return (atomic_read(&hs->hs_count) <<
+ CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
}
static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
lbug_with_loc(&msgdata); \
} while(0)
-extern cfs_atomic_t libcfs_kmemory;
+extern atomic_t libcfs_kmemory;
/*
* Memory
*/
# define libcfs_kmem_inc(ptr, size) \
do { \
- cfs_atomic_add(size, &libcfs_kmemory); \
+ atomic_add(size, &libcfs_kmemory); \
} while (0)
# define libcfs_kmem_dec(ptr, size) \
do { \
- cfs_atomic_sub(size, &libcfs_kmemory); \
+ atomic_sub(size, &libcfs_kmemory); \
} while (0)
# define libcfs_kmem_read() \
- cfs_atomic_read(&libcfs_kmemory)
+ atomic_read(&libcfs_kmemory)
#else
# define libcfs_kmem_inc(ptr, size) do {} while (0)
/** assert value of @a is equal to @v */
#define LASSERT_ATOMIC_EQ(a, v) \
do { \
- LASSERTF(cfs_atomic_read(a) == v, \
- "value: %d\n", cfs_atomic_read((a))); \
+ LASSERTF(atomic_read(a) == v, \
+ "value: %d\n", atomic_read((a))); \
} while (0)
/** assert value of @a is unequal to @v */
#define LASSERT_ATOMIC_NE(a, v) \
do { \
- LASSERTF(cfs_atomic_read(a) != v, \
- "value: %d\n", cfs_atomic_read((a))); \
+ LASSERTF(atomic_read(a) != v, \
+ "value: %d\n", atomic_read((a))); \
} while (0)
/** assert value of @a is little than @v */
#define LASSERT_ATOMIC_LT(a, v) \
do { \
- LASSERTF(cfs_atomic_read(a) < v, \
- "value: %d\n", cfs_atomic_read((a))); \
+ LASSERTF(atomic_read(a) < v, \
+ "value: %d\n", atomic_read((a))); \
} while (0)
/** assert value of @a is little/equal to @v */
#define LASSERT_ATOMIC_LE(a, v) \
do { \
- LASSERTF(cfs_atomic_read(a) <= v, \
- "value: %d\n", cfs_atomic_read((a))); \
+ LASSERTF(atomic_read(a) <= v, \
+ "value: %d\n", atomic_read((a))); \
} while (0)
/** assert value of @a is great than @v */
#define LASSERT_ATOMIC_GT(a, v) \
do { \
- LASSERTF(cfs_atomic_read(a) > v, \
- "value: %d\n", cfs_atomic_read((a))); \
+ LASSERTF(atomic_read(a) > v, \
+ "value: %d\n", atomic_read((a))); \
} while (0)
/** assert value of @a is great/equal to @v */
#define LASSERT_ATOMIC_GE(a, v) \
do { \
- LASSERTF(cfs_atomic_read(a) >= v, \
- "value: %d\n", cfs_atomic_read((a))); \
+ LASSERTF(atomic_read(a) >= v, \
+ "value: %d\n", atomic_read((a))); \
} while (0)
/** assert value of @a is great than @v1 and little than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2) \
do { \
- int __v = cfs_atomic_read(a); \
- LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v); \
+ int __v = atomic_read(a); \
+ LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v); \
} while (0)
/** assert value of @a is great than @v1 and little/equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2) \
do { \
- int __v = cfs_atomic_read(a); \
- LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v); \
+ int __v = atomic_read(a); \
+ LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v); \
} while (0)
/** assert value of @a is great/equal to @v1 and little than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2) \
do { \
- int __v = cfs_atomic_read(a); \
- LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v); \
+ int __v = atomic_read(a); \
+ LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v); \
} while (0)
/** assert value of @a is great/equal to @v1 and little/equal to @v2 */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2) \
do { \
- int __v = cfs_atomic_read(a); \
- LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v); \
+ int __v = atomic_read(a); \
+ LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v); \
} while (0)
#else /* !LASSERT_ATOMIC_ENABLED */
/* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
/* create percpt (atomic) refcount based on @cptab */
-cfs_atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
+atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
/* destroy percpt refcount */
-void cfs_percpt_atomic_free(cfs_atomic_t **refs);
+void cfs_percpt_atomic_free(atomic_t **refs);
/* return sum of all percpu refs */
-int cfs_percpt_atomic_summary(cfs_atomic_t **refs);
+int cfs_percpt_atomic_summary(atomic_t **refs);
/** Compile-time assertion.
#include <linux/sched.h> /* THREAD_SIZE */
#include <linux/rbtree.h>
-
#if !defined(__x86_64__)
# ifdef __ia64__
# define CDEBUG_STACK() (THREAD_SIZE - \
module_init(init); \
module_exit(fini)
-/*
- * atomic
- */
-
-typedef atomic_t cfs_atomic_t;
-
-#define cfs_atomic_read(atom) atomic_read(atom)
-#define cfs_atomic_inc(atom) atomic_inc(atom)
-#define cfs_atomic_inc_and_test(atom) atomic_inc_and_test(atom)
-#define cfs_atomic_inc_return(atom) atomic_inc_return(atom)
-#define cfs_atomic_inc_not_zero(atom) atomic_inc_not_zero(atom)
-#define cfs_atomic_add_unless(atom, a, u) atomic_add_unless(atom, a, u)
-#define cfs_atomic_dec(atom) atomic_dec(atom)
-#define cfs_atomic_dec_and_test(atom) atomic_dec_and_test(atom)
-#define cfs_atomic_dec_and_lock(atom, lock) atomic_dec_and_lock(atom, lock)
-#define cfs_atomic_dec_return(atom) atomic_dec_return(atom)
-#define cfs_atomic_set(atom, value) atomic_set(atom, value)
-#define cfs_atomic_add(value, atom) atomic_add(value, atom)
-#define cfs_atomic_add_return(value, atom) atomic_add_return(value, atom)
-#define cfs_atomic_sub(value, atom) atomic_sub(value, atom)
-#define cfs_atomic_sub_and_test(value, atom) atomic_sub_and_test(value, atom)
-#define cfs_atomic_sub_return(value, atom) atomic_sub_return(value, atom)
-#define cfs_atomic_cmpxchg(atom, old, nv) atomic_cmpxchg(atom, old, nv)
-#define CFS_ATOMIC_INIT(i) ATOMIC_INIT(i)
-
#endif
struct upcall_cache_entry {
cfs_list_t ue_hash;
__u64 ue_key;
- cfs_atomic_t ue_refcount;
+ atomic_t ue_refcount;
int ue_flags;
wait_queue_head_t ue_waitq;
cfs_time_t ue_acquire_expire;
/*
* Atomic for single-threaded user-space
*/
-typedef struct { volatile int counter; } cfs_atomic_t;
-
-#define CFS_ATOMIC_INIT(i) { (i) }
-
-#define cfs_atomic_read(a) ((a)->counter)
-#define cfs_atomic_set(a,b) do {(a)->counter = b; } while (0)
-#define cfs_atomic_dec_and_test(a) ((--((a)->counter)) == 0)
-#define cfs_atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
-#define cfs_atomic_inc(a) (((a)->counter)++)
-#define cfs_atomic_dec(a) do { (a)->counter--; } while (0)
-#define cfs_atomic_add(b,a) do {(a)->counter += b;} while (0)
-#define cfs_atomic_add_return(n,a) ((a)->counter += n)
-#define cfs_atomic_inc_return(a) cfs_atomic_add_return(1,a)
-#define cfs_atomic_sub(b,a) do {(a)->counter -= b;} while (0)
-#define cfs_atomic_sub_return(n,a) ((a)->counter -= n)
-#define cfs_atomic_dec_return(a) cfs_atomic_sub_return(1,a)
-#define cfs_atomic_add_unless(v, a, u) \
+typedef struct { volatile int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(a) ((a)->counter)
+#define atomic_set(a,b) do {(a)->counter = b; } while (0)
+#define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
+#define atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
+#define atomic_inc(a) (((a)->counter)++)
+#define atomic_dec(a) do { (a)->counter--; } while (0)
+#define atomic_add(b,a) do {(a)->counter += b;} while (0)
+#define atomic_add_return(n,a) ((a)->counter += n)
+#define atomic_inc_return(a) atomic_add_return(1,a)
+#define atomic_sub(b,a) do {(a)->counter -= b;} while (0)
+#define atomic_sub_return(n,a) ((a)->counter -= n)
+#define atomic_dec_return(a) atomic_sub_return(1,a)
+#define atomic_add_unless(v, a, u) \
((v)->counter != u ? (v)->counter += a : 0)
-#define cfs_atomic_inc_not_zero(v) cfs_atomic_add_unless((v), 1, 0)
-#define cfs_atomic_cmpxchg(v, ov, nv) \
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic_cmpxchg(v, ov, nv) \
((v)->counter == ov ? ((v)->counter = nv, ov) : (v)->counter)
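+
+/*
+ * Minimal usage sketch of the emulation above (single-threaded, so the plain
+ * counter needs no locking); free_object() here is a hypothetical callback:
+ *
+ *     atomic_t ref = ATOMIC_INIT(1);
+ *     atomic_inc(&ref);                (counter == 2)
+ *     atomic_dec(&ref);                (counter == 1)
+ *     if (atomic_dec_and_test(&ref))   (counter reaches 0)
+ *             free_object();
+ */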
#ifdef HAVE_LIBPTHREAD
#define I_FREEING 0x0001
struct dentry {
- cfs_atomic_t d_count;
- struct {
- int len;
- char * name;
- } d_name;
- struct inode * d_inode;
- struct dentry* d_parent;
+ atomic_t d_count;
+ struct {
+ int len;
+ char * name;
+ } d_name;
+ struct inode * d_inode;
+ struct dentry* d_parent;
};
extern struct dentry *dget(struct dentry *de);
/* atomic */
-typedef struct { volatile int counter; } cfs_atomic_t;
+typedef struct { volatile int counter; } atomic_t;
-#define CFS_ATOMIC_INIT(i) { i }
+#define ATOMIC_INIT(i) { i }
-#define cfs_atomic_read(v) ((v)->counter)
-#define cfs_atomic_set(v,i) (((v)->counter) = (i))
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
-void FASTCALL cfs_atomic_add(int i, cfs_atomic_t *v);
-void FASTCALL cfs_atomic_sub(int i, cfs_atomic_t *v);
+void FASTCALL atomic_add(int i, atomic_t *v);
+void FASTCALL atomic_sub(int i, atomic_t *v);
-int FASTCALL cfs_atomic_sub_and_test(int i, cfs_atomic_t *v);
+int FASTCALL atomic_sub_and_test(int i, atomic_t *v);
-void FASTCALL cfs_atomic_inc(cfs_atomic_t *v);
-void FASTCALL cfs_atomic_dec(cfs_atomic_t *v);
+void FASTCALL atomic_inc(atomic_t *v);
+void FASTCALL atomic_dec(atomic_t *v);
-int FASTCALL cfs_atomic_dec_and_test(cfs_atomic_t *v);
-int FASTCALL cfs_atomic_inc_and_test(cfs_atomic_t *v);
+int FASTCALL atomic_dec_and_test(atomic_t *v);
+int FASTCALL atomic_inc_and_test(atomic_t *v);
-int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v);
-int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);
+int FASTCALL atomic_add_return(int i, atomic_t *v);
+int FASTCALL atomic_sub_return(int i, atomic_t *v);
-#define cfs_atomic_inc_return(v) cfs_atomic_add_return(1, v)
-#define cfs_atomic_dec_return(v) cfs_atomic_sub_return(1, v)
+#define atomic_inc_return(v) atomic_add_return(1, v)
+#define atomic_dec_return(v) atomic_sub_return(1, v)
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock);
+int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock);
/* event */
struct page {
void * addr;
- cfs_atomic_t count;
+ atomic_t count;
void * private;
void * mapping;
__u32 index;
static inline void get_page(struct page *page)
{
- cfs_atomic_inc(&page->count);
+ atomic_inc(&page->count);
}
static inline void cfs_put_page(struct page *page)
{
- cfs_atomic_dec(&page->count);
+ atomic_dec(&page->count);
}
static inline int page_count(struct page *page)
{
- return cfs_atomic_read(&page->count);
+ return atomic_read(&page->count);
}
#define page_index(p) ((p)->index)
unsigned int magic;
int flags;
event_t * event;
- cfs_atomic_t * hits;
+ atomic_t * hits;
cfs_waitlink_channel_t waitq[CFS_WAITQ_CHANNELS];
};
#define NGROUPS_PER_BLOCK ((int)(PAGE_SIZE / sizeof(gid_t)))
struct group_info {
int ngroups;
- cfs_atomic_t usage;
+ atomic_t usage;
gid_t small_block[NGROUPS_SMALL];
int nblocks;
gid_t *blocks[0];
};
#define get_group_info(group_info) do { \
- cfs_atomic_inc(&(group_info)->usage); \
+ atomic_inc(&(group_info)->usage); \
} while (0)
#define put_group_info(group_info) do { \
- if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
+ if (atomic_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
HANDLE Tid; /* Thread id */
PETHREAD Tet; /* Pointer to ethread */
- cfs_atomic_t count; /* refer count */
- cfs_atomic_t hits; /* times of waken event singaled */
+ atomic_t count; /* refer count */
+ atomic_t hits; /* times the wake event was signaled */
KIRQL irql; /* irql for rwlock ... */
ks_tconn_type_t kstc_type; /* tdi connection Type */
ks_tconn_state_t kstc_state; /* tdi connection state flag */
- ks_unicode_name_t kstc_dev; /* tcp transport device name */
+ ks_unicode_name_t kstc_dev; /* tcp transport device name */
- ks_tdi_addr_t kstc_addr; /* local address handlers / Objects */
+ ks_tdi_addr_t kstc_addr; /* local address handlers / Objects */
- cfs_atomic_t kstc_refcount; /* reference count of ks_tconn_t */
+ atomic_t kstc_refcount; /* reference count of ks_tconn_t */
- cfs_list_t kstc_list; /* linked to global ksocknal_data */
+ cfs_list_t kstc_list; /* linked to global ksocknal_data */
union {
"Lustre kernel panic on LBUG");
EXPORT_SYMBOL(libcfs_panic_on_lbug);
-cfs_atomic_t libcfs_kmemory = CFS_ATOMIC_INIT(0);
+atomic_t libcfs_kmemory = ATOMIC_INIT(0);
EXPORT_SYMBOL(libcfs_kmemory);
static wait_queue_head_t debug_ctlwq;
int __cfs_fail_check_set(__u32 id, __u32 value, int set)
{
- static cfs_atomic_t cfs_fail_count = CFS_ATOMIC_INIT(0);
-
- LASSERT(!(id & CFS_FAIL_ONCE));
-
- if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
- (CFS_FAILED | CFS_FAIL_ONCE)) {
- cfs_atomic_set(&cfs_fail_count, 0); /* paranoia */
- return 0;
- }
-
- /* Fail 1/cfs_fail_val times */
- if (cfs_fail_loc & CFS_FAIL_RAND) {
- if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
- return 0;
- }
-
- /* Skip the first cfs_fail_val, then fail */
- if (cfs_fail_loc & CFS_FAIL_SKIP) {
- if (cfs_atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
- return 0;
- }
-
- /* check cfs_fail_val... */
- if (set == CFS_FAIL_LOC_VALUE) {
- if (cfs_fail_val != -1 && cfs_fail_val != value)
- return 0;
- }
-
- /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
- if (cfs_fail_loc & CFS_FAIL_SOME &&
- (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
- int count = cfs_atomic_inc_return(&cfs_fail_count);
-
- if (count >= cfs_fail_val) {
+ static atomic_t cfs_fail_count = ATOMIC_INIT(0);
+
+ LASSERT(!(id & CFS_FAIL_ONCE));
+
+ if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
+ (CFS_FAILED | CFS_FAIL_ONCE)) {
+ atomic_set(&cfs_fail_count, 0); /* paranoia */
+ return 0;
+ }
+
+ /* Fail 1/cfs_fail_val times */
+ if (cfs_fail_loc & CFS_FAIL_RAND) {
+ if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
+ return 0;
+ }
+
+ /* Skip the first cfs_fail_val, then fail */
+ if (cfs_fail_loc & CFS_FAIL_SKIP) {
+ if (atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
+ return 0;
+ }
+
+ /* check cfs_fail_val... */
+ if (set == CFS_FAIL_LOC_VALUE) {
+ if (cfs_fail_val != -1 && cfs_fail_val != value)
+ return 0;
+ }
+
+ /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
+ if (cfs_fail_loc & CFS_FAIL_SOME &&
+ (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
+ int count = atomic_inc_return(&cfs_fail_count);
+
+ if (count >= cfs_fail_val) {
set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
- cfs_atomic_set(&cfs_fail_count, 0);
- /* we are lost race to increase */
- if (count > cfs_fail_val)
- return 0;
- }
- }
-
- if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
- (value & CFS_FAIL_ONCE))
+ atomic_set(&cfs_fail_count, 0);
+ /* we lost the race to increase */
+ if (count > cfs_fail_val)
+ return 0;
+ }
+ }
+
+ if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
+ (value & CFS_FAIL_ONCE))
set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
- /* Lost race to set CFS_FAILED_BIT. */
+ /* Lost race to set CFS_FAILED_BIT. */
if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
- /* If CFS_FAIL_ONCE is valid, only one process can fail,
- * otherwise multi-process can fail at the same time. */
- if (cfs_fail_loc & CFS_FAIL_ONCE)
- return 0;
- }
-
- switch (set) {
- case CFS_FAIL_LOC_NOSET:
- case CFS_FAIL_LOC_VALUE:
- break;
- case CFS_FAIL_LOC_ORSET:
- cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
- break;
- case CFS_FAIL_LOC_RESET:
- cfs_fail_loc = value;
- break;
- default:
- LASSERTF(0, "called with bad set %u\n", set);
- break;
- }
-
- return 1;
+ /* If CFS_FAIL_ONCE is set, only one process can fail,
+ * otherwise multiple processes can fail at the same time. */
+ if (cfs_fail_loc & CFS_FAIL_ONCE)
+ return 0;
+ }
+
+ switch (set) {
+ case CFS_FAIL_LOC_NOSET:
+ case CFS_FAIL_LOC_VALUE:
+ break;
+ case CFS_FAIL_LOC_ORSET:
+ cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
+ break;
+ case CFS_FAIL_LOC_RESET:
+ cfs_fail_loc = value;
+ break;
+ default:
+ LASSERTF(0, "called with bad set %u\n", set);
+ break;
+ }
+
+ return 1;
}
EXPORT_SYMBOL(__cfs_fail_check_set);
void
cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ cfs_hlist_node_t *hnode)
{
- int rc;
+ int rc;
- rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
- cfs_hash_bd_dep_record(hs, bd, rc);
- bd->bd_bucket->hsb_version++;
- if (unlikely(bd->bd_bucket->hsb_version == 0))
- bd->bd_bucket->hsb_version++;
- bd->bd_bucket->hsb_count++;
+ rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
+ cfs_hash_bd_dep_record(hs, bd, rc);
+ bd->bd_bucket->hsb_version++;
+ if (unlikely(bd->bd_bucket->hsb_version == 0))
+ bd->bd_bucket->hsb_version++;
+ bd->bd_bucket->hsb_count++;
- if (cfs_hash_with_counter(hs))
- cfs_atomic_inc(&hs->hs_count);
- if (!cfs_hash_with_no_itemref(hs))
- cfs_hash_get(hs, hnode);
+ if (cfs_hash_with_counter(hs))
+ atomic_inc(&hs->hs_count);
+ if (!cfs_hash_with_no_itemref(hs))
+ cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);
void
cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- cfs_hlist_node_t *hnode)
+ cfs_hlist_node_t *hnode)
{
- hs->hs_hops->hop_hnode_del(hs, bd, hnode);
+ hs->hs_hops->hop_hnode_del(hs, bd, hnode);
- LASSERT(bd->bd_bucket->hsb_count > 0);
- bd->bd_bucket->hsb_count--;
- bd->bd_bucket->hsb_version++;
- if (unlikely(bd->bd_bucket->hsb_version == 0))
- bd->bd_bucket->hsb_version++;
+ LASSERT(bd->bd_bucket->hsb_count > 0);
+ bd->bd_bucket->hsb_count--;
+ bd->bd_bucket->hsb_version++;
+ if (unlikely(bd->bd_bucket->hsb_version == 0))
+ bd->bd_bucket->hsb_version++;
- if (cfs_hash_with_counter(hs)) {
- LASSERT(cfs_atomic_read(&hs->hs_count) > 0);
- cfs_atomic_dec(&hs->hs_count);
- }
- if (!cfs_hash_with_no_itemref(hs))
- cfs_hash_put_locked(hs, hnode);
+ if (cfs_hash_with_counter(hs)) {
+ LASSERT(atomic_read(&hs->hs_count) > 0);
+ atomic_dec(&hs->hs_count);
+ }
+ if (!cfs_hash_with_no_itemref(hs))
+ cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);
if (hs == NULL)
RETURN(NULL);
- strncpy(hs->hs_name, name, len);
- hs->hs_name[len - 1] = '\0';
- hs->hs_flags = flags;
+ strncpy(hs->hs_name, name, len);
+ hs->hs_name[len - 1] = '\0';
+ hs->hs_flags = flags;
- cfs_atomic_set(&hs->hs_refcount, 1);
- cfs_atomic_set(&hs->hs_count, 0);
+ atomic_set(&hs->hs_refcount, 1);
+ atomic_set(&hs->hs_count, 0);
- cfs_hash_lock_setup(hs);
- cfs_hash_hlist_setup(hs);
+ cfs_hash_lock_setup(hs);
+ cfs_hash_hlist_setup(hs);
hs->hs_cur_bits = (__u8)cur_bits;
hs->hs_min_bits = (__u8)cur_bits;
cond_resched();
}
- LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
+ LASSERT(atomic_read(&hs->hs_count) == 0);
- cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
- 0, CFS_HASH_NBKT(hs));
- i = cfs_hash_with_bigname(hs) ?
- CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
- LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
+ cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
+ 0, CFS_HASH_NBKT(hs));
+ i = cfs_hash_with_bigname(hs) ?
+ CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
+ LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
- EXIT;
+ EXIT;
}
cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
{
- if (cfs_atomic_inc_not_zero(&hs->hs_refcount))
- return hs;
- return NULL;
+ if (atomic_inc_not_zero(&hs->hs_refcount))
+ return hs;
+ return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);
void cfs_hash_putref(cfs_hash_t *hs)
{
- if (cfs_atomic_dec_and_test(&hs->hs_refcount))
- cfs_hash_destroy(hs);
+ if (atomic_dec_and_test(&hs->hs_refcount))
+ cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);
static inline int
cfs_hash_rehash_inline(cfs_hash_t *hs)
{
- return !cfs_hash_with_nblk_change(hs) &&
- cfs_atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
+ return !cfs_hash_with_nblk_change(hs) &&
+ atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}
/**
static void
cfs_hash_for_each_exit(cfs_hash_t *hs)
{
- int remained;
- int bits;
+ int remained;
+ int bits;
- if (!cfs_hash_with_rehash(hs))
- return;
- cfs_hash_lock(hs, 1);
- remained = --hs->hs_iterators;
- bits = cfs_hash_rehash_bits(hs);
- cfs_hash_unlock(hs, 1);
- /* NB: it's race on cfs_has_t::hs_iterating, see above */
- if (remained == 0)
- hs->hs_iterating = 0;
- if (bits > 0) {
- cfs_hash_rehash(hs, cfs_atomic_read(&hs->hs_count) <
- CFS_HASH_LOOP_HOG);
- }
+ if (!cfs_hash_with_rehash(hs))
+ return;
+ cfs_hash_lock(hs, 1);
+ remained = --hs->hs_iterators;
+ bits = cfs_hash_rehash_bits(hs);
+ cfs_hash_unlock(hs, 1);
+ /* NB: it's a race on cfs_hash_t::hs_iterating, see above */
+ if (remained == 0)
+ hs->hs_iterating = 0;
+ if (bits > 0) {
+ cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
+ CFS_HASH_LOOP_HOG);
+ }
}
/**
__u64
cfs_hash_size_get(cfs_hash_t *hs)
{
- return cfs_hash_with_counter(hs) ?
- cfs_atomic_read(&hs->hs_count) :
- cfs_hash_for_each_tight(hs, NULL, NULL, 0);
+ return cfs_hash_with_counter(hs) ?
+ atomic_read(&hs->hs_count) :
+ cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
/** free cpu-partition refcount */
void
-cfs_percpt_atomic_free(cfs_atomic_t **refs)
+cfs_percpt_atomic_free(atomic_t **refs)
{
cfs_percpt_free(refs);
}
EXPORT_SYMBOL(cfs_percpt_atomic_free);
/** allocate cpu-partition refcount with initial value @init_val */
-cfs_atomic_t **
+atomic_t **
cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
{
- cfs_atomic_t **refs;
- cfs_atomic_t *ref;
+ atomic_t **refs;
+ atomic_t *ref;
int i;
refs = cfs_percpt_alloc(cptab, sizeof(*ref));
return NULL;
cfs_percpt_for_each(ref, i, refs)
- cfs_atomic_set(ref, init_val);
+ atomic_set(ref, init_val);
return refs;
}
EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
/** return sum of cpu-partition refs */
int
-cfs_percpt_atomic_summary(cfs_atomic_t **refs)
+cfs_percpt_atomic_summary(atomic_t **refs)
{
- cfs_atomic_t *ref;
+ atomic_t *ref;
int i;
int val = 0;
cfs_percpt_for_each(ref, i, refs)
- val += cfs_atomic_read(ref);
+ val += atomic_read(ref);
return val;
}
remove_proc();
CDEBUG(D_MALLOC, "before Portals cleanup: kmem %d\n",
- cfs_atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
if (cfs_sched_rehash != NULL) {
cfs_wi_sched_destroy(cfs_sched_rehash);
#endif
cfs_cpu_fini();
- if (cfs_atomic_read(&libcfs_kmemory) != 0)
+ if (atomic_read(&libcfs_kmemory) != 0)
CERROR("Portals memory leaked: %d bytes\n",
- cfs_atomic_read(&libcfs_kmemory));
+ atomic_read(&libcfs_kmemory));
rc = libcfs_debug_cleanup();
if (rc)
struct mutex cfs_trace_thread_mutex;
static int thread_running = 0;
-cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
+atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
struct cfs_trace_cpu_data *tcd);
}
tage->page = page;
- cfs_atomic_inc(&cfs_tage_allocated);
+ atomic_inc(&cfs_tage_allocated);
return tage;
}
__free_page(tage->page);
kfree(tage);
- cfs_atomic_dec(&cfs_tage_allocated);
+ atomic_dec(&cfs_tage_allocated);
}
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
"%d\n", ++i, tage->cpu);
printk(KERN_ERR "There are %d pages unwritten\n",
i);
- }
- __LASSERT(cfs_list_empty(&pc.pc_pages));
+ }
+ __LASSERT(cfs_list_empty(&pc.pc_pages));
end_loop:
- if (cfs_atomic_read(&tctl->tctl_shutdown)) {
- if (last_loop == 0) {
- last_loop = 1;
- continue;
- } else {
- break;
- }
- }
+ if (atomic_read(&tctl->tctl_shutdown)) {
+ if (last_loop == 0) {
+ last_loop = 1;
+ continue;
+ } else {
+ break;
+ }
+ }
init_waitqueue_entry_current(&__wait);
add_wait_queue(&tctl->tctl_waitq, &__wait);
set_current_state(TASK_INTERRUPTIBLE);
init_completion(&tctl->tctl_start);
init_completion(&tctl->tctl_stop);
init_waitqueue_head(&tctl->tctl_waitq);
- cfs_atomic_set(&tctl->tctl_shutdown, 0);
+ atomic_set(&tctl->tctl_shutdown, 0);
if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
rc = -ECHILD;
if (thread_running) {
printk(KERN_INFO
"Lustre: shutting down debug daemon thread...\n");
- cfs_atomic_set(&tctl->tctl_shutdown, 1);
+ atomic_set(&tctl->tctl_shutdown, 1);
wait_for_completion(&tctl->tctl_stop);
thread_running = 0;
}
struct completion tctl_stop;
wait_queue_head_t tctl_waitq;
pid_t tctl_pid;
- cfs_atomic_t tctl_shutdown;
+ atomic_t tctl_shutdown;
};
/*
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
__u64 key, void *args)
{
- struct upcall_cache_entry *entry;
+ struct upcall_cache_entry *entry;
- LIBCFS_ALLOC(entry, sizeof(*entry));
- if (!entry)
- return NULL;
+ LIBCFS_ALLOC(entry, sizeof(*entry));
+ if (!entry)
+ return NULL;
UC_CACHE_SET_NEW(entry);
CFS_INIT_LIST_HEAD(&entry->ue_hash);
entry->ue_key = key;
- cfs_atomic_set(&entry->ue_refcount, 0);
+ atomic_set(&entry->ue_refcount, 0);
init_waitqueue_head(&entry->ue_waitq);
if (cache->uc_ops->init_entry)
cache->uc_ops->init_entry(entry, args);
static inline void get_entry(struct upcall_cache_entry *entry)
{
- cfs_atomic_inc(&entry->ue_refcount);
+ atomic_inc(&entry->ue_refcount);
}
static inline void put_entry(struct upcall_cache *cache,
- struct upcall_cache_entry *entry)
+ struct upcall_cache_entry *entry)
{
- if (cfs_atomic_dec_and_test(&entry->ue_refcount) &&
- (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
- free_entry(cache, entry);
- }
+ if (atomic_dec_and_test(&entry->ue_refcount) &&
+ (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
+ free_entry(cache, entry);
+ }
}
static int check_unlink_entry(struct upcall_cache *cache,
- struct upcall_cache_entry *entry)
+ struct upcall_cache_entry *entry)
{
- if (UC_CACHE_IS_VALID(entry) &&
- cfs_time_before(cfs_time_current(), entry->ue_expire))
- return 0;
+ if (UC_CACHE_IS_VALID(entry) &&
+ cfs_time_before(cfs_time_current(), entry->ue_expire))
+ return 0;
- if (UC_CACHE_IS_ACQUIRING(entry)) {
- if (entry->ue_acquire_expire == 0 ||
- cfs_time_before(cfs_time_current(),
- entry->ue_acquire_expire))
- return 0;
+ if (UC_CACHE_IS_ACQUIRING(entry)) {
+ if (entry->ue_acquire_expire == 0 ||
+ cfs_time_before(cfs_time_current(),
+ entry->ue_acquire_expire))
+ return 0;
UC_CACHE_SET_EXPIRED(entry);
wake_up_all(&entry->ue_waitq);
UC_CACHE_SET_EXPIRED(entry);
}
- cfs_list_del_init(&entry->ue_hash);
- if (!cfs_atomic_read(&entry->ue_refcount))
- free_entry(cache, entry);
- return 1;
+ cfs_list_del_init(&entry->ue_hash);
+ if (!atomic_read(&entry->ue_refcount))
+ free_entry(cache, entry);
+ return 1;
}
static inline int refresh_entry(struct upcall_cache *cache,
return;
}
- LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
+ LASSERT(atomic_read(&entry->ue_refcount) > 0);
spin_lock(&cache->uc_lock);
put_entry(cache, entry);
spin_unlock(&cache->uc_lock);
ENTRY;
spin_lock(&cache->uc_lock);
- for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
- cfs_list_for_each_entry_safe(entry, next,
- &cache->uc_hashtable[i], ue_hash) {
- if (!force && cfs_atomic_read(&entry->ue_refcount)) {
- UC_CACHE_SET_EXPIRED(entry);
- continue;
- }
- LASSERT(!cfs_atomic_read(&entry->ue_refcount));
- free_entry(cache, entry);
- }
- }
+ for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
+ cfs_list_for_each_entry_safe(entry, next,
+ &cache->uc_hashtable[i], ue_hash) {
+ if (!force && atomic_read(&entry->ue_refcount)) {
+ UC_CACHE_SET_EXPIRED(entry);
+ continue;
+ }
+ LASSERT(!atomic_read(&entry->ue_refcount));
+ free_entry(cache, entry);
+ }
+ }
spin_unlock(&cache->uc_lock);
EXIT;
}
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
- cfs_list_t *head;
- struct upcall_cache_entry *entry;
- int found = 0;
- ENTRY;
+ cfs_list_t *head;
+ struct upcall_cache_entry *entry;
+ int found = 0;
+ ENTRY;
- head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
+ head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
spin_lock(&cache->uc_lock);
- cfs_list_for_each_entry(entry, head, ue_hash) {
- if (upcall_compare(cache, entry, key, args) == 0) {
- found = 1;
- break;
- }
- }
+ cfs_list_for_each_entry(entry, head, ue_hash) {
+ if (upcall_compare(cache, entry, key, args) == 0) {
+ found = 1;
+ break;
+ }
+ }
- if (found) {
- CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
- "cur %lu, ex %ld/%ld\n",
- cache->uc_name, entry, entry->ue_key,
- cfs_atomic_read(&entry->ue_refcount), entry->ue_flags,
- cfs_time_current_sec(), entry->ue_acquire_expire,
- entry->ue_expire);
- UC_CACHE_SET_EXPIRED(entry);
- if (!cfs_atomic_read(&entry->ue_refcount))
- free_entry(cache, entry);
- }
+ if (found) {
+ CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
+ "cur %lu, ex %ld/%ld\n",
+ cache->uc_name, entry, entry->ue_key,
+ atomic_read(&entry->ue_refcount), entry->ue_flags,
+ cfs_time_current_sec(), entry->ue_acquire_expire,
+ entry->ue_expire);
+ UC_CACHE_SET_EXPIRED(entry);
+ if (!atomic_read(&entry->ue_refcount))
+ free_entry(cache, entry);
+ }
spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);
struct dentry *dget(struct dentry *de)
{
- if (de) {
- cfs_atomic_inc(&de->d_count);
- }
- return de;
+ if (de) {
+ atomic_inc(&de->d_count);
+ }
+ return de;
}
void dput(struct dentry *de)
{
- if (!de || cfs_atomic_read(&de->d_count) == 0) {
+ if (!de || atomic_read(&de->d_count) == 0) {
return;
}
- if (cfs_atomic_dec_and_test(&de->d_count)) {
+ if (atomic_dec_and_test(&de->d_count)) {
kfree(de);
}
}
#if defined(_X86_)
void __declspec (naked) FASTCALL
-cfs_atomic_add(
+atomic_add(
int i,
- cfs_atomic_t *v
+ atomic_t *v
)
{
// ECX = i
}
void __declspec (naked) FASTCALL
-cfs_atomic_sub(
+atomic_sub(
int i,
- cfs_atomic_t *v
+ atomic_t *v
)
{
// ECX = i
}
void __declspec (naked) FASTCALL
-cfs_atomic_inc(
- cfs_atomic_t *v
+atomic_inc(
+ atomic_t *v
)
{
//InterlockedIncrement((PULONG)(&((v)->counter)));
}
void __declspec (naked) FASTCALL
-cfs_atomic_dec(
- cfs_atomic_t *v
+atomic_dec(
+ atomic_t *v
)
{
// ECX = v ; [ECX][0] = v->counter
}
int __declspec (naked) FASTCALL
-cfs_atomic_sub_and_test(
+atomic_sub_and_test(
int i,
- cfs_atomic_t *v
+ atomic_t *v
)
{
}
int __declspec (naked) FASTCALL
-cfs_atomic_inc_and_test(
- cfs_atomic_t *v
+atomic_inc_and_test(
+ atomic_t *v
)
{
// ECX = v ; [ECX][0] = v->counter
}
int __declspec (naked) FASTCALL
-cfs_atomic_dec_and_test(
- cfs_atomic_t *v
+atomic_dec_and_test(
+ atomic_t *v
)
{
// ECX = v ; [ECX][0] = v->counter
#elif defined(_AMD64_)
void FASTCALL
-cfs_atomic_add(
+atomic_add(
int i,
- cfs_atomic_t *v
+ atomic_t *v
)
{
InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (i));
}
void FASTCALL
-cfs_atomic_sub(
+atomic_sub(
int i,
- cfs_atomic_t *v
+ atomic_t *v
)
{
InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (-1*i));
}
void FASTCALL
-cfs_atomic_inc(
- cfs_atomic_t *v
+atomic_inc(
+ atomic_t *v
)
{
InterlockedIncrement((PULONG)(&((v)->counter)));
}
void FASTCALL
-cfs_atomic_dec(
- cfs_atomic_t *v
+atomic_dec(
+ atomic_t *v
)
{
InterlockedDecrement((PULONG)(&((v)->counter)));
}
int FASTCALL
-cfs_atomic_sub_and_test(
+atomic_sub_and_test(
int i,
- cfs_atomic_t *v
+ atomic_t *v
)
{
int counter, result;
}
int FASTCALL
-cfs_atomic_inc_and_test(
- cfs_atomic_t *v
+atomic_inc_and_test(
+ atomic_t *v
)
{
int counter, result;
}
int FASTCALL
-cfs_atomic_dec_and_test(
- cfs_atomic_t *v
+atomic_dec_and_test(
+ atomic_t *v
)
{
int counter, result;
*
* Atomically adds \a i to \a v and returns \a i + \a v
*/
-int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v)
+int FASTCALL atomic_add_return(int i, atomic_t *v)
{
int counter, result;
*
* Atomically subtracts \a i from \a v and returns \a v - \a i
*/
-int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v)
+int FASTCALL atomic_sub_return(int i, atomic_t *v)
{
- return cfs_atomic_add_return(-i, v);
+ return atomic_add_return(-i, v);
}
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock)
+int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock)
{
- if (cfs_atomic_read(v) != 1)
+ if (atomic_read(v) != 1)
return 0;
spin_lock(lock);
- if (cfs_atomic_dec_and_test(v))
+ if (atomic_dec_and_test(v))
return 1;
spin_unlock(lock);
return 0;
memset(pg, 0, sizeof(struct page));
pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
pg->mapping = addr;
- cfs_atomic_set(&pg->count, 1);
+ atomic_set(&pg->count, 1);
set_bit(PG_virt, &(pg->flags));
cfs_enter_debugger();
return pg;
* N/A
*/
-cfs_atomic_t libcfs_total_pages;
+atomic_t libcfs_total_pages;
struct page *alloc_page(int flags)
{
memset(pg, 0, sizeof(struct page));
pg->addr = kmem_cache_alloc(cfs_page_p_slab, 0);
- cfs_atomic_set(&pg->count, 1);
+ atomic_set(&pg->count, 1);
if (pg->addr) {
if (cfs_is_flag_set(flags, __GFP_ZERO))
memset(pg->addr, 0, PAGE_CACHE_SIZE);
- cfs_atomic_inc(&libcfs_total_pages);
+ atomic_inc(&libcfs_total_pages);
} else {
cfs_enter_debugger();
kmem_cache_free(cfs_page_t_slab, pg);
{
ASSERT(pg != NULL);
ASSERT(pg->addr != NULL);
- ASSERT(cfs_atomic_read(&pg->count) <= 1);
+ ASSERT(atomic_read(&pg->count) <= 1);
if (!test_bit(PG_virt, &pg->flags)) {
kmem_cache_free(cfs_page_p_slab, pg->addr);
- cfs_atomic_dec(&libcfs_total_pages);
+ atomic_dec(&libcfs_total_pages);
} else {
cfs_enter_debugger();
}
link->event = &(slot->Event);
link->hits = &(slot->hits);
- cfs_atomic_inc(&slot->count);
+ atomic_inc(&slot->count);
CFS_INIT_LIST_HEAD(&(link->waitq[0].link));
CFS_INIT_LIST_HEAD(&(link->waitq[1].link));
cfs_assert(link->waitq[0].waitq == NULL);
cfs_assert(link->waitq[1].waitq == NULL);
- cfs_atomic_dec(&slot->count);
+ atomic_dec(&slot->count);
}
LASSERT( result == FALSE || result == TRUE );
if (result) {
- cfs_atomic_inc(waitl->hits);
+ atomic_inc(waitl->hits);
}
if ((waitl->flags & CFS_WAITQ_EXCLUSIVE) && --nr == 0)
LASSERT(link != NULL);
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
- if (cfs_atomic_read(link->hits) > 0) {
- cfs_atomic_dec(link->hits);
- LASSERT((__u32)cfs_atomic_read(link->hits) < (__u32)0xFFFFFF00);
+ if (atomic_read(link->hits) > 0) {
+ atomic_dec(link->hits);
+ LASSERT((__u32)atomic_read(link->hits) < (__u32)0xFFFFFF00);
} else {
cfs_wait_event_internal(link->event, 0);
}
int64_t timeout)
{
- if (cfs_atomic_read(link->hits) > 0) {
- cfs_atomic_dec(link->hits);
- LASSERT((__u32)cfs_atomic_read(link->hits) < (__u32)0xFFFFFF00);
+ if (atomic_read(link->hits) > 0) {
+ atomic_dec(link->hits);
+ LASSERT((__u32)atomic_read(link->hits) < (__u32)0xFFFFFF00);
return (int64_t)TRUE;
}
void
ks_free_tconn(ks_tconn_t * tconn)
{
- LASSERT(cfs_atomic_read(&(tconn->kstc_refcount)) == 0);
+ LASSERT(atomic_read(&(tconn->kstc_refcount)) == 0);
spin_lock(&(ks_data.ksnd_tconn_lock));
ks_tconn_t * tconn
)
{
- cfs_atomic_inc(&(tconn->kstc_refcount));
+ atomic_inc(&(tconn->kstc_refcount));
}
/*
ks_tconn_t *tconn
)
{
- if (cfs_atomic_dec_and_test(&(tconn->kstc_refcount))) {
+ if (atomic_dec_and_test(&(tconn->kstc_refcount))) {
spin_lock(&(tconn->kstc_lock));