-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* - spin_lock(x)
* - spin_unlock(x)
* - spin_trylock(x)
+ * - spin_lock_bh_init(x)
+ * - spin_lock_bh(x)
+ * - spin_unlock_bh(x)
*
+ * - spin_is_locked(x)
* - spin_lock_irqsave(x, f)
* - spin_unlock_irqrestore(x, f)
*
* No-op implementation.
*/
struct spin_lock { int foo; };

typedef struct spin_lock spinlock_t;

#define DEFINE_SPINLOCK(lock)   spinlock_t lock = { }
#define __SPIN_LOCK_UNLOCKED(x) ((spinlock_t) {})

void spin_lock_init(spinlock_t *lock);
void spin_lock(spinlock_t *lock);
void spin_unlock(spinlock_t *lock);
int spin_trylock(spinlock_t *lock);
void spin_lock_bh_init(spinlock_t *lock);
void spin_lock_bh(spinlock_t *lock);
void spin_unlock_bh(spinlock_t *lock);

/* Always reports "locked" — presumably so assertion-style checks in
 * callers never trip in the single-threaded build. */
static inline int spin_is_locked(spinlock_t *l)
{
        return 1;
}

/* The irqsave/irqrestore variants are no-ops; @f (flags) is ignored. */
static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f)
{
}

static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f)
{
}
/*
 * Semaphore
 *
 * - sema_init(s, val)
 * - __up(s) / __down(s) / __down_interruptible(s)
 */
struct semaphore {
        int foo;
};

void sema_init(struct semaphore *s, int val);
void __up(struct semaphore *s);
void __down(struct semaphore *s);
int __down_interruptible(struct semaphore *s);

/* A semaphore statically initialized to 1 (usable as a mutex). */
#define DEFINE_SEMAPHORE(name)  struct semaphore name = { 1 }

#define up(s)                   __up(s)
#define down(s)                 __down(s)
#define down_interruptible(s)   __down_interruptible(s)

/* Single-threaded stub: acquisition can never be contended, so always
 * return 0 ("acquired" in kernel down_trylock() convention — see
 * mutex_trylock() below, which negates this result). */
static inline int down_trylock(struct semaphore *sem)
{
        return 0;
}
/*
* Completion:
*
+ * - init_completion_module(c)
+ * - call_wait_handler(t)
* - init_completion(c)
* - complete(c)
* - wait_for_completion(c)
+ * - wait_for_completion_interruptible(c)
*/
+#ifdef HAVE_LIBPTHREAD
+#include <pthread.h>
+
+/*
+ * Multi-threaded user space completion APIs
+ */
+
+struct completion {
+ int c_done;
+ pthread_cond_t c_cond;
+ pthread_mutex_t c_mut;
+};
+
+#else /* !HAVE_LIBPTHREAD */
+
struct completion {
- unsigned int done;
- cfs_waitq_t wait;
+ unsigned int done;
+ wait_queue_head_t wait;
};
-typedef int (*cfs_wait_handler_t) (int timeout);
-void init_completion_module(cfs_wait_handler_t handler);
+#endif /* HAVE_LIBPTHREAD */
+
+typedef int (*wait_handler_t) (int timeout);
+void init_completion_module(wait_handler_t handler);
+int call_wait_handler(int timeout);
void init_completion(struct completion *c);
-void init_completion_module(cfs_wait_handler_t handler);
+void fini_completion(struct completion *c);
void complete(struct completion *c);
void wait_for_completion(struct completion *c);
int wait_for_completion_interruptible(struct completion *c);
#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#define DECLARE_COMPLETION(work) \
- struct completion work = COMPLETION_INITIALIZER(work)
-#define INIT_COMPLETION(x) ((x).done = 0)
+#define INIT_COMPLETION(x) ((x).done = 0)
/*
 * rw_semaphore (no-op stubs):
 *
 * - init_rwsem(x) / fini_rwsem(x)
 * - down_read(x) / down_read_trylock(x) / up_read(x)
 * - down_write(x) / down_write_trylock(x) / downgrade_write(x) / up_write(x)
 */
struct rw_semaphore {
        int foo;
};

void init_rwsem(struct rw_semaphore *s);
void down_read(struct rw_semaphore *s);
int down_read_trylock(struct rw_semaphore *s);
void down_write(struct rw_semaphore *s);
void downgrade_write(struct rw_semaphore *s);
int down_write_trylock(struct rw_semaphore *s);
void up_read(struct rw_semaphore *s);
void up_write(struct rw_semaphore *s);
void fini_rwsem(struct rw_semaphore *s);

#define DECLARE_RWSEM(name)     struct rw_semaphore name = { }
/*
 * read-write lock : Need to be investigated more!!
 * XXX nikita: for now, let rwlock_t be identical to rw_semaphore.
 *
 * - rwlock_init(x)
 * - read_lock(x) / read_unlock(x)
 * - write_lock(x) / write_unlock(x)
 * - {read,write}_lock_irqsave(x, f)
 * - {read,write}_unlock_irqrestore(x, f)
 */
#define rwlock_t                struct rw_semaphore
#define DEFINE_RWLOCK(lock)     rwlock_t lock = { }

#define rwlock_init(pl)         init_rwsem(pl)

#define read_lock(l)            down_read(l)
#define read_unlock(l)          up_read(l)
#define write_lock(l)           down_write(l)
#define write_unlock(l)         up_write(l)

/* The irqsave/irqrestore variants take the plain lock; @f (flags) is
 * ignored in user space. */
static inline void write_lock_irqsave(rwlock_t *l, unsigned long f)
{
        write_lock(l);
}

static inline void write_unlock_irqrestore(rwlock_t *l, unsigned long f)
{
        write_unlock(l);
}

static inline void read_lock_irqsave(rwlock_t *l, unsigned long f)
{
        read_lock(l);
}

static inline void read_unlock_irqrestore(rwlock_t *l, unsigned long f)
{
        read_unlock(l);
}
/*
 * Atomic for single-threaded user space.
 *
 * NOTE: these are NOT atomic — they are plain integer operations, which
 * is sufficient only in a single-threaded environment.
 *
 * All macro arguments are fully parenthesized: without parentheses an
 * expression argument such as "x | y" passed as @u would misparse
 * ("counter != x | y" groups as "(counter != x) | y").
 */
typedef struct { volatile int counter; } atomic_t;

#define atomic_sub(b, a)        do { (a)->counter -= (b); } while (0)
#define atomic_sub_return(n, a) ((a)->counter -= (n))
#define atomic_dec_return(a)    atomic_sub_return(1, a)

/* Add @a to @v unless @v equals @u.  Returns the updated counter when
 * the add was performed, otherwise 0.  NOTE(review): this differs
 * subtly from the kernel's boolean contract when the sum itself is 0. */
#define atomic_add_unless(v, a, u) \
        ((v)->counter != (u) ? ((v)->counter += (a)) : 0)
#define atomic_inc_not_zero(v)  atomic_add_unless((v), 1, 0)

/* Compare-and-exchange: store @nv iff @v equals @ov; always returns the
 * value of @v observed before the operation. */
#define atomic_cmpxchg(v, ov, nv) \
        ((v)->counter == (ov) ? ((v)->counter = (nv), (ov)) : (v)->counter)
#ifdef HAVE_LIBPTHREAD
#include <pthread.h>

/*
 * Multi-threaded user space atomic APIs.
 * Presumably implemented with pthread locking — the definitions are not
 * visible in this header, only the type and prototypes.
 */
typedef struct { volatile int counter; } mt_atomic_t;

int mt_atomic_read(mt_atomic_t *a);
void mt_atomic_set(mt_atomic_t *a, int b);
int mt_atomic_dec_and_test(mt_atomic_t *a);
void mt_atomic_inc(mt_atomic_t *a);
void mt_atomic_dec(mt_atomic_t *a);
void mt_atomic_add(int b, mt_atomic_t *a);
void mt_atomic_sub(int b, mt_atomic_t *a);

#endif /* HAVE_LIBPTHREAD */
* Mutex interface.
*
**************************************************************************/
/* A user-space mutex is just a binary semaphore. */
#define mutex semaphore

#define DEFINE_MUTEX(m)         DEFINE_SEMAPHORE(m)

static inline void mutex_init(struct mutex *mutex)
{
        sema_init(mutex, 1);
}

static inline void mutex_lock(struct mutex *mutex)
{
        down(mutex);
}

static inline void mutex_unlock(struct mutex *mutex)
{
        up(mutex);
}

static inline int mutex_lock_interruptible(struct mutex *mutex)
{
        return down_interruptible(mutex);
}

/**
 * Try-lock this mutex.
 *
 * \retval 1 try-lock succeeded (lock acquired).
 * \retval 0 lock contention.
 */
static inline int mutex_trylock(struct mutex *mutex)
{
        return !down_trylock(mutex);
}

static inline void mutex_destroy(struct mutex *lock)
{
}

/*
 * The "nested" (lockdep) variants collapse to the plain operations in
 * user space.
 */
#define mutex_lock_nested(mutex, subclass)      mutex_lock(mutex)
#define spin_lock_nested(lock, subclass)        spin_lock(lock)
#define down_read_nested(lock, subclass)        down_read(lock)