typedef struct spin_lock spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
-#define LASSERT_SPIN_LOCKED(lock) do {} while(0)
-#define LASSERT_SEM_LOCKED(sem) do {} while(0)
+#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
+#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
+#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
void spin_lock_init(spinlock_t *lock);
void spin_lock(spinlock_t *lock);
unsigned int done;
cfs_waitq_t wait;
};
-typedef int (cfs_wait_handler) (int timeout);
-void set_completion_wait_handler(cfs_wait_handler *handler);
+typedef int (*cfs_wait_handler_t) (int timeout);
+void init_completion_module(cfs_wait_handler_t handler);
+int call_wait_handler(int timeout);
void init_completion(struct completion *c);
void complete(struct completion *c);
void wait_for_completion(struct completion *c);
int down_write_trylock(struct rw_semaphore *s);
void up_read(struct rw_semaphore *s);
void up_write(struct rw_semaphore *s);
+void fini_rwsem(struct rw_semaphore *s);
/*
* read-write lock : Need to be investigated more!!
#define atomic_sub(b,a) do {(a)->counter -= b;} while (0)
#define atomic_sub_return(n,a) ((a)->counter -= n)
#define atomic_dec_return(a) atomic_sub_return(1,a)
+#define atomic_add_unless(v, a, u) ((v)->counter != (u) ? ((v)->counter += (a)) : 0)
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#ifdef HAVE_LIBPTHREAD
**************************************************************************/
struct lock_class_key {
- ;
+ int foo;
};
static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
{
}
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+
/* This has to be a macro, so that can be undefined in kernels that do not
* support lockdep. */
#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
#define down_read_nested(lock, subclass) down_read(lock)
#define down_write_nested(lock, subclass) down_write(lock)
+
/* !__KERNEL__ */
#endif