typedef struct spin_lock spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
-#define LASSERT_SPIN_LOCKED(lock) do {} while(0)
-#define LASSERT_SEM_LOCKED(sem) do {} while(0)
+#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
+#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
+#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
void spin_lock_init(spinlock_t *lock);
void spin_lock(spinlock_t *lock);
unsigned int done;
cfs_waitq_t wait;
};
-
+typedef int (*cfs_wait_handler_t) (int timeout);
+void init_completion_module(cfs_wait_handler_t handler);
+int call_wait_handler(int timeout);
void init_completion(struct completion *c);
void complete(struct completion *c);
void wait_for_completion(struct completion *c);
+int wait_for_completion_interruptible(struct completion *c);
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
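+
+/*
+ * Usage sketch (illustrative only): one thread blocks on the completion
+ * until another thread signals it; both share 'c'.
+ *
+ *      struct completion c;
+ *
+ *      init_completion(&c);
+ *      // waiter thread:
+ *      wait_for_completion(&c);
+ *      // completer thread:
+ *      complete(&c);
+ */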
int down_write_trylock(struct rw_semaphore *s);
void up_read(struct rw_semaphore *s);
void up_write(struct rw_semaphore *s);
+void fini_rwsem(struct rw_semaphore *s);
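+
+/*
+ * Usage sketch (illustrative only, assuming the init_rwsem()/down_read()
+ * declarations found elsewhere in this header): readers may share the
+ * semaphore, a writer is exclusive, and down_write_trylock() returns
+ * non-zero on success.
+ *
+ *      struct rw_semaphore s;
+ *
+ *      init_rwsem(&s);
+ *      down_read(&s);
+ *      up_read(&s);
+ *      if (down_write_trylock(&s))
+ *              up_write(&s);
+ *      fini_rwsem(&s);
+ */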
/*
* read-write lock: needs further investigation
static inline void
write_unlock_irqrestore(rwlock_t *l, unsigned long f) { write_unlock(l); }
-static inline void
+static inline void
read_lock_irqsave(rwlock_t *l, unsigned long f) { read_lock(l); }
static inline void
read_unlock_irqrestore(rwlock_t *l, unsigned long f) { read_unlock(l); }
#define atomic_sub(b,a) do {(a)->counter -= b;} while (0)
#define atomic_sub_return(n,a) ((a)->counter -= n)
#define atomic_dec_return(a) atomic_sub_return(1,a)
+#define atomic_add_unless(v, a, u) ((v)->counter != (u) ? ((v)->counter += (a)) : 0)
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
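+
+/*
+ * Usage sketch (illustrative only; 'obj' and use() are hypothetical):
+ * atomic_inc_not_zero() takes a reference only while the count is still
+ * non-zero.  Note that this atomic_add_unless() evaluates to the updated
+ * counter rather than the kernel's 0/1 result, so test it only for truth.
+ *
+ *      if (atomic_inc_not_zero(&obj->ref)) {
+ *              use(obj);
+ *              atomic_dec_return(&obj->ref);
+ *      }
+ */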
#ifdef HAVE_LIBPTHREAD
#endif /* HAVE_LIBPTHREAD */
+/**************************************************************************
+ *
+ * Mutex interface.
+ *
+ **************************************************************************/
+
+struct mutex {
+ struct semaphore m_sem;
+};
+
+#define DEFINE_MUTEX(m) struct mutex m
+
+static inline void mutex_init(struct mutex *mutex)
+{
+ init_mutex(&mutex->m_sem);
+}
+
+static inline void mutex_lock(struct mutex *mutex)
+{
+ mutex_down(&mutex->m_sem);
+}
+
+static inline void mutex_unlock(struct mutex *mutex)
+{
+ mutex_up(&mutex->m_sem);
+}
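+
+/*
+ * Usage sketch (illustrative only): note that DEFINE_MUTEX() above only
+ * declares the variable, so mutex_init() is still required before use.
+ *
+ *      static DEFINE_MUTEX(m);
+ *
+ *      mutex_init(&m);
+ *      mutex_lock(&m);
+ *      mutex_unlock(&m);
+ *      mutex_destroy(&m);
+ */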
+
+/**
+ * Try-lock this mutex.
+ *
+ * \retval 0 try-lock succeeded (lock acquired).
+ * \retval errno indicates lock contention.
+ */
+static inline int mutex_down_trylock(struct mutex *mutex)
+{
+ return 0;
+}
+
+/**
+ * Try-lock this mutex.
+ *
+ * Note, return values are negation of what is expected from down_trylock() or
+ * pthread_mutex_trylock().
+ *
+ * \retval 1 try-lock succeeded (lock acquired).
+ * \retval 0 indicates lock contention.
+ */
+static inline int mutex_trylock(struct mutex *mutex)
+{
+ return !mutex_down_trylock(mutex);
+}
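+
+/*
+ * Usage sketch (illustrative only; do_work() is hypothetical): unlike
+ * down_trylock(), a non-zero return here means the lock was acquired.
+ *
+ *      if (mutex_trylock(&m)) {
+ *              do_work();
+ *              mutex_unlock(&m);
+ *      }
+ */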
+
+static inline void mutex_destroy(struct mutex *lock)
+{
+}
+
+/**
+ * This is for use in assertions _only_, i.e., this function should always
+ * return 1.
+ *
+ * \retval 1 mutex is locked.
+ *
+ * \retval 0 mutex is not locked. This should never happen.
+ */
+static inline int mutex_is_locked(struct mutex *lock)
+{
+ return 1;
+}
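+
+/*
+ * Usage sketch (illustrative only): suitable inside assertions, e.g.
+ *
+ *      LASSERT(mutex_is_locked(&m));
+ *
+ * but never for control flow, since this stub unconditionally reports
+ * the mutex as locked.
+ */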
+
+
+/**************************************************************************
+ *
+ * Lockdep "implementation". Also see lustre_compat25.h
+ *
+ **************************************************************************/
+
+struct lock_class_key {
+ int foo;
+};
+
+static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
+{
+}
+
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+
+/* This has to be a macro, so that it can be undefined in kernels that do
+ * not support lockdep. */
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
+#define down_read_nested(lock, subclass) down_read(lock)
+#define down_write_nested(lock, subclass) down_write(lock)
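+
+/*
+ * Usage sketch (illustrative only): the _nested variants keep callers
+ * written against kernel lockdep compiling; the subclass argument (1
+ * below) is discarded.
+ *
+ *      mutex_lock_nested(&m, 1);
+ *      mutex_unlock(&m);
+ */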
+
+
#endif /* !__KERNEL__ */