-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/*
- * nt specific part ...
+ * IMPORTANT !!!!!!!!
+ *
+ * All locks' declarations are not guaranteed to be initialized,
+ * although some of them are initialized in Linux. All locks
+ * declared by CFS_DECL_* should be initialized explicitly.
*/
+/*
+ * spinlock & event definitions
+ */
+
+typedef struct spin_lock spinlock_t;
/* atomic */
#define ATOMIC_INIT(i) { i }
#define atomic_read(v) ((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
+#define atomic_set(v,i) (((v)->counter) = (i))
void FASTCALL atomic_add(int i, atomic_t *v);
void FASTCALL atomic_sub(int i, atomic_t *v);
int FASTCALL atomic_dec_and_test(atomic_t *v);
int FASTCALL atomic_inc_and_test(atomic_t *v);
+int FASTCALL atomic_add_return(int i, atomic_t *v);
+int FASTCALL atomic_sub_return(int i, atomic_t *v);
+
+#define atomic_inc_return(v) atomic_add_return(1, v)
+#define atomic_dec_return(v) atomic_sub_return(1, v)
+
+int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock);
/* event */
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
static inline void
- cfs_init_event(event_t *event, int type, int status)
+cfs_init_event(event_t *event, int type, int status)
{
KeInitializeEvent(
event,
}
/*
- * cfs_wait_event
+ * cfs_wait_event_internal
* To wait on an event to syncrhonize the process
*
* Arguments:
* Zero: waiting timeouts
* Non Zero: event signaled ...
*
- * Notes:
+ * Notes:
* N/A
*/
static inline int64_t
-cfs_wait_event(event_t * event, int64_t timeout)
+cfs_wait_event_internal(event_t * event, int64_t timeout)
{
NTSTATUS Status;
LARGE_INTEGER TimeOut;
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
KeResetEvent(event);
}
-
-/*
- * IMPORTANT !!!!!!!!
- *
- * All locks' declaration are not guaranteed to be initialized,
- * Althought some of they are initialized in Linux. All locks
- * declared by CFS_DECL_* should be initialized explicitly.
- */
-
-
/*
* spin lock defintions / routines
*/
* Warning:
*
* for spinlock operations, try to grab nesting acquisition of
- * spinlock will cause dead-lock in MP system and current irql
+ * spinlock will cause dead-lock in MP system and current irql
* overwritten for UP system. (UP system could allow nesting spin
* acqisition, because it's not spin at all just raising the irql.)
*
*/
-typedef struct spin_lock {
-
- KSPIN_LOCK lock;
- KIRQL irql;
-
-} spinlock_t;
-
+struct spin_lock {
+ KSPIN_LOCK lock;
+ KIRQL irql;
+};
-#define CFS_DECL_SPIN(name) spinlock_t name;
-#define CFS_DECL_SPIN_EXTERN(name) extern spinlock_t name;
+#define CFS_DECL_SPIN(name) spinlock_t name;
+#define CFS_DECL_SPIN_EXTERN(name) extern spinlock_t name;
+#define DEFINE_SPINLOCK {0}
static inline void spin_lock_init(spinlock_t *lock)
{
- KeInitializeSpinLock(&(lock->lock));
+ KeInitializeSpinLock(&(lock->lock));
}
-
static inline void spin_lock(spinlock_t *lock)
{
- KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+ KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+}
+
+static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
+{
+ KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}
static inline void spin_unlock(spinlock_t *lock)
{
- KIRQL irql = lock->irql;
- KeReleaseSpinLock(&(lock->lock), irql);
+ KIRQL irql = lock->irql;
+ KeReleaseSpinLock(&(lock->lock), irql);
}
-#define spin_lock_irqsave(lock, flags) do {(flags) = 0; spin_lock(lock);} while(0)
-#define spin_unlock_irqrestore(lock, flags) do {spin_unlock(lock);} while(0)
+#define spin_lock_irqsave(lock, flags) \
+ do { (flags) = 0; spin_lock(lock); } while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { spin_unlock(lock); } while (0)
/* There's no corresponding routine in windows kernel.
no way to identify the system is MP build or UP build
on the runtime. We just uses a workaround for it. */
-extern int MPSystem;
+extern int libcfs_mp_system;
static int spin_trylock(spinlock_t *lock)
{
- KIRQL Irql;
- int rc = 0;
+ KIRQL Irql;
+ int rc = 0;
- ASSERT(lock != NULL);
+ ASSERT(lock != NULL);
- KeRaiseIrql(DISPATCH_LEVEL, &Irql);
+ KeRaiseIrql(DISPATCH_LEVEL, &Irql);
- if (MPSystem) {
- if (0 == (ulong_ptr)lock->lock) {
+ if (libcfs_mp_system) {
+ if (0 == (ulong_ptr_t)lock->lock) {
#if _X86_
- __asm {
- mov edx, dword ptr [ebp + 8]
- lock bts dword ptr[edx], 0
- jb lock_failed
- mov rc, TRUE
- lock_failed:
- }
+ __asm {
+ mov edx, dword ptr [ebp + 8]
+ lock bts dword ptr[edx], 0
+ jb lock_failed
+ mov rc, TRUE
+ lock_failed:
+ }
#else
- KdBreakPoint();
+ KdBreakPoint();
#endif
- }
- } else {
- rc = TRUE;
- }
+ }
+ } else {
+ rc = TRUE;
+ }
- if (rc) {
- lock->irql = Irql;
- } else {
- KeLowerIrql(Irql);
- }
+ if (rc) {
+ lock->irql = Irql;
+ } else {
+ KeLowerIrql(Irql);
+ }
- return rc;
+ return rc;
+}
+
+static int spin_is_locked(spinlock_t *lock)
+{
+#if _WIN32_WINNT >= 0x502
+ /* KeTestSpinLock only available on 2k3 server or later */
+ return !KeTestSpinLock(&lock->lock);
+#else
+ return (int) (lock->lock);
+#endif
}
/* synchronization between cpus: it will disable all DPCs
kernel task scheduler on the CPU */
-#define spin_lock_bh(x) spin_lock(x)
-#define spin_unlock_bh(x) spin_unlock(x)
+#define spin_lock_bh(x) spin_lock(x)
+#define spin_unlock_bh(x) spin_unlock(x)
#define spin_lock_bh_init(x) spin_lock_init(x)
/*
*/
-typedef struct rw_semaphore {
- ERESOURCE rwsem;
-} rw_semaphore_t;
-
+struct rw_semaphore {
+ ERESOURCE rwsem;
+};
-#define CFS_DECL_RWSEM(name) rw_semaphore_t name
-#define CFS_DECL_RWSEM_EXTERN(name) extern rw_semaphore_t name
+#define DECLARE_RWSEM(name) struct rw_semaphore name
+#define CFS_DECLARE_RWSEM_EXTERN(name) extern struct rw_semaphore name
/*
* init_rwsem
- * To initialize the the rw_semaphore_t structure
+ * To initialize the rw_semaphore structure
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the rw_semaphore structure
*
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
-static inline void init_rwsem(rw_semaphore_t *s)
+static inline void init_rwsem(struct rw_semaphore *s)
{
ExInitializeResourceLite(&s->rwsem);
}
-
+#define rwsem_init init_rwsem
/*
* fini_rwsem
- * To finilize/destroy the the rw_semaphore_t structure
+ * To finalize/destroy the rw_semaphore structure
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the rw_semaphore structure
*
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* For winnt system, we need this routine to delete the ERESOURCE.
* Just define it NULL for other systems.
*/
-static inline void fini_rwsem(rw_semaphore_t *s)
+static inline void fini_rwsem(struct rw_semaphore *s)
{
- ExDeleteResourceLite(&s->rwsem);
+ ExDeleteResourceLite(&s->rwsem);
}
/*
* down_read
- * To acquire read-lock of the rw_semahore
+ * To acquire read-lock of the rw_semaphore
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
{
ExAcquireResourceSharedLite(&s->rwsem, TRUE);
}
+#define down_read_nested down_read
/*
* down_read_trylock
- * To acquire read-lock of the rw_semahore without blocking
+ * To acquire read-lock of the rw_semaphore without blocking
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* Zero: failed to acquire the read lock
* Non-Zero: succeeded to acquire the read lock
*
- * Notes:
+ * Notes:
* This routine will return immediately without waiting.
*/
/*
* down_write
- * To acquire write-lock of the rw_semahore
+ * To acquire write-lock of the struct rw_semaphore
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
{
ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
}
-
+#define down_write_nested down_write
/*
* down_write_trylock
- * To acquire write-lock of the rw_semahore without blocking
+ * To acquire write-lock of the rw_semaphore without blocking
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* Zero: failed to acquire the write lock
* Non-Zero: succeeded to acquire the read lock
*
- * Notes:
+ * Notes:
* This routine will return immediately without waiting.
*/
static inline int down_write_trylock(struct rw_semaphore *s)
{
- return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
+ return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
}
/*
* up_read
- * To release read-lock of the rw_semahore
+ * To release read-lock of the rw_semaphore
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
static inline void up_read(struct rw_semaphore *s)
{
- ExReleaseResourceForThreadLite(
- &(s->rwsem),
- ExGetCurrentResourceThread());
+ ExReleaseResourceForThreadLite(&(s->rwsem),
+ ExGetCurrentResourceThread());
}
/*
* up_write
- * To release write-lock of the rw_semahore
+ * To release write-lock of the rw_semaphore
*
* Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
static inline void up_write(struct rw_semaphore *s)
{
- ExReleaseResourceForThreadLite(
- &(s->rwsem),
- ExGetCurrentResourceThread());
+ ExReleaseResourceForThreadLite(&(s->rwsem),
+ ExGetCurrentResourceThread());
}
/*
*/
typedef struct {
- spinlock_t guard;
- int count;
+ spinlock_t guard;
+ int count;
} rwlock_t;
-void rwlock_init(rwlock_t * rwlock);
-void rwlock_fini(rwlock_t * rwlock);
+void rwlock_init(rwlock_t *rwlock);
+void cfs_rwlock_fini(rwlock_t *rwlock);
+
+void read_lock(rwlock_t *rwlock);
+void read_unlock(rwlock_t *rwlock);
+void write_lock(rwlock_t *rwlock);
+void write_unlock(rwlock_t *rwlock);
+
+#define write_lock_irqsave(l, f) do { f = 0; write_lock(l); } while (0)
+#define write_unlock_irqrestore(l, f) do { write_unlock(l); } while (0)
+#define read_lock_irqsave(l, f) do { f = 0; read_lock(l); } while (0)
+#define read_unlock_irqrestore(l, f) do { read_unlock(l); } while (0)
+
+#define write_lock_bh write_lock
+#define write_unlock_bh write_unlock
-void read_lock(rwlock_t * rwlock);
-void read_unlock(rwlock_t * rwlock);
-void write_lock(rwlock_t * rwlock);
-void write_unlock(rwlock_t * rwlock);
+struct lock_class_key {
+ int foo;
+};
-#define write_lock_irqsave(l, f) do {f = 0; write_lock(l);} while(0)
-#define write_unlock_irqrestore(l, f) do {write_unlock(l);} while(0)
-#define read_lock_irqsave(l, f) do {f=0; read_lock(l);} while(0)
-#define read_unlock_irqrestore(l, f) do {read_unlock(l);} while(0)
+#define lockdep_set_class(lock, class) do {} while (0)
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
/*
* Semaphore
* - __up(x)
*/
-typedef struct semaphore {
+struct semaphore {
KSEMAPHORE sem;
-} mutex_t;
+};
static inline void sema_init(struct semaphore *s, int val)
{
static inline void __down(struct semaphore *s)
{
- KeWaitForSingleObject( &(s->sem), Executive,
- KernelMode, FALSE, NULL );
+ KeWaitForSingleObject(&(s->sem), Executive, KernelMode, FALSE, NULL);
}
-
static inline void __up(struct semaphore *s)
{
KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
}
+static inline int down_trylock(struct semaphore *s)
+{
+ LARGE_INTEGER timeout = {0};
+ NTSTATUS status = KeWaitForSingleObject(&(s->sem), Executive,
+ KernelMode, FALSE, &timeout);
+
+ if (status == STATUS_SUCCESS)
+ return 0;
+
+ return 1;
+}
+
/*
* mutex_t:
*
* - mutex_down(x)
*/
+#define mutex semaphore
+
+#define CFS_DECLARE_MUTEX(x) struct mutex x
/*
* init_mutex
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
-
-static inline void init_mutex(mutex_t *mutex)
+#define mutex_init cfs_init_mutex
+static inline void cfs_init_mutex(struct mutex *mutex)
{
- sema_init(mutex, 1);
+ sema_init(mutex, 1);
}
-
/*
* mutex_down
* To acquire the mutex lock
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
-static inline void mutex_down(mutex_t *mutex)
+static inline void cfs_mutex_down(struct mutex *mutex)
+{
+ __down(mutex);
+}
+
+static inline int cfs_mutex_down_interruptible(struct mutex *mutex)
{
- __down(mutex);
+ __down(mutex);
+ return 0;
}
+#define mutex_lock(m) cfs_mutex_down(m)
+#define mutex_trylock(s) down_trylock(s)
+#define mutex_lock_nested(m) cfs_mutex_down(m)
+#define down(m) cfs_mutex_down(m)
+#define down_interruptible(m) cfs_mutex_down_interruptible(m)
/*
* mutex_up
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
-static inline void mutex_up(mutex_t *mutex)
+static inline void cfs_mutex_up(struct mutex *mutex)
{
- __up(mutex);
+ __up(mutex);
}
+#define mutex_unlock(m) cfs_mutex_up(m)
+#define up(m) cfs_mutex_up(m)
/*
* init_mutex_locked
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
-static inline init_mutex_locked(mutex_t *mutex)
+static inline void cfs_init_mutex_locked(struct mutex *mutex)
+{
+ cfs_init_mutex(mutex);
+ cfs_mutex_down(mutex);
+}
+
+static inline void mutex_destroy(struct mutex *mutex)
{
- init_mutex(mutex);
- mutex_down(mutex);
}
/*
* - wait_for_completion(c)
*/
-struct completion {
+struct completion{
event_t event;
};
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
* Return Value:
* N/A
*
- * Notes:
+ * Notes:
* N/A
*/
static inline void wait_for_completion(struct completion *c)
{
- cfs_wait_event(&(c->event), 0);
+ cfs_wait_event_internal(&(c->event), 0);
}
-/* __KERNEL__ */
-#else
-
-#include "../user-lock.h"
+static inline int wait_for_completion_interruptible(struct completion *c)
+{
+ cfs_wait_event_internal(&(c->event), 0);
+ return 0;
+}
-/* __KERNEL__ */
-#endif
+#endif /* !__KERNEL__ */
#endif