X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=libcfs%2Finclude%2Flibcfs%2Fwinnt%2Fwinnt-lock.h;h=169cc02d24be9f9e3049a7bb7cb27cba0cb5145a;hp=3eaa8cc7c5b63dad54c86a3e6b3c1ae85a837c7f;hb=ef6225af104b9138638c71b80e87786b8e5e75e5;hpb=70e80ade90af09300396706b8910e196a7928520

diff --git a/libcfs/include/libcfs/winnt/winnt-lock.h b/libcfs/include/libcfs/winnt/winnt-lock.h
index 3eaa8cc..169cc02 100644
--- a/libcfs/include/libcfs/winnt/winnt-lock.h
+++ b/libcfs/include/libcfs/winnt/winnt-lock.h
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -16,8 +14,8 @@
  * in the LICENSE file that accompanied this code).
  *
  * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  * CA 95054 USA or visit www.sun.com if you need additional information or
@@ -26,7 +24,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -49,30 +47,46 @@
 
 /*
- * nt specific part ...
+ * IMPORTANT !!!!!!!!
+ *
+ * Lock declarations are not guaranteed to be initialized,
+ * although some of them are initialized in Linux. All locks
+ * declared by CFS_DECL_* should be initialized explicitly.
+ */
+
+/*
+ * spinlock & event definitions
  */
 
+typedef struct cfs_spin_lock cfs_spinlock_t;
+
 /* atomic */
 
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { volatile int counter; } cfs_atomic_t;
+
+#define CFS_ATOMIC_INIT(i)  { i }
+
+#define cfs_atomic_read(v)  ((v)->counter)
+#define cfs_atomic_set(v,i) (((v)->counter) = (i))
 
-#define ATOMIC_INIT(i)  { i }
+void FASTCALL cfs_atomic_add(int i, cfs_atomic_t *v);
+void FASTCALL cfs_atomic_sub(int i, cfs_atomic_t *v);
 
-#define atomic_read(v)  ((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
+int FASTCALL cfs_atomic_sub_and_test(int i, cfs_atomic_t *v);
 
-void FASTCALL atomic_add(int i, atomic_t *v);
-void FASTCALL atomic_sub(int i, atomic_t *v);
+void FASTCALL cfs_atomic_inc(cfs_atomic_t *v);
+void FASTCALL cfs_atomic_dec(cfs_atomic_t *v);
 
-int FASTCALL atomic_sub_and_test(int i, atomic_t *v);
+int FASTCALL cfs_atomic_dec_and_test(cfs_atomic_t *v);
+int FASTCALL cfs_atomic_inc_and_test(cfs_atomic_t *v);
 
-void FASTCALL atomic_inc(atomic_t *v);
-void FASTCALL atomic_dec(atomic_t *v);
+int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v);
+int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);
 
-int FASTCALL atomic_dec_and_test(atomic_t *v);
-int FASTCALL atomic_inc_and_test(atomic_t *v);
+#define cfs_atomic_inc_return(v)  cfs_atomic_add_return(1, v)
+#define cfs_atomic_dec_return(v)  cfs_atomic_sub_return(1, v)
 
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock);
 
 /* event */
 
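The hunk above gives the Linux-style atomic API its cfs_ prefix and adds cfs_atomic_add_return()/cfs_atomic_sub_return() and cfs_atomic_dec_and_lock(). As a usage sketch only (not part of the patch; the object type and obj_free() are hypothetical), a reference count built on this API would look like:

    struct obj {
        cfs_atomic_t refcount;      /* set up with cfs_atomic_set(&o->refcount, 1) */
    };

    static void obj_get(struct obj *o)
    {
        cfs_atomic_inc(&o->refcount);       /* take an extra reference */
    }

    static void obj_put(struct obj *o)
    {
        /* cfs_atomic_dec_and_test() returns non-zero only when the
         * counter reaches zero, i.e. when the last reference drops */
        if (cfs_atomic_dec_and_test(&o->refcount))
            obj_free(o);                    /* hypothetical destructor */
    }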
@@ -93,11 +107,11 @@ typedef KEVENT event_t;
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 static inline void
-    cfs_init_event(event_t *event, int type, int status)
+cfs_init_event(event_t *event, int type, int status)
 {
     KeInitializeEvent(
             event,
@@ -107,7 +121,7 @@ static inline void
 }
 
 /*
- * cfs_wait_event
+ * cfs_wait_event_internal
  * To wait on an event to syncrhonize the process
  *
  * Arguments:
@@ -118,17 +132,17 @@ static inline void
  *   Zero:   waiting timeouts
  *   Non Zero: event signaled ...
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
 static inline int64_t
-cfs_wait_event(event_t * event, int64_t timeout)
+cfs_wait_event_internal(event_t * event, int64_t timeout)
 {
     NTSTATUS        Status;
     LARGE_INTEGER   TimeOut;
 
-    TimeOut.QuadPart = -1 * (10000000/HZ) * timeout;
+    TimeOut.QuadPart = -1 * (10000000/CFS_HZ) * timeout;
 
     Status = KeWaitForSingleObject(
                  event,
@@ -155,7 +169,7 @@ cfs_wait_event(event_t * event, int64_t timeout)
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
@@ -175,7 +189,7 @@ cfs_wake_event(event_t * event)
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
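These hunks rename cfs_wait_event() to cfs_wait_event_internal() (freeing the Linux-compatible name) and convert ticks with CFS_HZ instead of HZ; the negative QuadPart makes the KeWaitForSingleObject timeout relative. A handshake sketch under two assumptions not visible in the hunks — a timeout of 0 blocks indefinitely (cfs_wait_for_completion() below relies on this) and type 1 selects a NotificationEvent — with all other names illustrative:

    static event_t ready_event;

    static void waiter(void)
    {
        /* type = 1: notification event, stays signalled until reset;
         * FALSE: initially non-signalled */
        cfs_init_event(&ready_event, 1, FALSE);

        /* returns non-zero once the event is signalled */
        if (cfs_wait_event_internal(&ready_event, 0))
            cfs_clear_event(&ready_event);  /* re-arm for the next round */
    }

    static void signaller(void)
    {
        cfs_wake_event(&ready_event);       /* release the waiter */
    }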
@@ -185,16 +199,6 @@ cfs_clear_event(event_t * event)
 {
     KeResetEvent(event);
 }
 
-
-/*
- * IMPORTANT !!!!!!!!
- *
- * All locks' declaration are not guaranteed to be initialized,
- * Althought some of they are initialized in Linux. All locks
- * declared by CFS_DECL_* should be initialized explicitly.
- */
-
-
 /*
  * spin lock defintions / routines
  */
@@ -203,44 +207,49 @@ cfs_clear_event(event_t * event)
  * Warning:
  *
  * for spinlock operations, try to grab nesting acquisition of
- * spinlock will cause dead-lock in MP system and current irql 
+ * spinlock will cause dead-lock in MP system and current irql
  * overwritten for UP system. (UP system could allow nesting spin
  * acqisition, because it's not spin at all just raising the irql.)
  *
 */
 
-typedef struct spin_lock {
-
+struct cfs_spin_lock {
     KSPIN_LOCK lock;
     KIRQL      irql;
+};
 
-} spinlock_t;
-
-
-#define CFS_DECL_SPIN(name)        spinlock_t name;
-#define CFS_DECL_SPIN_EXTERN(name) extern spinlock_t name;
+#define CFS_DECL_SPIN(name)        cfs_spinlock_t name;
+#define CFS_DECL_SPIN_EXTERN(name) extern cfs_spinlock_t name;
+#define DEFINE_SPINLOCK {0}
 
-static inline void spin_lock_init(spinlock_t *lock)
+static inline void cfs_spin_lock_init(cfs_spinlock_t *lock)
 {
     KeInitializeSpinLock(&(lock->lock));
 }
 
+static inline void cfs_spin_lock(cfs_spinlock_t *lock)
+{
+    KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+}
 
-static inline void spin_lock(spinlock_t *lock)
+static inline void cfs_spin_lock_nested(cfs_spinlock_t *lock, unsigned subclass)
 {
     KeAcquireSpinLock(&(lock->lock), &(lock->irql));
 }
 
-static inline void spin_unlock(spinlock_t *lock)
+static inline void cfs_spin_unlock(cfs_spinlock_t *lock)
 {
     KIRQL irql = lock->irql;
     KeReleaseSpinLock(&(lock->lock), irql);
 }
 
-#define spin_lock_irqsave(lock, flags)      do {(flags) = 0; spin_lock(lock);} while(0)
-#define spin_unlock_irqrestore(lock, flags) do {spin_unlock(lock);} while(0)
+#define cfs_spin_lock_irqsave(lock, flags) \
+do {(flags) = 0; cfs_spin_lock(lock);} while(0)
+
+#define cfs_spin_unlock_irqrestore(lock, flags) \
+do {cfs_spin_unlock(lock);} while(0)
 
 
 /* There's no corresponding routine in windows kernel.
@@ -248,9 +257,9 @@ static inline void spin_unlock(spinlock_t *lock)
    no way to identify the system is MP build or UP build
    on the runtime. We just uses a workaround for it.
 */
 
-extern int MPSystem;
+extern int libcfs_mp_system;
 
-static int spin_trylock(spinlock_t *lock)
+static int cfs_spin_trylock(cfs_spinlock_t *lock)
 {
     KIRQL   Irql;
     int     rc = 0;
@@ -259,8 +268,8 @@ static int spin_trylock(spinlock_t *lock)
 
     KeRaiseIrql(DISPATCH_LEVEL, &Irql);
 
-    if (MPSystem) {
-        if (0 == (ulong_ptr)lock->lock) {
+    if (libcfs_mp_system) {
+        if (0 == (ulong_ptr_t)lock->lock) {
 #if _X86_
             __asm {
                 mov  edx, dword ptr [ebp + 8]
@@ -287,163 +296,173 @@ static int spin_trylock(spinlock_t *lock)
     return rc;
 }
 
+static int cfs_spin_is_locked(cfs_spinlock_t *lock)
+{
+#if _WIN32_WINNT >= 0x502
+    /* KeTestSpinLock only available on 2k3 server or later */
+    return (!KeTestSpinLock(&lock->lock));
+#else
+    return (int) (lock->lock);
+#endif
+}
+
 /* synchronization between cpus: it will disable all DPCs
    kernel task scheduler on the CPU */
-#define spin_lock_bh(x)      spin_lock(x)
-#define spin_unlock_bh(x)    spin_unlock(x)
-#define spin_lock_bh_init(x) spin_lock_init(x)
+#define cfs_spin_lock_bh(x)      cfs_spin_lock(x)
+#define cfs_spin_unlock_bh(x)    cfs_spin_unlock(x)
+#define cfs_spin_lock_bh_init(x) cfs_spin_lock_init(x)
 
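Nesting warning and trylock workaround aside, the renamed spinlock API is used like its Linux counterpart. A sketch assuming the Linux convention that cfs_spin_trylock() returns non-zero on success and records the raised IRQL in the lock so cfs_spin_unlock() can restore it (neither detail is visible in these hunks); counter and function names are illustrative:

    static cfs_spinlock_t stats_lock;   /* must be initialized explicitly */
    static int            stats_count;

    static void stats_setup(void)
    {
        cfs_spin_lock_init(&stats_lock);
    }

    static void stats_bump(void)
    {
        unsigned long flags;

        /* flags is accepted only for Linux API compatibility; the
         * saved IRQL actually lives inside the lock itself */
        cfs_spin_lock_irqsave(&stats_lock, flags);
        stats_count++;
        cfs_spin_unlock_irqrestore(&stats_lock, flags);
    }

    static int stats_try_bump(void)
    {
        if (!cfs_spin_trylock(&stats_lock))
            return 0;                   /* contended on an MP system */
        stats_count++;
        cfs_spin_unlock(&stats_lock);
        return 1;
    }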
 /*
- * rw_semaphore (using ERESOURCE)
+ * cfs_rw_semaphore (using ERESOURCE)
  */
 
-typedef struct rw_semaphore {
+typedef struct cfs_rw_semaphore {
     ERESOURCE   rwsem;
-} rw_semaphore_t;
-
+} cfs_rw_semaphore_t;
 
-#define CFS_DECL_RWSEM(name)        rw_semaphore_t name
-#define CFS_DECL_RWSEM_EXTERN(name) extern rw_semaphore_t name
+#define CFS_DECLARE_RWSEM(name)        cfs_rw_semaphore_t name
+#define CFS_DECLARE_RWSEM_EXTERN(name) extern cfs_rw_semaphore_t name
 
 /*
- * init_rwsem
- *   To initialize the the rw_semaphore_t structure
+ * cfs_init_rwsem
+ *   To initialize the cfs_rw_semaphore_t structure
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void init_rwsem(rw_semaphore_t *s)
+static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
 {
     ExInitializeResourceLite(&s->rwsem);
 }
-
+#define rwsem_init cfs_init_rwsem
 
 /*
- * fini_rwsem
- *   To finilize/destroy the the rw_semaphore_t structure
+ * cfs_fini_rwsem
+ *   To finalize/destroy the cfs_rw_semaphore_t structure
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   For winnt system, we need this routine to delete the ERESOURCE.
  *   Just define it NULL for other systems.
 */
 
-static inline void fini_rwsem(rw_semaphore_t *s)
+static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
 {
     ExDeleteResourceLite(&s->rwsem);
 }
 
 /*
- * down_read
- *   To acquire read-lock of the rw_semahore
+ * cfs_down_read
+ *   To acquire read-lock of the cfs_rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void down_read(struct rw_semaphore *s)
+static inline void cfs_down_read(cfs_rw_semaphore_t *s)
 {
     ExAcquireResourceSharedLite(&s->rwsem, TRUE);
 }
+#define cfs_down_read_nested cfs_down_read
 
 /*
- * down_read_trylock
- *   To acquire read-lock of the rw_semahore without blocking
+ * cfs_down_read_trylock
+ *   To acquire read-lock of the cfs_rw_semaphore without blocking
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   Zero: failed to acquire the read lock
  *   Non-Zero: succeeded to acquire the read lock
  *
- * Notes: 
+ * Notes:
  *   This routine will return immediately without waiting.
  */
 
-static inline int down_read_trylock(struct rw_semaphore *s)
+static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
 {
     return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
 }
 
 /*
- * down_write
- *   To acquire write-lock of the rw_semahore
+ * cfs_down_write
+ *   To acquire write-lock of the cfs_rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void down_write(struct rw_semaphore *s)
+static inline void cfs_down_write(cfs_rw_semaphore_t *s)
 {
     ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
 }
-
+#define cfs_down_write_nested cfs_down_write
 
 /*
  * down_write_trylock
- *   To acquire write-lock of the rw_semahore without blocking
+ *   To acquire write-lock of the cfs_rw_semaphore without blocking
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   Zero: failed to acquire the write lock
  *   Non-Zero: succeeded to acquire the read lock
  *
- * Notes: 
+ * Notes:
  *   This routine will return immediately without waiting.
 */
 
-static inline int down_write_trylock(struct rw_semaphore *s)
+static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
 {
     return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
 }
 
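Because ERESOURCE objects must be created and deleted explicitly, a user of the renamed rwsem pairs cfs_init_rwsem()/cfs_fini_rwsem() around the lock's lifetime. A reader/writer sketch using the cfs_up_read()/cfs_up_write() release routines documented next (data and function names are illustrative):

    static CFS_DECLARE_RWSEM(table_sem);
    static int table_value;

    static void table_setup(void)
    {
        cfs_init_rwsem(&table_sem);     /* ExInitializeResourceLite */
    }

    static int table_read(void)
    {
        int v;

        cfs_down_read(&table_sem);      /* shared: many readers may enter */
        v = table_value;
        cfs_up_read(&table_sem);
        return v;
    }

    static void table_write(int v)
    {
        cfs_down_write(&table_sem);     /* exclusive */
        table_value = v;
        cfs_up_write(&table_sem);
    }

    static void table_teardown(void)
    {
        cfs_fini_rwsem(&table_sem);     /* ExDeleteResourceLite */
    }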
 /*
- * up_read
- *   To release read-lock of the rw_semahore
+ * cfs_up_read
+ *   To release read-lock of the cfs_rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void up_read(struct rw_semaphore *s)
+static inline void cfs_up_read(cfs_rw_semaphore_t *s)
 {
     ExReleaseResourceForThreadLite(
             &(s->rwsem),
@@ -452,20 +471,20 @@ static inline void up_read(struct rw_semaphore *s)
 
 /*
- * up_write
- *   To release write-lock of the rw_semahore
+ * cfs_up_write
+ *   To release write-lock of the cfs_rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the rw_semaphore_t structure
+ *   rwsem:  pointer to the cfs_rw_semaphore_t structure
  *
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void up_write(struct rw_semaphore *s)
+static inline void cfs_up_write(cfs_rw_semaphore_t *s)
 {
     ExReleaseResourceForThreadLite(
             &(s->rwsem),
@@ -483,23 +502,39 @@ static inline void up_write(struct rw_semaphore *s)
  */
 
 typedef struct {
-    spinlock_t guard;
-    int        count;
-} rwlock_t;
+    cfs_spinlock_t guard;
+    int            count;
+} cfs_rwlock_t;
+
+void cfs_rwlock_init(cfs_rwlock_t * rwlock);
+void cfs_rwlock_fini(cfs_rwlock_t * rwlock);
+
+void cfs_read_lock(cfs_rwlock_t * rwlock);
+void cfs_read_unlock(cfs_rwlock_t * rwlock);
+void cfs_write_lock(cfs_rwlock_t * rwlock);
+void cfs_write_unlock(cfs_rwlock_t * rwlock);
+
+#define cfs_write_lock_irqsave(l, f)      do {f = 0; cfs_write_lock(l);} while(0)
+#define cfs_write_unlock_irqrestore(l, f) do {cfs_write_unlock(l);} while(0)
+#define cfs_read_lock_irqsave(l, f)       do {f=0; cfs_read_lock(l);} while(0)
+#define cfs_read_unlock_irqrestore(l, f)  do {cfs_read_unlock(l);} while(0)
 
-void rwlock_init(rwlock_t * rwlock);
-void rwlock_fini(rwlock_t * rwlock);
+#define cfs_write_lock_bh   cfs_write_lock
+#define cfs_write_unlock_bh cfs_write_unlock
 
-void read_lock(rwlock_t * rwlock);
-void read_unlock(rwlock_t * rwlock);
-void write_lock(rwlock_t * rwlock);
-void write_unlock(rwlock_t * rwlock);
+typedef struct cfs_lock_class_key {
+    int foo;
+} cfs_lock_class_key_t;
 
-#define write_lock_irqsave(l, f)      do {f = 0; write_lock(l);} while(0)
-#define write_unlock_irqrestore(l, f) do {write_unlock(l);} while(0)
-#define read_lock_irqsave(l, f)       do {f=0; read_lock(l);} while(0)
-#define read_unlock_irqrestore(l, f)  do {read_unlock(l);} while(0)
+#define cfs_lockdep_set_class(lock, class) do {} while(0)
+
+static inline void cfs_lockdep_off(void)
+{
+}
+
+static inline void cfs_lockdep_on(void)
+{
+}
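cfs_rwlock_t is the spinlock-guarded reader/writer lock implemented out of line (only prototypes appear here), and the lockdep hooks are no-op stubs on winnt. A usage sketch with illustrative names; per the note at the top of the file, the lock must be initialized explicitly with cfs_rwlock_init() before use:

    static cfs_rwlock_t hits_lock;      /* cfs_rwlock_init(&hits_lock) first */
    static int          hits;

    static int hits_get(void)
    {
        unsigned long flags;
        int           n;

        cfs_read_lock_irqsave(&hits_lock, flags);   /* shared side */
        n = hits;
        cfs_read_unlock_irqrestore(&hits_lock, flags);
        return n;
    }

    static void hits_bump(void)
    {
        cfs_write_lock(&hits_lock);                 /* exclusive side */
        hits++;
        cfs_write_unlock(&hits_lock);
    }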
 
 /*
  * Semaphore
@@ -509,27 +544,40 @@ void write_unlock(rwlock_t * rwlock)
  * - __up(x)
  */
 
-typedef struct semaphore {
+typedef struct cfs_semaphore {
     KSEMAPHORE sem;
-} mutex_t;
+} cfs_semaphore_t;
 
-static inline void sema_init(struct semaphore *s, int val)
+static inline void cfs_sema_init(cfs_semaphore_t *s, int val)
 {
     KeInitializeSemaphore(&s->sem, val, val);
 }
 
-static inline void __down(struct semaphore *s)
+static inline void __down(cfs_semaphore_t *s)
 {
     KeWaitForSingleObject( &(s->sem), Executive,
                            KernelMode, FALSE, NULL );
 }
-
-static inline void __up(struct semaphore *s)
+static inline void __up(cfs_semaphore_t *s)
 {
     KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
 }
 
+static inline int down_trylock(cfs_semaphore_t *s)
+{
+    LARGE_INTEGER  timeout = {0};
+    NTSTATUS       status =
+        KeWaitForSingleObject( &(s->sem), Executive,
+                               KernelMode, FALSE, &timeout);
+
+    if (status == STATUS_SUCCESS) {
+        return 0;
+    }
+
+    return 1;
+}
+
 /*
  * mutex_t:
  *
@@ -539,6 +587,9 @@ static inline void __up(struct semaphore *s)
  * - mutex_down(x)
  */
 
+typedef struct cfs_semaphore cfs_mutex_t;
+
+#define CFS_DECLARE_MUTEX(x) cfs_mutex_t x
 
 /*
  * init_mutex
@@ -550,16 +601,15 @@ static inline void __up(struct semaphore *s)
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
-
-static inline void init_mutex(mutex_t *mutex)
+#define cfs_mutex_init cfs_init_mutex
+static inline void cfs_init_mutex(cfs_mutex_t *mutex)
 {
-    sema_init(mutex, 1);
+    cfs_sema_init(mutex, 1);
 }
-
 
 /*
  * mutex_down
@@ -570,15 +620,26 @@ static inline void init_mutex(mutex_t *mutex)
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void mutex_down(mutex_t *mutex)
+static inline void cfs_mutex_down(cfs_mutex_t *mutex)
 {
     __down(mutex);
 }
 
+static inline int cfs_mutex_down_interruptible(cfs_mutex_t *mutex)
+{
+    __down(mutex);
+    return 0;
+}
+
+#define cfs_mutex_lock(m)         cfs_mutex_down(m)
+#define cfs_mutex_trylock(s)      down_trylock(s)
+#define cfs_mutex_lock_nested(m)  cfs_mutex_down(m)
+#define cfs_down(m)               cfs_mutex_down(m)
+#define cfs_down_interruptible(m) cfs_mutex_down_interruptible(m)
 
 /*
  * mutex_up
@@ -590,15 +651,17 @@ static inline void mutex_down(mutex_t *mutex)
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void mutex_up(mutex_t *mutex)
+static inline void cfs_mutex_up(cfs_mutex_t *mutex)
 {
     __up(mutex);
 }
 
+#define cfs_mutex_unlock(m) cfs_mutex_up(m)
+#define cfs_up(m)           cfs_mutex_up(m)
 
 /*
  * init_mutex_locked
@@ -610,14 +673,18 @@ static inline void mutex_up(mutex_t *mutex)
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline init_mutex_locked(mutex_t *mutex)
+static inline void cfs_init_mutex_locked(cfs_mutex_t *mutex)
+{
+    cfs_init_mutex(mutex);
+    cfs_mutex_down(mutex);
+}
+
+static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
 {
-    init_mutex(mutex);
-    mutex_down(mutex);
 }
 
 /*
@@ -628,9 +695,9 @@ static inline init_mutex_locked(mutex_t *mutex)
  * - wait_for_completion(c)
  */
 
-struct completion {
+typedef struct {
     event_t  event;
-};
+} cfs_completion_t;
 
 
 /*
@@ -643,11 +710,11 @@ struct completion {
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void init_completion(struct completion *c)
+static inline void cfs_init_completion(cfs_completion_t *c)
 {
     cfs_init_event(&(c->event), 1, FALSE);
 }
@@ -663,11 +730,11 @@ static inline void init_completion(struct completion *c)
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void complete(struct completion *c)
+static inline void cfs_complete(cfs_completion_t *c)
 {
     cfs_wake_event(&(c->event));
 }
@@ -683,20 +750,21 @@ static inline void complete(struct completion *c)
  * Return Value:
  *   N/A
  *
- * Notes: 
+ * Notes:
  *   N/A
  */
 
-static inline void wait_for_completion(struct completion *c)
+static inline void cfs_wait_for_completion(cfs_completion_t *c)
 {
-    cfs_wait_event(&(c->event), 0);
+    cfs_wait_event_internal(&(c->event), 0);
 }
 
-/* __KERNEL__ */
-#else
-
-#include "../user-lock.h"
+static inline int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
+{
+    cfs_wait_event_internal(&(c->event), 0);
+    return 0;
+}
 
-/* __KERNEL__ */
-#endif
+#else  /* !__KERNEL__ */
+#endif /* !__KERNEL__ */
 
 #endif
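The completion rename at the end keeps the usual Linux pattern of announcing that a worker has reached a known point. A sketch assuming some thread-spawning facility outside this header; the worker and spawner names are illustrative:

    static cfs_completion_t started;

    static int worker(void *arg)
    {
        cfs_complete(&started);         /* tell the spawner we are running */
        /* ... real work ... */
        return 0;
    }

    static void spawn_and_wait(void)
    {
        cfs_init_completion(&started);
        /* start worker() here with whatever thread-creation primitive
         * the port provides (assumed, not shown in this header) */
        cfs_wait_for_completion(&started);  /* blocks until cfs_complete() */
    }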