X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=libcfs%2Finclude%2Flibcfs%2Fwinnt%2Fwinnt-lock.h;h=169cc02d24be9f9e3049a7bb7cb27cba0cb5145a;hp=0b6bac5ddc6328389008e1e842ef6a3c39fef9ea;hb=ef6225af104b9138638c71b80e87786b8e5e75e5;hpb=c8616363809a73b3d4b6c736e6b55a84d7f137b4 diff --git a/libcfs/include/libcfs/winnt/winnt-lock.h b/libcfs/include/libcfs/winnt/winnt-lock.h index 0b6bac5..169cc02 100644 --- a/libcfs/include/libcfs/winnt/winnt-lock.h +++ b/libcfs/include/libcfs/winnt/winnt-lock.h @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -26,7 +24,7 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. */ /* @@ -60,35 +58,35 @@ * spinlock & event definitions */ -typedef struct spin_lock spinlock_t; +typedef struct cfs_spin_lock cfs_spinlock_t; /* atomic */ -typedef struct { volatile int counter; } atomic_t; +typedef struct { volatile int counter; } cfs_atomic_t; -#define ATOMIC_INIT(i) { i } +#define CFS_ATOMIC_INIT(i) { i } -#define atomic_read(v) ((v)->counter) -#define atomic_set(v,i) (((v)->counter) = (i)) +#define cfs_atomic_read(v) ((v)->counter) +#define cfs_atomic_set(v,i) (((v)->counter) = (i)) -void FASTCALL atomic_add(int i, atomic_t *v); -void FASTCALL atomic_sub(int i, atomic_t *v); +void FASTCALL cfs_atomic_add(int i, cfs_atomic_t *v); +void FASTCALL cfs_atomic_sub(int i, cfs_atomic_t *v); -int FASTCALL atomic_sub_and_test(int i, atomic_t *v); +int FASTCALL cfs_atomic_sub_and_test(int i, cfs_atomic_t *v); -void FASTCALL atomic_inc(atomic_t *v); -void FASTCALL atomic_dec(atomic_t *v); +void FASTCALL cfs_atomic_inc(cfs_atomic_t *v); +void FASTCALL cfs_atomic_dec(cfs_atomic_t *v); -int FASTCALL 
atomic_dec_and_test(atomic_t *v); -int FASTCALL atomic_inc_and_test(atomic_t *v); +int FASTCALL cfs_atomic_dec_and_test(cfs_atomic_t *v); +int FASTCALL cfs_atomic_inc_and_test(cfs_atomic_t *v); -int FASTCALL atomic_add_return(int i, atomic_t *v); -int FASTCALL atomic_sub_return(int i, atomic_t *v); +int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v); +int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v); -#define atomic_inc_return(v) atomic_add_return(1, v) -#define atomic_dec_return(v) atomic_sub_return(1, v) +#define cfs_atomic_inc_return(v) cfs_atomic_add_return(1, v) +#define cfs_atomic_dec_return(v) cfs_atomic_sub_return(1, v) -int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock); +int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock); /* event */ @@ -109,11 +107,11 @@ typedef KEVENT event_t; * Return Value: * N/A * - * Notes: + * Notes: * N/A */ static inline void - cfs_init_event(event_t *event, int type, int status) +cfs_init_event(event_t *event, int type, int status) { KeInitializeEvent( event, @@ -134,7 +132,7 @@ static inline void * Zero: waiting timeouts * Non Zero: event signaled ... * - * Notes: + * Notes: * N/A */ @@ -144,7 +142,7 @@ cfs_wait_event_internal(event_t * event, int64_t timeout) NTSTATUS Status; LARGE_INTEGER TimeOut; - TimeOut.QuadPart = -1 * (10000000/HZ) * timeout; + TimeOut.QuadPart = -1 * (10000000/CFS_HZ) * timeout; Status = KeWaitForSingleObject( event, @@ -171,7 +169,7 @@ cfs_wait_event_internal(event_t * event, int64_t timeout) * Return Value: * N/A * - * Notes: + * Notes: * N/A */ @@ -191,7 +189,7 @@ cfs_wake_event(event_t * event) * Return Value: * N/A * - * Notes: + * Notes: * N/A */ @@ -209,46 +207,49 @@ cfs_clear_event(event_t * event) * Warning: * * for spinlock operations, try to grab nesting acquisition of - * spinlock will cause dead-lock in MP system and current irql + * spinlock will cause dead-lock in MP system and current irql * overwritten for UP system. 
(UP system could allow nesting spin * acqisition, because it's not spin at all just raising the irql.) * */ -struct spin_lock { +struct cfs_spin_lock { KSPIN_LOCK lock; KIRQL irql; }; -#define CFS_DECL_SPIN(name) spinlock_t name; -#define CFS_DECL_SPIN_EXTERN(name) extern spinlock_t name; +#define CFS_DECL_SPIN(name) cfs_spinlock_t name; +#define CFS_DECL_SPIN_EXTERN(name) extern cfs_spinlock_t name; -#define SPIN_LOCK_UNLOCKED {0} +#define DEFINE_SPINLOCK {0} -static inline void spin_lock_init(spinlock_t *lock) +static inline void cfs_spin_lock_init(cfs_spinlock_t *lock) { KeInitializeSpinLock(&(lock->lock)); } -static inline void spin_lock(spinlock_t *lock) +static inline void cfs_spin_lock(cfs_spinlock_t *lock) { KeAcquireSpinLock(&(lock->lock), &(lock->irql)); } -static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass) +static inline void cfs_spin_lock_nested(cfs_spinlock_t *lock, unsigned subclass) { KeAcquireSpinLock(&(lock->lock), &(lock->irql)); } -static inline void spin_unlock(spinlock_t *lock) +static inline void cfs_spin_unlock(cfs_spinlock_t *lock) { KIRQL irql = lock->irql; KeReleaseSpinLock(&(lock->lock), irql); } -#define spin_lock_irqsave(lock, flags) do {(flags) = 0; spin_lock(lock);} while(0) -#define spin_unlock_irqrestore(lock, flags) do {spin_unlock(lock);} while(0) +#define cfs_spin_lock_irqsave(lock, flags) \ +do {(flags) = 0; cfs_spin_lock(lock);} while(0) + +#define cfs_spin_unlock_irqrestore(lock, flags) \ +do {cfs_spin_unlock(lock);} while(0) /* There's no corresponding routine in windows kernel. 
@@ -258,7 +259,7 @@ static inline void spin_unlock(spinlock_t *lock) extern int libcfs_mp_system; -static int spin_trylock(spinlock_t *lock) +static int cfs_spin_trylock(cfs_spinlock_t *lock) { KIRQL Irql; int rc = 0; @@ -295,7 +296,7 @@ static int spin_trylock(spinlock_t *lock) return rc; } -static int spin_is_locked(spinlock_t *lock) +static int cfs_spin_is_locked(cfs_spinlock_t *lock) { #if _WIN32_WINNT >= 0x502 /* KeTestSpinLock only avalilable on 2k3 server or later */ @@ -307,163 +308,161 @@ static int spin_is_locked(spinlock_t *lock) /* synchronization between cpus: it will disable all DPCs kernel task scheduler on the CPU */ -#define spin_lock_bh(x) spin_lock(x) -#define spin_unlock_bh(x) spin_unlock(x) -#define spin_lock_bh_init(x) spin_lock_init(x) +#define cfs_spin_lock_bh(x) cfs_spin_lock(x) +#define cfs_spin_unlock_bh(x) cfs_spin_unlock(x) +#define cfs_spin_lock_bh_init(x) cfs_spin_lock_init(x) /* - * rw_semaphore (using ERESOURCE) + * cfs_rw_semaphore (using ERESOURCE) */ -typedef struct rw_semaphore { +typedef struct cfs_rw_semaphore { ERESOURCE rwsem; -} rw_semaphore_t; +} cfs_rw_semaphore_t; -#define CFS_DECL_RWSEM(name) rw_semaphore_t name -#define CFS_DECL_RWSEM_EXTERN(name) extern rw_semaphore_t name -#define DECLARE_RWSEM CFS_DECL_RWSEM +#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name +#define CFS_DECLARE_RWSEM_EXTERN(name) extern cfs_rw_semaphore_t name /* - * init_rwsem - * To initialize the the rw_semaphore_t structure + * cfs_init_rwsem + * To initialize the the cfs_rw_semaphore_t structure * * Arguments: - * rwsem: pointer to the rw_semaphore_t structure + * rwsem: pointer to the cfs_rw_semaphore_t structure * * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -static inline void init_rwsem(rw_semaphore_t *s) +static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s) { ExInitializeResourceLite(&s->rwsem); } -#define rwsem_init init_rwsem +#define rwsem_init cfs_init_rwsem /* - * fini_rwsem - * To finilize/destroy the the 
rw_semaphore_t structure + * cfs_fini_rwsem + * To finilize/destroy the the cfs_rw_semaphore_t structure * * Arguments: - * rwsem: pointer to the rw_semaphore_t structure + * rwsem: pointer to the cfs_rw_semaphore_t structure * * Return Value: * N/A * - * Notes: + * Notes: * For winnt system, we need this routine to delete the ERESOURCE. * Just define it NULL for other systems. */ -static inline void fini_rwsem(rw_semaphore_t *s) +static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s) { ExDeleteResourceLite(&s->rwsem); } -#define rwsem_fini fini_rwsem /* - * down_read - * To acquire read-lock of the rw_semahore + * cfs_down_read + * To acquire read-lock of the cfs_rw_semaphore * * Arguments: - * rwsem: pointer to the rw_semaphore_t structure + * rwsem: pointer to the cfs_rw_semaphore_t structure * * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -static inline void down_read(struct rw_semaphore *s) +static inline void cfs_down_read(cfs_rw_semaphore_t *s) { ExAcquireResourceSharedLite(&s->rwsem, TRUE); } -#define down_read_nested down_read +#define cfs_down_read_nested cfs_down_read /* - * down_read_trylock - * To acquire read-lock of the rw_semahore without blocking + * cfs_down_read_trylock + * To acquire read-lock of the cfs_rw_semaphore without blocking * * Arguments: - * rwsem: pointer to the rw_semaphore_t structure + * rwsem: pointer to the cfs_rw_semaphore_t structure * * Return Value: * Zero: failed to acquire the read lock * Non-Zero: succeeded to acquire the read lock * - * Notes: + * Notes: * This routine will return immediately without waiting. 
*/ -static inline int down_read_trylock(struct rw_semaphore *s) +static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s) { return ExAcquireResourceSharedLite(&s->rwsem, FALSE); } /* - * down_write - * To acquire write-lock of the rw_semahore + * cfs_down_write + * To acquire write-lock of the cfs_rw_semaphore * * Arguments: - * rwsem: pointer to the rw_semaphore_t structure + * rwsem: pointer to the cfs_rw_semaphore_t structure * * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -static inline void down_write(struct rw_semaphore *s) +static inline void cfs_down_write(cfs_rw_semaphore_t *s) { ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE); } -#define down_write_nested down_write +#define cfs_down_write_nested cfs_down_write /* * down_write_trylock - * To acquire write-lock of the rw_semahore without blocking + * To acquire write-lock of the cfs_rw_semaphore without blocking * * Arguments: - * rwsem: pointer to the rw_semaphore_t structure + * rwsem: pointer to the cfs_rw_semaphore_t structure * * Return Value: * Zero: failed to acquire the write lock * Non-Zero: succeeded to acquire the read lock * - * Notes: + * Notes: * This routine will return immediately without waiting. 
*/

-static inline int down_write_trylock(struct rw_semaphore *s)
+static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
{
    return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
}

/*
- * up_read
- * To release read-lock of the rw_semahore
+ * cfs_up_read
+ * To release read-lock of the cfs_rw_semaphore
 *
 * Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
 *
 * Return Value:
 * N/A
 *
- * Notes:
+ * Notes:
 * N/A
 */

-static inline void up_read(struct rw_semaphore *s)
+static inline void cfs_up_read(cfs_rw_semaphore_t *s)
{
    ExReleaseResourceForThreadLite(
        &(s->rwsem),
@@ -472,20 +471,20 @@ static inline void up_read(struct rw_semaphore *s)


/*
- * up_write
- * To release write-lock of the rw_semahore
+ * cfs_up_write
+ * To release write-lock of the cfs_rw_semaphore
 *
 * Arguments:
- * rwsem: pointer to the rw_semaphore_t structure
+ * rwsem: pointer to the cfs_rw_semaphore_t structure
 *
 * Return Value:
 * N/A
 *
- * Notes:
+ * Notes:
 * N/A
 */

-static inline void up_write(struct rw_semaphore *s)
+static inline void cfs_up_write(cfs_rw_semaphore_t *s)
{
    ExReleaseResourceForThreadLite(
        &(s->rwsem),
@@ -503,28 +502,39 @@ static inline void up_write(struct rw_semaphore *s)
 */

typedef struct {
- spinlock_t guard;
- int count;
-} rwlock_t;
+ cfs_spinlock_t guard;
+ int count;
+} cfs_rwlock_t;
+
+void cfs_rwlock_init(cfs_rwlock_t * rwlock);
+void cfs_rwlock_fini(cfs_rwlock_t * rwlock);
+
+void cfs_read_lock(cfs_rwlock_t * rwlock);
+void cfs_read_unlock(cfs_rwlock_t * rwlock);
+void cfs_write_lock(cfs_rwlock_t * rwlock);
+void cfs_write_unlock(cfs_rwlock_t * rwlock);

-void rwlock_init(rwlock_t * rwlock);
-void rwlock_fini(rwlock_t * rwlock);
+#define cfs_write_lock_irqsave(l, f) do {f = 0; cfs_write_lock(l);} while(0)
+#define cfs_write_unlock_irqrestore(l, f) do {cfs_write_unlock(l);} while(0)
+#define cfs_read_lock_irqsave(l, f) do {f=0; cfs_read_lock(l);} while(0)
+#define cfs_read_unlock_irqrestore(l, 
f) do {cfs_read_unlock(l);} while(0) -void read_lock(rwlock_t * rwlock); -void read_unlock(rwlock_t * rwlock); -void write_lock(rwlock_t * rwlock); -void write_unlock(rwlock_t * rwlock); +#define cfs_write_lock_bh cfs_write_lock +#define cfs_write_unlock_bh cfs_write_unlock -#define write_lock_irqsave(l, f) do {f = 0; write_lock(l);} while(0) -#define write_unlock_irqrestore(l, f) do {write_unlock(l);} while(0) -#define read_lock_irqsave(l, f) do {f=0; read_lock(l);} while(0) -#define read_unlock_irqrestore(l, f) do {read_unlock(l);} while(0) +typedef struct cfs_lock_class_key { + int foo; +} cfs_lock_class_key_t; -#define write_lock_bh write_lock -#define write_unlock_bh write_unlock +#define cfs_lockdep_set_class(lock, class) do {} while(0) -struct lock_class_key {int foo;}; -#define lockdep_set_class(lock, class) do {} while(0) +static inline void cfs_lockdep_off(void) +{ +} + +static inline void cfs_lockdep_on(void) +{ +} /* * Semaphore @@ -534,27 +544,27 @@ struct lock_class_key {int foo;}; * - __up(x) */ -struct semaphore{ +typedef struct cfs_semaphore { KSEMAPHORE sem; -}; +} cfs_semaphore_t; -static inline void sema_init(struct semaphore *s, int val) +static inline void cfs_sema_init(cfs_semaphore_t *s, int val) { KeInitializeSemaphore(&s->sem, val, val); } -static inline void __down(struct semaphore *s) +static inline void __down(cfs_semaphore_t *s) { KeWaitForSingleObject( &(s->sem), Executive, KernelMode, FALSE, NULL ); } -static inline void __up(struct semaphore *s) +static inline void __up(cfs_semaphore_t *s) { KeReleaseSemaphore(&s->sem, 0, 1, FALSE); } -static inline int down_trylock(struct semaphore * s) +static inline int down_trylock(cfs_semaphore_t *s) { LARGE_INTEGER timeout = {0}; NTSTATUS status = @@ -577,10 +587,9 @@ static inline int down_trylock(struct semaphore * s) * - mutex_down(x) */ -#define mutex semaphore -typedef struct semaphore mutex_t; +typedef struct cfs_semaphore cfs_mutex_t; -#define DECLARE_MUTEX(x) mutex_t x +#define 
CFS_DECLARE_MUTEX(x) cfs_mutex_t x /* * init_mutex @@ -592,16 +601,15 @@ typedef struct semaphore mutex_t; * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -#define mutex_init init_mutex -static inline void init_mutex(mutex_t *mutex) +#define cfs_mutex_init cfs_init_mutex +static inline void cfs_init_mutex(cfs_mutex_t *mutex) { - sema_init(mutex, 1); + cfs_sema_init(mutex, 1); } -#define init_MUTEX init_mutex /* * mutex_down * To acquire the mutex lock @@ -612,19 +620,26 @@ static inline void init_mutex(mutex_t *mutex) * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -static inline void mutex_down(mutex_t *mutex) +static inline void cfs_mutex_down(cfs_mutex_t *mutex) { __down(mutex); } -#define mutex_lock(m) mutex_down(m) -#define mutex_trylock(s) down_trylock(s) -#define mutex_lock_nested(m) mutex_down(m) -#define down(m) mutex_down(m) +static inline int cfs_mutex_down_interruptible(cfs_mutex_t *mutex) +{ + __down(mutex); + return 0; +} + +#define cfs_mutex_lock(m) cfs_mutex_down(m) +#define cfs_mutex_trylock(s) down_trylock(s) +#define cfs_mutex_lock_nested(m) cfs_mutex_down(m) +#define cfs_down(m) cfs_mutex_down(m) +#define cfs_down_interruptible(m) cfs_mutex_down_interruptible(m) /* * mutex_up @@ -636,17 +651,17 @@ static inline void mutex_down(mutex_t *mutex) * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -static inline void mutex_up(mutex_t *mutex) +static inline void cfs_mutex_up(cfs_mutex_t *mutex) { __up(mutex); } -#define mutex_unlock(m) mutex_up(m) -#define up(m) mutex_up(m) +#define cfs_mutex_unlock(m) cfs_mutex_up(m) +#define cfs_up(m) cfs_mutex_up(m) /* * init_mutex_locked @@ -658,19 +673,17 @@ static inline void mutex_up(mutex_t *mutex) * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -static inline void init_mutex_locked(mutex_t *mutex) +static inline void cfs_init_mutex_locked(cfs_mutex_t *mutex) { - init_mutex(mutex); - mutex_down(mutex); + cfs_init_mutex(mutex); + cfs_mutex_down(mutex); } -#define init_MUTEX_LOCKED 
init_mutex_locked - -static inline void mutex_destroy(mutex_t *mutex) +static inline void cfs_mutex_destroy(cfs_mutex_t *mutex) { } @@ -682,9 +695,9 @@ static inline void mutex_destroy(mutex_t *mutex) * - wait_for_completion(c) */ -struct completion { +typedef struct { event_t event; -}; +} cfs_completion_t; /* @@ -697,11 +710,11 @@ struct completion { * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -static inline void init_completion(struct completion *c) +static inline void cfs_init_completion(cfs_completion_t *c) { cfs_init_event(&(c->event), 1, FALSE); } @@ -717,11 +730,11 @@ static inline void init_completion(struct completion *c) * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -static inline void complete(struct completion *c) +static inline void cfs_complete(cfs_completion_t *c) { cfs_wake_event(&(c->event)); } @@ -737,16 +750,16 @@ static inline void complete(struct completion *c) * Return Value: * N/A * - * Notes: + * Notes: * N/A */ -static inline void wait_for_completion(struct completion *c) +static inline void cfs_wait_for_completion(cfs_completion_t *c) { cfs_wait_event_internal(&(c->event), 0); } -static inline int wait_for_completion_interruptible(struct completion *c) +static inline int cfs_wait_for_completion_interruptible(cfs_completion_t *c) { cfs_wait_event_internal(&(c->event), 0); return 0;