/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/winnt/winnt-lock.h
 *
 * Basic library routines.
 */
#ifndef __LIBCFS_WINNT_CFS_LOCK_H__
#define __LIBCFS_WINNT_CFS_LOCK_H__

#ifndef __LIBCFS_LIBCFS_H__
#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
#endif
/*
 * Lock declarations are not guaranteed to be initialized, although some
 * of them are initialized on Linux.  All locks declared by CFS_DECL_*
 * should be initialized explicitly.
 */
/*
 * spinlock & event definitions
 */
typedef struct spin_lock spinlock_t;
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { i }

#define atomic_read(v)  ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

void FASTCALL atomic_add(int i, atomic_t *v);
void FASTCALL atomic_sub(int i, atomic_t *v);

int FASTCALL atomic_sub_and_test(int i, atomic_t *v);

void FASTCALL atomic_inc(atomic_t *v);
void FASTCALL atomic_dec(atomic_t *v);

int FASTCALL atomic_dec_and_test(atomic_t *v);
int FASTCALL atomic_inc_and_test(atomic_t *v);

int FASTCALL atomic_add_return(int i, atomic_t *v);
int FASTCALL atomic_sub_return(int i, atomic_t *v);

#define atomic_inc_return(v)  atomic_add_return(1, v)
#define atomic_dec_return(v)  atomic_sub_return(1, v)

int FASTCALL atomic_dec_and_lock(atomic_t *v, spinlock_t *lock);
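/*
 * Illustrative sketch (not part of the original interface): a hypothetical
 * reference counter built on the atomic_t primitives declared above.  The
 * example_refcount_* names are placeholders introduced here for clarity.
 */
static atomic_t example_refcount = ATOMIC_INIT(1);

static inline void example_refcount_get(void)
{
        atomic_inc(&example_refcount);                  /* take a reference */
}

static inline int example_refcount_put(void)
{
        /* non-zero means the last reference was just dropped */
        return atomic_dec_and_test(&example_refcount);
}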
typedef KEVENT event_t;
/*
 * cfs_init_event
 *   To initialize the event object
 *
 * Arguments:
 *   event:  pointer to the event object
 *   type:   Non Zero: SynchronizationEvent
 *           Zero:     NotificationEvent
 *   status: the initial state of the event
 */
static inline void
cfs_init_event(event_t *event, int type, int status)
{
        KeInitializeEvent(event,
                          (type) ? SynchronizationEvent : NotificationEvent,
                          (status) ? TRUE : FALSE);
}
/*
 * cfs_wait_event_internal
 *   To wait on an event to synchronize the process
 *
 * Arguments:
 *   event:   pointer to the event object
 *   timeout: the timeout for the wait, or 0 for an infinite wait
 *
 * Return Value:
 *   Zero:     the wait timed out
 *   Non Zero: the event was signaled
 */
static inline int64_t
cfs_wait_event_internal(event_t *event, int64_t timeout)
{
        NTSTATUS        Status;
        LARGE_INTEGER   TimeOut;

        TimeOut.QuadPart = -1 * (10000000 / HZ) * timeout;

        Status = KeWaitForSingleObject(event, Executive, KernelMode, FALSE,
                                       (timeout != 0) ? (&TimeOut) : (NULL));
        if (Status == STATUS_TIMEOUT) {
                return 0;
        }

        return TRUE; /* signaled */
}
/*
 * cfs_wake_event
 *   To signal the event object
 *   event: pointer to the event object
 */
static inline int
cfs_wake_event(event_t *event)
{
        return (KeSetEvent(event, 0, FALSE) != 0);
}
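/*
 * Illustrative sketch (not part of the original interface): a typical
 * wait/signal pairing with the event helpers above.  The function names
 * are placeholders; a SynchronizationEvent (type != 0) auto-resets after
 * releasing a single waiter.
 */
static inline void example_event_wait(event_t *ev)
{
        cfs_init_event(ev, 1, FALSE);           /* auto-reset, initially non-signaled */
        cfs_wait_event_internal(ev, 0);         /* 0 == wait with no timeout */
}

static inline void example_event_signal(event_t *ev)
{
        cfs_wake_event(ev);                     /* wake one waiter */
}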
/*
 * cfs_clear_event
 *   To clear/reset the status of the event object
 *   event: pointer to the event object
 */
static inline int
cfs_clear_event(event_t *event)
{
        return KeResetEvent(event);
}
/*
 * spinlock definitions / routines
 *
 * Warning:
 *   Nested acquisition of the same spinlock deadlocks on an MP system and
 *   silently overwrites the saved irql on a UP system.  (A UP build does
 *   not actually spin; it only raises the irql, so nesting may appear to
 *   work there.)
 */
#define CFS_DECL_SPIN(name)             spinlock_t name;
#define CFS_DECL_SPIN_EXTERN(name)      extern spinlock_t name;

#define SPIN_LOCK_UNLOCKED              {0}
static inline void spin_lock_init(spinlock_t *lock)
{
        KeInitializeSpinLock(&(lock->lock));
}

static inline void spin_lock(spinlock_t *lock)
{
        KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}

static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
{
        KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}

static inline void spin_unlock(spinlock_t *lock)
{
        KIRQL irql = lock->irql;

        KeReleaseSpinLock(&(lock->lock), irql);
}

#define spin_lock_irqsave(lock, flags)          do { (flags) = 0; spin_lock(lock); } while (0)
#define spin_unlock_irqrestore(lock, flags)     do { spin_unlock(lock); } while (0)
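/*
 * Illustrative sketch (not part of the original interface): declaring and
 * using a spinlock with the wrappers above.  As noted earlier in this
 * header, the lock must be initialized explicitly before first use and
 * must never be acquired recursively.  Names are placeholders.
 */
static inline void example_spinlock_usage(void)
{
        spinlock_t      lock;
        unsigned long   flags;

        spin_lock_init(&lock);                  /* explicit initialization */

        spin_lock(&lock);                       /* short, non-nested critical section */
        spin_unlock(&lock);

        spin_lock_irqsave(&lock, flags);        /* flags is kept only for API parity */
        spin_unlock_irqrestore(&lock, flags);
}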
/*
 * There is no corresponding routine in the Windows kernel, so we implement
 * a lightweight one of our own.  There is no way to tell at runtime whether
 * the system is an MP or a UP build, so the libcfs_mp_system flag is used
 * as a workaround.
 */
extern int libcfs_mp_system;

static int spin_trylock(spinlock_t *lock)
{
        KIRQL   Irql;

        ASSERT(lock != NULL);

        KeRaiseIrql(DISPATCH_LEVEL, &Irql);

        if (libcfs_mp_system) {
                if (0 == (ulong_ptr_t)lock->lock) {
                        __asm {
                                mov  edx, dword ptr [ebp + 8]
                                lock bts dword ptr [edx], 0
static int spin_is_locked(spinlock_t *lock)
{
#if _WIN32_WINNT >= 0x502
        /* KeTestSpinLock is only available on Windows Server 2003 or later */
        return (!KeTestSpinLock(&lock->lock));
#else
        return (int) (lock->lock);
#endif
}
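/*
 * Illustrative sketch (not part of the original interface): opportunistic
 * locking with spin_trylock(), assuming the usual Linux-style return
 * convention (non-zero on success).  Names are placeholders.
 */
static inline int example_spin_try(spinlock_t *lock)
{
        if (!spin_trylock(lock))
                return 0;               /* contended; caller may retry later */

        /* ... short critical section ... */

        spin_unlock(lock);
        return 1;
}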
/*
 * Synchronization between CPUs: holding the spinlock raises the irql to
 * DISPATCH_LEVEL, which keeps DPCs and the kernel task scheduler off the
 * local CPU, so the _bh variants map directly to the plain operations.
 */
#define spin_lock_bh(x)         spin_lock(x)
#define spin_unlock_bh(x)       spin_unlock(x)
#define spin_lock_bh_init(x)    spin_lock_init(x)
/*
 * rw_semaphore (using ERESOURCE)
 */

typedef struct rw_semaphore {
        ERESOURCE       rwsem;
} rw_semaphore_t;

#define CFS_DECL_RWSEM(name)            rw_semaphore_t name
#define CFS_DECL_RWSEM_EXTERN(name)     extern rw_semaphore_t name
#define DECLARE_RWSEM                   CFS_DECL_RWSEM
/*
 * init_rwsem
 *   To initialize the rw_semaphore_t structure
 *   rwsem: pointer to the rw_semaphore_t structure
 */
static inline void init_rwsem(rw_semaphore_t *s)
{
        ExInitializeResourceLite(&s->rwsem);
}

#define rwsem_init init_rwsem
/*
 * fini_rwsem
 *   To finalize/destroy the rw_semaphore_t structure
 *   rwsem: pointer to the rw_semaphore_t structure
 *
 * Notes:
 *   On winnt this routine is needed to delete the ERESOURCE;
 *   other ports define it as a no-op.
 */
static inline void fini_rwsem(rw_semaphore_t *s)
{
        ExDeleteResourceLite(&s->rwsem);
}

#define rwsem_fini fini_rwsem
/*
 * down_read
 *   To acquire a read lock on the rw_semaphore
 *   rwsem: pointer to the rw_semaphore_t structure
 */
static inline void down_read(struct rw_semaphore *s)
{
        ExAcquireResourceSharedLite(&s->rwsem, TRUE);
}

#define down_read_nested down_read
/*
 * down_read_trylock
 *   To acquire a read lock on the rw_semaphore without blocking
 *   rwsem: pointer to the rw_semaphore_t structure
 *
 * Return Value:
 *   Zero:     failed to acquire the read lock
 *   Non-Zero: succeeded in acquiring the read lock
 *
 * Notes:
 *   This routine returns immediately without waiting.
 */
static inline int down_read_trylock(struct rw_semaphore *s)
{
        return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
}
/*
 * down_write
 *   To acquire a write lock on the rw_semaphore
 *   rwsem: pointer to the rw_semaphore_t structure
 */
static inline void down_write(struct rw_semaphore *s)
{
        ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
}

#define down_write_nested down_write
/*
 * down_write_trylock
 *   To acquire a write lock on the rw_semaphore without blocking
 *   rwsem: pointer to the rw_semaphore_t structure
 *
 * Return Value:
 *   Zero:     failed to acquire the write lock
 *   Non-Zero: succeeded in acquiring the write lock
 *
 * Notes:
 *   This routine returns immediately without waiting.
 */
static inline int down_write_trylock(struct rw_semaphore *s)
{
        return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
}
/*
 * up_read
 *   To release a read lock on the rw_semaphore
 *   rwsem: pointer to the rw_semaphore_t structure
 */
static inline void up_read(struct rw_semaphore *s)
{
        ExReleaseResourceForThreadLite(&(s->rwsem),
                                       ExGetCurrentResourceThread());
}
/*
 * up_write
 *   To release a write lock on the rw_semaphore
 *   rwsem: pointer to the rw_semaphore_t structure
 */
static inline void up_write(struct rw_semaphore *s)
{
        ExReleaseResourceForThreadLite(&(s->rwsem),
                                       ExGetCurrentResourceThread());
}
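/*
 * Illustrative sketch (not part of the original interface): reader and
 * writer usage of the ERESOURCE-backed rw_semaphore.  Names are
 * placeholders.
 */
static inline void example_rwsem_usage(struct rw_semaphore *sem)
{
        init_rwsem(sem);                /* must precede any down_*()/up_*() call */

        down_read(sem);                 /* shared (reader) access */
        up_read(sem);

        down_write(sem);                /* exclusive (writer) access */
        up_write(sem);

        if (down_write_trylock(sem))    /* non-blocking attempt */
                up_write(sem);

        fini_rwsem(sem);                /* required on winnt to delete the ERESOURCE */
}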
/*
 * rwlock_t (using semaphore)
 */

void rwlock_init(rwlock_t *rwlock);
void rwlock_fini(rwlock_t *rwlock);

void read_lock(rwlock_t *rwlock);
void read_unlock(rwlock_t *rwlock);
void write_lock(rwlock_t *rwlock);
void write_unlock(rwlock_t *rwlock);
#define write_lock_irqsave(l, f)        do { (f) = 0; write_lock(l); } while (0)
#define write_unlock_irqrestore(l, f)   do { write_unlock(l); } while (0)
#define read_lock_irqsave(l, f)         do { (f) = 0; read_lock(l); } while (0)
#define read_unlock_irqrestore(l, f)    do { read_unlock(l); } while (0)

#define write_lock_bh   write_lock
#define write_unlock_bh write_unlock
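/*
 * Illustrative sketch (not part of the original interface): basic use of
 * the semaphore-based rwlock_t declared above.  Names are placeholders.
 */
static inline void example_rwlock_usage(rwlock_t *rw)
{
        rwlock_init(rw);                /* explicit initialization */

        read_lock(rw);                  /* readers may share the lock */
        read_unlock(rw);

        write_lock(rw);                 /* writers are exclusive */
        write_unlock(rw);

        rwlock_fini(rw);                /* winnt-specific teardown */
}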
struct lock_class_key { int foo; };

#define lockdep_set_class(lock, class)  do {} while (0)
static inline void sema_init(struct semaphore *s, int val)
{
        KeInitializeSemaphore(&s->sem, val, val);
}

static inline void __down(struct semaphore *s)
{
        KeWaitForSingleObject(&(s->sem), Executive,
                              KernelMode, FALSE, NULL);
}

static inline void __up(struct semaphore *s)
{
        KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
}

static inline int down_trylock(struct semaphore *s)
{
        LARGE_INTEGER timeout = {0};
        NTSTATUS status = KeWaitForSingleObject(&(s->sem), Executive,
                                                KernelMode, FALSE, &timeout);

        if (status == STATUS_SUCCESS) {
                return 0;       /* acquired */
        }

        return 1;               /* busy */
}
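/*
 * Illustrative sketch (not part of the original interface): counting
 * semaphore usage with the wrappers above.  Names are placeholders.
 */
static inline void example_sema_usage(struct semaphore *s)
{
        sema_init(s, 1);                /* initial count 1 (binary semaphore) */

        __down(s);                      /* blocking acquire */
        __up(s);                        /* release */

        if (down_trylock(s) == 0)       /* 0 == acquired without blocking */
                __up(s);
}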
/*
 * mutex_t (built on struct semaphore), including:
 * - init_mutex_locked(x)
 */

#define mutex semaphore
typedef struct semaphore mutex_t;

#define DECLARE_MUTEX(x)        mutex_t x
/*
 * init_mutex
 *   To initialize a mutex_t structure
 *   mutex: pointer to the mutex_t structure
 */
#define mutex_init init_mutex
static inline void init_mutex(mutex_t *mutex)
{
        sema_init(mutex, 1);
}

#define init_MUTEX init_mutex
/*
 * mutex_down
 *   To acquire the mutex lock
 *   mutex: pointer to the mutex_t structure
 */
static inline void mutex_down(mutex_t *mutex)
{
        __down(mutex);
}

#define mutex_lock(m)           mutex_down(m)
#define mutex_trylock(s)        down_trylock(s)
#define mutex_lock_nested(m)    mutex_down(m)
#define down(m)                 mutex_down(m)
/*
 * mutex_up
 *   To release the mutex lock (already acquired)
 *   mutex: pointer to the mutex_t structure
 */
static inline void mutex_up(mutex_t *mutex)
{
        __up(mutex);
}

#define mutex_unlock(m)         mutex_up(m)
#define up(m)                   mutex_up(m)
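/*
 * Illustrative sketch (not part of the original interface): typical mutex
 * usage with the semaphore-based wrappers above.  Names are placeholders.
 */
static inline void example_mutex_usage(mutex_t *m)
{
        init_mutex(m);                  /* initialize unlocked */

        mutex_lock(m);                  /* same as mutex_down(m) */
        /* ... critical section ... */
        mutex_unlock(m);                /* same as mutex_up(m) */
}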
/*
 * init_mutex_locked
 *   To initialize the mutex in the acquired (locked) state
 *   mutex: pointer to the mutex_t structure
 */
static inline void init_mutex_locked(mutex_t *mutex)
{
        init_mutex(mutex);
        mutex_down(mutex);
}

#define init_MUTEX_LOCKED init_mutex_locked

static inline void mutex_destroy(mutex_t *mutex)
{
        /* nothing to tear down for a KSEMAPHORE-based mutex */
}
/*
 * completion:
 * - init_completion(c)
 * - wait_for_completion(c)
 */
/*
 * init_completion
 *   To initialize the completion object
 *   c: pointer to the completion structure
 */
static inline void init_completion(struct completion *c)
{
        cfs_init_event(&(c->event), 1, FALSE);
}
/*
 * complete
 *   To complete/signal the completion object
 *   c: pointer to the completion structure
 */
static inline void complete(struct completion *c)
{
        cfs_wake_event(&(c->event));
}
/*
 * wait_for_completion
 *   To wait on the completion object.  When the event is signaled, this
 *   function returns to the caller with the event reset (un-signaled).
 *   c: pointer to the completion structure
 */
static inline void wait_for_completion(struct completion *c)
{
        cfs_wait_event_internal(&(c->event), 0);
}
static inline int wait_for_completion_interruptible(struct completion *c)
{
        cfs_wait_event_internal(&(c->event), 0);
        return 0;
}
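/*
 * Illustrative sketch (not part of the original interface): signaling a
 * completion from one context and waiting on it from another.  Names are
 * placeholders.
 */
static inline void example_completion_waiter(struct completion *c)
{
        init_completion(c);             /* auto-reset event, non-signaled */
        wait_for_completion(c);         /* blocks until complete() is called */
}

static inline void example_completion_signaler(struct completion *c)
{
        complete(c);                    /* wake one waiter */
}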
#else  /* !__KERNEL__ */
#endif /* !__KERNEL__ */

#endif /* __LIBCFS_WINNT_CFS_LOCK_H__ */