/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/winnt/winnt-lock.h
 *
 * Basic library routines.
 */
#ifndef __LIBCFS_WINNT_CFS_LOCK_H__
#define __LIBCFS_WINNT_CFS_LOCK_H__

#ifndef __LIBCFS_LIBCFS_H__
#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
#endif
/*
 * Lock declarations are not guaranteed to be initialized,
 * although some of them are initialized in Linux.  All locks
 * declared by CFS_DECL_* should be initialized explicitly.
 */
/*
 * spinlock & event definitions
 */

typedef struct spin_lock spinlock_t;

typedef struct { volatile int counter; } cfs_atomic_t;

#define CFS_ATOMIC_INIT(i)      { i }

#define cfs_atomic_read(v)      ((v)->counter)
#define cfs_atomic_set(v, i)    (((v)->counter) = (i))
void FASTCALL cfs_atomic_add(int i, cfs_atomic_t *v);
void FASTCALL cfs_atomic_sub(int i, cfs_atomic_t *v);

int FASTCALL cfs_atomic_sub_and_test(int i, cfs_atomic_t *v);

void FASTCALL cfs_atomic_inc(cfs_atomic_t *v);
void FASTCALL cfs_atomic_dec(cfs_atomic_t *v);

int FASTCALL cfs_atomic_dec_and_test(cfs_atomic_t *v);
int FASTCALL cfs_atomic_inc_and_test(cfs_atomic_t *v);

int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v);
int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);

#define cfs_atomic_inc_return(v)    cfs_atomic_add_return(1, v)
#define cfs_atomic_dec_return(v)    cfs_atomic_sub_return(1, v)

int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock);
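
/*
 * Usage sketch (illustrative only, following the Linux-style semantics the
 * wrappers above emulate; the object and its release helper are hypothetical):
 *
 *    cfs_atomic_t refcount = CFS_ATOMIC_INIT(1);
 *
 *    cfs_atomic_inc(&refcount);                  // take an extra reference
 *    if (cfs_atomic_dec_and_test(&refcount))     // true once the count hits 0
 *        release_object(obj);                    // hypothetical cleanup helper
 */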
typedef KEVENT event_t;
/*
 * cfs_init_event
 *   To initialize the event object
 *
 * Arguments:
 *   event:  pointer to the event object
 *   type:   Non Zero: SynchronizationEvent
 *           Zero:     NotificationEvent
 *   status: the initial state of the event
 */
static inline void
cfs_init_event(event_t *event, int type, int status)
{
    KeInitializeEvent(event,
                      (type) ? SynchronizationEvent : NotificationEvent,
                      (status) ? TRUE : FALSE);
}
/*
 * cfs_wait_event_internal
 *   To wait on an event to synchronize the process
 *
 * Arguments:
 *   event:   pointer to the event object
 *   timeout: the timeout for waiting, or 0 for an infinite wait
 *
 * Return Value:
 *   Zero:     the wait timed out
 *   Non Zero: the event was signaled
 */
static inline int64_t
cfs_wait_event_internal(event_t *event, int64_t timeout)
{
    NTSTATUS        Status;
    LARGE_INTEGER   TimeOut;

    TimeOut.QuadPart = -1 * (10000000/CFS_HZ) * timeout;

    Status = KeWaitForSingleObject(event, Executive, KernelMode, FALSE,
                                   (timeout != 0) ? (&TimeOut) : (NULL));

    if (Status == STATUS_TIMEOUT) {
        return 0;
    }

    return TRUE; // signaled case
}
/*
 * cfs_wake_event
 *   To signal the event object
 *
 * Arguments:
 *   event: pointer to the event object
 */
static inline int
cfs_wake_event(event_t *event)
{
    return (KeSetEvent(event, 0, FALSE) != 0);
}
/*
 * cfs_clear_event
 *   To clear/reset the status of the event object
 *
 * Arguments:
 *   event: pointer to the event object
 */
static inline void
cfs_clear_event(event_t *event)
{
    KeResetEvent(event);
}
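
/*
 * Usage sketch (illustrative only; handle_signal() is a hypothetical helper
 * and the waiter is assumed to run at PASSIVE_LEVEL):
 *
 *    event_t ev;
 *
 *    cfs_init_event(&ev, 1, FALSE);          // auto-reset, initially non-signaled
 *    if (cfs_wait_event_internal(&ev, 0))    // timeout 0 == wait forever
 *        handle_signal();
 *
 *    // in the signaling thread:
 *    cfs_wake_event(&ev);
 */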
/*
 * spinlock definitions / routines
 *
 * Warning:
 *   These spinlocks do not support nested acquisition.  Acquiring a
 *   spinlock that is already held deadlocks on an MP system, and on a UP
 *   system it silently overwrites the saved IRQL.  (A UP system could
 *   tolerate nested acquisition, because the lock never actually spins;
 *   it only raises the IRQL.)
 */
struct spin_lock {
    KSPIN_LOCK  lock;
    KIRQL       irql;
};

#define CFS_DECL_SPIN(name)             spinlock_t name;
#define CFS_DECL_SPIN_EXTERN(name)      extern spinlock_t name;

#define DEFINE_SPINLOCK                 {0}
static inline void spin_lock_init(spinlock_t *lock)
{
    KeInitializeSpinLock(&(lock->lock));
}

static inline void spin_lock(spinlock_t *lock)
{
    KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}

static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
{
    KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}

static inline void spin_unlock(spinlock_t *lock)
{
    KIRQL irql = lock->irql;
    KeReleaseSpinLock(&(lock->lock), irql);
}

#define spin_lock_irqsave(lock, flags)          \
    do { (flags) = 0; spin_lock(lock); } while (0)

#define spin_unlock_irqrestore(lock, flags)     \
    do { spin_unlock(lock); } while (0)
/* There is no corresponding trylock routine in the Windows kernel, so we
 * must implement a lightweight one of our own.  There is no way to tell
 * at runtime whether the system is an MP or a UP build, so we use a
 * workaround for it. */

extern int libcfs_mp_system;

static int spin_trylock(spinlock_t *lock)
{
    KIRQL   Irql;

    ASSERT(lock != NULL);

    KeRaiseIrql(DISPATCH_LEVEL, &Irql);

    if (libcfs_mp_system) {
        if (0 == (ulong_ptr_t)lock->lock) {
            /* atomically test-and-set bit 0 of the lock word (x86) */
            mov edx, dword ptr [ebp + 8]
            lock bts dword ptr[edx], 0
static int spin_is_locked(spinlock_t *lock)
{
#if _WIN32_WINNT >= 0x502
    /* KeTestSpinLock is only available on Windows Server 2003 or later */
    return !KeTestSpinLock(&lock->lock);
#else
    return (int) (lock->lock);
#endif
}
/* synchronization between CPUs: holding the lock disables DPC delivery
 * (the kernel's deferred task scheduling) on the current CPU */
#define spin_lock_bh(x)         spin_lock(x)
#define spin_unlock_bh(x)       spin_unlock(x)
#define spin_lock_bh_init(x)    spin_lock_init(x)
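
/*
 * Usage sketch (illustrative only; the shared counter is hypothetical).
 * KeAcquireSpinLock raises the IRQL to DISPATCH_LEVEL, so the critical
 * section must not block:
 *
 *    static spinlock_t  stat_lock;
 *    static int         stat_count;
 *
 *    spin_lock_init(&stat_lock);
 *
 *    spin_lock(&stat_lock);      // IRQL raised, old value saved in stat_lock.irql
 *    stat_count++;
 *    spin_unlock(&stat_lock);    // saved IRQL restored
 */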
/*
 * rw_semaphore (using ERESOURCE)
 */

struct rw_semaphore {
    ERESOURCE   rwsem;
};

#define DECLARE_RWSEM(name)             struct rw_semaphore name
#define CFS_DECLARE_RWSEM_EXTERN(name)  extern struct rw_semaphore name
/*
 * init_rwsem
 *   To initialize the rw_semaphore structure
 *
 * Arguments:
 *   rwsem: pointer to the rw_semaphore structure
 */
static inline void init_rwsem(struct rw_semaphore *s)
{
    ExInitializeResourceLite(&s->rwsem);
}
#define rwsem_init init_rwsem
/*
 * fini_rwsem
 *   To finalize/destroy the rw_semaphore structure
 *
 * Arguments:
 *   rwsem: pointer to the rw_semaphore structure
 *
 * Notes:
 *   On winnt we need this routine to delete the ERESOURCE;
 *   other systems can just define it as a no-op.
 */
static inline void fini_rwsem(struct rw_semaphore *s)
{
    ExDeleteResourceLite(&s->rwsem);
}
/*
 * down_read
 *   To acquire a read lock on the rw_semaphore
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 */
static inline void down_read(struct rw_semaphore *s)
{
    ExAcquireResourceSharedLite(&s->rwsem, TRUE);
}
#define down_read_nested down_read
/*
 * down_read_trylock
 *   To acquire a read lock on the rw_semaphore without blocking
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 *
 * Return Value:
 *   Zero:     failed to acquire the read lock
 *   Non-Zero: succeeded in acquiring the read lock
 *
 * Notes:
 *   This routine returns immediately without waiting.
 */
static inline int down_read_trylock(struct rw_semaphore *s)
{
    return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
}
/*
 * down_write
 *   To acquire a write lock on the rw_semaphore
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 */
static inline void down_write(struct rw_semaphore *s)
{
    ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
}
#define down_write_nested down_write
/*
 * down_write_trylock
 *   To acquire a write lock on the rw_semaphore without blocking
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 *
 * Return Value:
 *   Zero:     failed to acquire the write lock
 *   Non-Zero: succeeded in acquiring the write lock
 *
 * Notes:
 *   This routine returns immediately without waiting.
 */
static inline int down_write_trylock(struct rw_semaphore *s)
{
    return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
}
/*
 * up_read
 *   To release a read lock on the rw_semaphore
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 */
static inline void up_read(struct rw_semaphore *s)
{
    ExReleaseResourceForThreadLite(&(s->rwsem),
                                   ExGetCurrentResourceThread());
}
/*
 * up_write
 *   To release a write lock on the rw_semaphore
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 */
static inline void up_write(struct rw_semaphore *s)
{
    ExReleaseResourceForThreadLite(&(s->rwsem),
                                   ExGetCurrentResourceThread());
}
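
/*
 * Usage sketch (illustrative only; the table and its helpers are
 * hypothetical).  Readers share the ERESOURCE, writers hold it exclusively:
 *
 *    static struct rw_semaphore table_sem;
 *
 *    init_rwsem(&table_sem);
 *
 *    down_read(&table_sem);
 *    lookup_entry(table, key);            // read-only access
 *    up_read(&table_sem);
 *
 *    down_write(&table_sem);
 *    insert_entry(table, key, value);     // exclusive update
 *    up_write(&table_sem);
 *
 *    fini_rwsem(&table_sem);
 */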
/*
 * rwlock_t (using semaphore)
 */

void rwlock_init(rwlock_t *rwlock);
void cfs_rwlock_fini(rwlock_t *rwlock);

void read_lock(rwlock_t *rwlock);
void read_unlock(rwlock_t *rwlock);
void write_lock(rwlock_t *rwlock);
void write_unlock(rwlock_t *rwlock);

#define write_lock_irqsave(l, f)        do { f = 0; write_lock(l); } while (0)
#define write_unlock_irqrestore(l, f)   do { write_unlock(l); } while (0)
#define read_lock_irqsave(l, f)         do { f = 0; read_lock(l); } while (0)
#define read_unlock_irqrestore(l, f)    do { read_unlock(l); } while (0)

#define write_lock_bh           write_lock
#define write_unlock_bh         write_unlock
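
/*
 * Usage sketch (illustrative only; snapshot_stats() is hypothetical).
 * The _irqsave variants above only provide the Linux calling convention;
 * the flags argument is set to zero and otherwise ignored:
 *
 *    rwlock_t       stats_lock;
 *    unsigned long  flags;
 *
 *    rwlock_init(&stats_lock);
 *
 *    read_lock_irqsave(&stats_lock, flags);
 *    snapshot_stats();
 *    read_unlock_irqrestore(&stats_lock, flags);
 */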
struct lock_class_key {
    int unused;     /* placeholder: lockdep is not supported on winnt */
};

#define lockdep_set_class(lock, class)  do {} while (0)

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}
struct semaphore {
    KSEMAPHORE  sem;
};

static inline void sema_init(struct semaphore *s, int val)
{
    KeInitializeSemaphore(&s->sem, val, val);
}

static inline void __down(struct semaphore *s)
{
    KeWaitForSingleObject(&(s->sem), Executive, KernelMode, FALSE, NULL);
}

static inline void __up(struct semaphore *s)
{
    KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
}

static inline int down_trylock(struct semaphore *s)
{
    LARGE_INTEGER timeout = {0};
    NTSTATUS status = KeWaitForSingleObject(&(s->sem), Executive,
                                            KernelMode, FALSE, &timeout);

    if (status == STATUS_SUCCESS)
        return 0;

    return 1;
}
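
/*
 * Usage sketch (illustrative only; do_limited_work() is hypothetical).
 * Following the Linux convention implemented above, down_trylock() returns
 * 0 when the semaphore was acquired without blocking:
 *
 *    struct semaphore sem;
 *
 *    sema_init(&sem, 2);          // allow two concurrent holders
 *
 *    if (down_trylock(&sem) == 0) {
 *        do_limited_work();
 *        __up(&sem);
 *    }
 */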
/*
 * mutex (built on the semaphore above):
 * - init_mutex_locked(x)
 */

#define mutex semaphore

#define CFS_DECLARE_MUTEX(x)    struct mutex x

/*
 * cfs_init_mutex
 *   To initialize a mutex_t structure
 *
 * Arguments:
 *   mutex: pointer to the mutex_t structure
 */
#define mutex_init cfs_init_mutex
static inline void cfs_init_mutex(struct mutex *mutex)
{
    sema_init(mutex, 1);
}
/*
 * cfs_mutex_down
 *   To acquire the mutex lock
 *
 * Arguments:
 *   mutex: pointer to the mutex_t structure
 */
static inline void cfs_mutex_down(struct mutex *mutex)
{
    __down(mutex);
}

static inline int cfs_mutex_down_interruptible(struct mutex *mutex)
{
    __down(mutex);
    return 0;
}

#define mutex_lock(m)           cfs_mutex_down(m)
#define mutex_trylock(s)        down_trylock(s)
#define mutex_lock_nested(m)    cfs_mutex_down(m)
#define down(m)                 cfs_mutex_down(m)
#define down_interruptible(m)   cfs_mutex_down_interruptible(m)
/*
 * cfs_mutex_up
 *   To release the mutex lock (already acquired)
 *
 * Arguments:
 *   mutex: pointer to the mutex_t structure
 */
static inline void cfs_mutex_up(struct mutex *mutex)
{
    __up(mutex);
}

#define mutex_unlock(m)         cfs_mutex_up(m)
#define up(m)                   cfs_mutex_up(m)
/*
 * cfs_init_mutex_locked
 *   To initialize the mutex in the acquired state
 *
 * Arguments:
 *   mutex: pointer to the mutex_t structure
 */
static inline void cfs_init_mutex_locked(struct mutex *mutex)
{
    cfs_init_mutex(mutex);
    cfs_mutex_down(mutex);
}

static inline void mutex_destroy(struct mutex *mutex)
{
}
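
/*
 * Usage sketch (illustrative only; the configuration object and its update
 * helper are hypothetical).  mutex is defined above as a plain semaphore:
 *
 *    struct mutex conf_mutex;
 *
 *    mutex_init(&conf_mutex);
 *
 *    mutex_lock(&conf_mutex);     // may block until the owner releases it
 *    update_config(conf);
 *    mutex_unlock(&conf_mutex);
 *
 *    mutex_destroy(&conf_mutex);
 */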
/*
 * completion:
 * - init_completion(c)
 * - wait_for_completion(c)
 */

struct completion {
    event_t     event;
};

/*
 * init_completion
 *   To initialize the completion object
 *
 * Arguments:
 *   c: pointer to the completion structure
 */
static inline void init_completion(struct completion *c)
{
    cfs_init_event(&(c->event), 1, FALSE);
}
/*
 * complete
 *   To complete/signal the completion object
 *
 * Arguments:
 *   c: pointer to the completion structure
 */
static inline void complete(struct completion *c)
{
    cfs_wake_event(&(c->event));
}
/*
 * wait_for_completion
 *   To wait on the completion object.  If the event is signaled, this
 *   function returns with the event reset (un-signaled) again, since the
 *   underlying event is a SynchronizationEvent.
 *
 * Arguments:
 *   c: pointer to the completion structure
 */
static inline void wait_for_completion(struct completion *c)
{
    cfs_wait_event_internal(&(c->event), 0);
}

static inline int wait_for_completion_interruptible(struct completion *c)
{
    cfs_wait_event_internal(&(c->event), 0);
    return 0;
}
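
/*
 * Usage sketch (illustrative only; start_worker() is a hypothetical helper
 * whose worker thread calls complete() when it is done):
 *
 *    struct completion done;
 *
 *    init_completion(&done);
 *    start_worker(&done);
 *    wait_for_completion(&done);    // blocks until the worker signals
 */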
#endif /* !__KERNEL__ */

#endif /* __LIBCFS_WINNT_CFS_LOCK_H__ */