/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/winnt/winnt-lock.h
 *
 * Basic library routines.
 */
#ifndef __LIBCFS_WINNT_CFS_LOCK_H__
#define __LIBCFS_WINNT_CFS_LOCK_H__

#ifndef __LIBCFS_LIBCFS_H__
#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
#endif

#ifdef __KERNEL__
/*
 * IMPORTANT:
 *
 * Lock declarations are not guaranteed to be initialized here,
 * although some of them are initialized on Linux. All locks
 * declared by CFS_DECL_* must be initialized explicitly.
 */

/*
 * spinlock & event definitions
 */
/* spinning lock (struct spin_lock is defined below, together with the
   spinlock routines) */
typedef struct spin_lock spinlock_t;

/* atomic counter */
typedef struct { volatile int counter; } cfs_atomic_t;

#define CFS_ATOMIC_INIT(i)      { i }

#define cfs_atomic_read(v)      ((v)->counter)
#define cfs_atomic_set(v, i)    (((v)->counter) = (i))

void FASTCALL cfs_atomic_add(int i, cfs_atomic_t *v);
void FASTCALL cfs_atomic_sub(int i, cfs_atomic_t *v);

int FASTCALL cfs_atomic_sub_and_test(int i, cfs_atomic_t *v);

void FASTCALL cfs_atomic_inc(cfs_atomic_t *v);
void FASTCALL cfs_atomic_dec(cfs_atomic_t *v);

int FASTCALL cfs_atomic_dec_and_test(cfs_atomic_t *v);
int FASTCALL cfs_atomic_inc_and_test(cfs_atomic_t *v);

int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v);
int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);

#define cfs_atomic_inc_return(v)        cfs_atomic_add_return(1, v)
#define cfs_atomic_dec_return(v)        cfs_atomic_sub_return(1, v)

int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock);
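/*
 * Usage sketch (illustrative only, not part of the original header):
 * a simple reference count built on cfs_atomic_t. The cfs_example_*
 * helper names below are hypothetical.
 */
static inline void cfs_example_ref_get(cfs_atomic_t *ref)
{
        cfs_atomic_inc(ref);
}

static inline int cfs_example_ref_put(cfs_atomic_t *ref)
{
        /* non-zero when the last reference has been dropped */
        return cfs_atomic_dec_and_test(ref);
}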
/* event */
typedef KEVENT event_t;
/*
 * cfs_init_event
 *   To initialize the event object
 *
 * Arguments:
 *   event:  pointer to the event object
 *   type:   non-zero: SynchronizationEvent
 *           zero:     NotificationEvent
 *   status: the initial status of the event
 */
static inline void
cfs_init_event(event_t *event, int type, int status)
{
        KeInitializeEvent(
                event,
                (type) ? SynchronizationEvent : NotificationEvent,
                (status) ? TRUE : FALSE
                );
}
/*
 * cfs_wait_event_internal
 *   To wait on an event to synchronize the process
 *
 * Arguments:
 *   event:   pointer to the event object
 *   timeout: the timeout for waiting, or 0 for infinite time
 *
 * Return Value:
 *   Zero:     the wait timed out
 *   Non-Zero: event signaled ...
 */
static inline int64_t
cfs_wait_event_internal(event_t *event, int64_t timeout)
{
        NTSTATUS        Status;
        LARGE_INTEGER   TimeOut;

        /* convert 'timeout' (in CFS_HZ ticks) to a relative NT timeout
           in 100ns units (negative means relative) */
        TimeOut.QuadPart = -1 * (10000000 / CFS_HZ) * timeout;

        Status = KeWaitForSingleObject(
                        event,
                        Executive,
                        KernelMode,
                        FALSE,
                        (timeout != 0) ? (&TimeOut) : (NULL)
                        );

        if (Status == STATUS_TIMEOUT)
                return 0;

        return TRUE; /* signaled case */
}
/*
 * cfs_wake_event
 *   To signal the event object
 *
 * Arguments:
 *   event: pointer to the event object
 *
 * Return Value:
 *   the previous signaled state of the event (non-zero if it was
 *   already signaled)
 */
static inline int
cfs_wake_event(event_t *event)
{
        return (KeSetEvent(event, 0, FALSE) != 0);
}
/*
 * cfs_clear_event
 *   To clear/reset the status of the event object
 *
 * Arguments:
 *   event: pointer to the event object
 */
static inline void
cfs_clear_event(event_t *event)
{
        KeResetEvent(event);
}
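/*
 * Usage sketch (illustrative only): initialize a notification event,
 * signal it, wait for it, then reset it for reuse. In real code the
 * signal would come from another thread; the helper name below is
 * hypothetical.
 */
static inline void cfs_example_event_roundtrip(event_t *ev)
{
        cfs_init_event(ev, 0, FALSE);   /* NotificationEvent, non-signaled */
        cfs_wake_event(ev);             /* normally done by another thread */
        cfs_wait_event_internal(ev, 0); /* infinite wait; returns once signaled */
        cfs_clear_event(ev);            /* notification events must be reset */
}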
/*
 * spinlock definitions / routines
 */

/*
 * Warning:
 *
 * For spinlock operations, a nested acquisition of a spinlock will
 * deadlock on an MP system, and on a UP system the saved irql gets
 * overwritten. (A UP system could otherwise allow nested spin
 * acquisition, because it does not spin at all: it just raises the irql.)
 */
struct spin_lock {
        KSPIN_LOCK      lock;
        KIRQL           irql;
};

#define CFS_DECL_SPIN(name)             spinlock_t name;
#define CFS_DECL_SPIN_EXTERN(name)      extern spinlock_t name;

#define DEFINE_SPINLOCK                 {0}

static inline void spin_lock_init(spinlock_t *lock)
{
        KeInitializeSpinLock(&(lock->lock));
}
static inline void spin_lock(spinlock_t *lock)
{
        KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}

static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
{
        KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}

static inline void spin_unlock(spinlock_t *lock)
{
        KIRQL irql = lock->irql;
        KeReleaseSpinLock(&(lock->lock), irql);
}

#define spin_lock_irqsave(lock, flags) \
        do { (flags) = 0; spin_lock(lock); } while (0)

#define spin_unlock_irqrestore(lock, flags) \
        do { spin_unlock(lock); } while (0)
/* There is no corresponding spin_trylock routine in the windows kernel,
   so we must implement a light one of our own. There is also no way to
   tell at runtime whether the system is an MP or a UP build, so we use
   a workaround for it. */

extern int libcfs_mp_system;

static int spin_trylock(spinlock_t *lock)
{
        KIRQL   Irql;
        int     rc = 0;

        ASSERT(lock != NULL);

        KeRaiseIrql(DISPATCH_LEVEL, &Irql);

        if (libcfs_mp_system) {
                if (0 == (ulong_ptr_t)lock->lock) {
                        /* atomically test and set bit 0 of the lock word;
                           CF is set when the lock was already held */
                        __asm {
                                mov     edx, dword ptr [ebp + 8]
                                lock bts dword ptr [edx], 0
                                jb      lock_failed
                                mov     rc, TRUE
                        lock_failed:
                        }
                }
        } else {
                /* UP build: raising the irql is sufficient */
                rc = TRUE;
        }

        if (rc) {
                lock->irql = Irql;      /* remembered for spin_unlock() */
        } else {
                KeLowerIrql(Irql);
        }

        return rc;
}
static int spin_is_locked(spinlock_t *lock)
{
#if _WIN32_WINNT >= 0x502
        /* KeTestSpinLock is only available on Windows Server 2003 or later */
        return !KeTestSpinLock(&lock->lock);
#else
        return (int) (lock->lock);
#endif
}
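/*
 * Usage sketch (illustrative only): opportunistic update via
 * spin_trylock; the caller falls back to other work on contention.
 * The helper name is hypothetical.
 */
static inline int cfs_example_try_update(spinlock_t *guard, int *counter)
{
        if (!spin_trylock(guard))
                return 0;       /* lock busy; caller retries later */
        (*counter)++;
        spin_unlock(guard);
        return 1;
}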
/* synchronization between cpus: taking the lock disables DPC delivery
   (the kernel task scheduler) on the CPU */
#define spin_lock_bh(x)         spin_lock(x)
#define spin_unlock_bh(x)       spin_unlock(x)
#define spin_lock_bh_init(x)    spin_lock_init(x)
/*
 * rw_semaphore (using ERESOURCE)
 */

struct rw_semaphore {
        ERESOURCE       rwsem;
};

#define DECLARE_RWSEM(name)             struct rw_semaphore name
#define CFS_DECLARE_RWSEM_EXTERN(name)  extern struct rw_semaphore name

/*
 * init_rwsem
 *   To initialize the rw_semaphore structure
 *
 * Arguments:
 *   rwsem: pointer to the rw_semaphore structure
 */
static inline void init_rwsem(struct rw_semaphore *s)
{
        ExInitializeResourceLite(&s->rwsem);
}
#define rwsem_init init_rwsem

/*
 * fini_rwsem
 *   To finalize/destroy the rw_semaphore structure
 *
 * Arguments:
 *   rwsem: pointer to the rw_semaphore structure
 *
 * Notes:
 *   On winnt we need this routine to delete the ERESOURCE.
 *   Just define it as a no-op for other systems.
 */
static inline void fini_rwsem(struct rw_semaphore *s)
{
        ExDeleteResourceLite(&s->rwsem);
}
/*
 * down_read
 *   To acquire a read lock on the rw_semaphore
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 */
static inline void down_read(struct rw_semaphore *s)
{
        ExAcquireResourceSharedLite(&s->rwsem, TRUE);
}

#define down_read_nested(s, subclass)   down_read(s)
/*
 * down_read_trylock
 *   To acquire a read lock on the rw_semaphore without blocking
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 *
 * Return Value:
 *   Zero:     failed to acquire the read lock
 *   Non-Zero: succeeded in acquiring the read lock
 *
 * Notes:
 *   This routine returns immediately without waiting.
 */
static inline int down_read_trylock(struct rw_semaphore *s)
{
        return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
}
/*
 * down_write
 *   To acquire a write lock on the struct rw_semaphore
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 */
static inline void down_write(struct rw_semaphore *s)
{
        ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
}

#define down_write_nested(s, subclass)  down_write(s)
/*
 * down_write_trylock
 *   To acquire a write lock on the rw_semaphore without blocking
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 *
 * Return Value:
 *   Zero:     failed to acquire the write lock
 *   Non-Zero: succeeded in acquiring the write lock
 *
 * Notes:
 *   This routine returns immediately without waiting.
 */
static inline int down_write_trylock(struct rw_semaphore *s)
{
        return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
}
/*
 * up_read
 *   To release a read lock on the rw_semaphore
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 */
static inline void up_read(struct rw_semaphore *s)
{
        ExReleaseResourceForThreadLite(&(s->rwsem),
                                       ExGetCurrentResourceThread());
}
/*
 * up_write
 *   To release a write lock on the rw_semaphore
 *
 * Arguments:
 *   rwsem: pointer to the struct rw_semaphore
 */
static inline void up_write(struct rw_semaphore *s)
{
        ExReleaseResourceForThreadLite(&(s->rwsem),
                                       ExGetCurrentResourceThread());
}
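/*
 * Usage sketch (illustrative only): many readers, one writer over a
 * shared value. The cfs_example_* helper names are hypothetical.
 */
static inline int cfs_example_rwsem_read(struct rw_semaphore *sem,
                                         int *value)
{
        int v;

        down_read(sem);         /* shared ERESOURCE acquisition */
        v = *value;
        up_read(sem);
        return v;
}

static inline void cfs_example_rwsem_write(struct rw_semaphore *sem,
                                           int *value, int v)
{
        down_write(sem);        /* exclusive ERESOURCE acquisition */
        *value = v;
        up_write(sem);
}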
/*
 * rwlock_t (using semaphore)
 */

typedef struct {
        spinlock_t      guard;
        int             count;
} rwlock_t;

void rwlock_init(rwlock_t *rwlock);
void cfs_rwlock_fini(rwlock_t *rwlock);

void read_lock(rwlock_t *rwlock);
void read_unlock(rwlock_t *rwlock);
void write_lock(rwlock_t *rwlock);
void write_unlock(rwlock_t *rwlock);

#define write_lock_irqsave(l, f)        do { (f) = 0; write_lock(l); } while (0)
#define write_unlock_irqrestore(l, f)   do { write_unlock(l); } while (0)
#define read_lock_irqsave(l, f)         do { (f) = 0; read_lock(l); } while (0)
#define read_unlock_irqrestore(l, f)    do { read_unlock(l); } while (0)

#define write_lock_bh           write_lock
#define write_unlock_bh         write_unlock
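/*
 * Usage sketch (illustrative only): the read and write sides of
 * rwlock_t. The helper names are hypothetical.
 */
static inline int cfs_example_rwlock_read(rwlock_t *rw, int *value)
{
        int v;

        read_lock(rw);
        v = *value;
        read_unlock(rw);
        return v;
}

static inline void cfs_example_rwlock_write(rwlock_t *rw, int *value, int v)
{
        write_lock(rw);
        *value = v;
        write_unlock(rw);
}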
struct lock_class_key {
        int     unused;         /* placeholder member; lockdep is a no-op on winnt */
};

#define lockdep_set_class(lock, class)  do { } while (0)

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}
/*
 * semaphore (using KSEMAPHORE)
 */

struct semaphore {
        KSEMAPHORE      sem;
};

static inline void sema_init(struct semaphore *s, int val)
{
        KeInitializeSemaphore(&s->sem, val, val);
}

static inline void __down(struct semaphore *s)
{
        KeWaitForSingleObject(&(s->sem), Executive, KernelMode, FALSE, NULL);
}

static inline void __up(struct semaphore *s)
{
        KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
}

/* returns 0 if the semaphore was acquired, 1 otherwise (Linux convention) */
static inline int down_trylock(struct semaphore *s)
{
        LARGE_INTEGER   timeout = {0};
        NTSTATUS        status = KeWaitForSingleObject(&(s->sem), Executive,
                                                       KernelMode, FALSE,
                                                       &timeout);

        if (status == STATUS_SUCCESS)
                return 0;

        return 1;
}
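/*
 * Usage sketch (illustrative only): a counting semaphore guarding a
 * pool of two resources. The helper name is hypothetical.
 */
static inline void cfs_example_sema_use(struct semaphore *pool)
{
        sema_init(pool, 2);     /* two resources available */
        __down(pool);           /* take one; blocks when the count is 0 */
        __up(pool);             /* put it back */
}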
/*
 * mutex (built on the semaphore above):
 *
 * - mutex_init(x)
 * - init_mutex_locked(x)
 * - mutex_lock(x) / mutex_unlock(x)
 */

#define mutex semaphore

#define CFS_DECLARE_MUTEX(x)    struct mutex x

/*
 * cfs_init_mutex
 *   To initialize a mutex (a binary semaphore, initially unlocked)
 *
 * Arguments:
 *   mutex: pointer to the mutex structure
 */
#define mutex_init cfs_init_mutex
static inline void cfs_init_mutex(struct mutex *mutex)
{
        sema_init(mutex, 1);
}
/*
 * cfs_mutex_down
 *   To acquire the mutex lock
 *
 * Arguments:
 *   mutex: pointer to the mutex structure
 */
static inline void cfs_mutex_down(struct mutex *mutex)
{
        __down(mutex);
}

static inline int cfs_mutex_down_interruptible(struct mutex *mutex)
{
        __down(mutex);
        return 0;
}
#define mutex_lock(m)                   cfs_mutex_down(m)
/* note: Linux mutex_trylock() returns non-zero on success, while
   down_trylock() returns 0 on success, hence the negation */
#define mutex_trylock(m)                (!down_trylock(m))
#define mutex_lock_nested(m, subclass)  cfs_mutex_down(m)
#define down(m)                         cfs_mutex_down(m)
#define down_interruptible(m)           cfs_mutex_down_interruptible(m)
/*
 * cfs_mutex_up
 *   To release the mutex lock (already acquired)
 *
 * Arguments:
 *   mutex: pointer to the mutex structure
 */
static inline void cfs_mutex_up(struct mutex *mutex)
{
        __up(mutex);
}

#define mutex_unlock(m)         cfs_mutex_up(m)
#define up(m)                   cfs_mutex_up(m)
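/*
 * Usage sketch (illustrative only): a classic critical section using
 * the mutex wrappers above. The helper name is hypothetical.
 */
static inline void cfs_example_mutex_section(struct mutex *mtx, int *counter)
{
        mutex_lock(mtx);        /* blocks until the mutex is free */
        (*counter)++;
        mutex_unlock(mtx);
}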
/*
 * cfs_init_mutex_locked
 *   To initialize the mutex in the acquired state
 *
 * Arguments:
 *   mutex: pointer to the mutex structure
 */
static inline void cfs_init_mutex_locked(struct mutex *mutex)
{
        cfs_init_mutex(mutex);
        cfs_mutex_down(mutex);
}

static inline void mutex_destroy(struct mutex *mutex)
{
        /* nothing to tear down for a KSEMAPHORE-based mutex */
}
/*
 * completion:
 *
 * - init_completion(c)
 * - complete(c)
 * - wait_for_completion(c)
 */

struct completion {
        event_t event;
};

/*
 * init_completion
 *   To initialize the completion object
 *
 * Arguments:
 *   c: pointer to the completion structure
 */
static inline void init_completion(struct completion *c)
{
        /* a SynchronizationEvent, initially non-signaled */
        cfs_init_event(&(c->event), 1, FALSE);
}
/*
 * complete
 *   To complete/signal the completion object
 *
 * Arguments:
 *   c: pointer to the completion structure
 */
static inline void complete(struct completion *c)
{
        cfs_wake_event(&(c->event));
}
/*
 * wait_for_completion
 *   To wait on the completion object. If the event is signaled,
 *   this function returns to the caller with the event un-signaled
 *   (it is a SynchronizationEvent, so a successful wait resets it).
 *
 * Arguments:
 *   c: pointer to the completion structure
 */
static inline void wait_for_completion(struct completion *c)
{
        cfs_wait_event_internal(&(c->event), 0);
}

static inline int wait_for_completion_interruptible(struct completion *c)
{
        cfs_wait_event_internal(&(c->event), 0);
        return 0;
}
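/*
 * Usage sketch (illustrative only): signal completion of some work and
 * wait for it. In real code complete() is called by the worker thread;
 * the helper name is hypothetical.
 */
static inline void cfs_example_completion_roundtrip(struct completion *c)
{
        init_completion(c);
        complete(c);            /* normally called by the worker thread */
        wait_for_completion(c); /* returns once the event has been signaled */
}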
#endif /* __KERNEL__ */

#endif /* __LIBCFS_WINNT_CFS_LOCK_H__ */