4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
31 * This file is part of Lustre, http://www.lustre.org/
32 * Lustre is a trademark of Sun Microsystems, Inc.
34 * libcfs/include/libcfs/winnt/winnt-lock.h
36 * Basic library routines.
39 #ifndef __LIBCFS_WINNT_CFS_LOCK_H__
40 #define __LIBCFS_WINNT_CFS_LOCK_H__
42 #ifndef __LIBCFS_LIBCFS_H__
43 #error Do not #include this file directly. #include <libcfs/libcfs.h> instead
52 * All locks' declarations are not guaranteed to be initialized,
53 * although some of them are initialized in Linux. All locks
54 * declared by CFS_DECL_* should be initialized explicitly.
58 * spinlock & event definitions
/* Forward declaration: the full struct cfs_spin_lock is defined later in
 * this header; needed here for cfs_atomic_dec_and_lock()'s prototype. */
61 typedef struct cfs_spin_lock cfs_spinlock_t;
/* Atomic integer counter.  The volatile qualifier only prevents compiler
 * caching; atomicity comes from the out-of-line FASTCALL routines below
 * (presumably implemented with interlocked ops -- not visible here). */
65 typedef struct { volatile int counter; } cfs_atomic_t;
/* Static initializer, e.g.: cfs_atomic_t a = CFS_ATOMIC_INIT(0); */
67 #define CFS_ATOMIC_INIT(i) { i }
/* NOTE(review): read/set are plain, non-interlocked accesses. */
69 #define cfs_atomic_read(v) ((v)->counter)
70 #define cfs_atomic_set(v,i) (((v)->counter) = (i))
/* Add/subtract i to/from *v. */
72 void FASTCALL cfs_atomic_add(int i, cfs_atomic_t *v);
73 void FASTCALL cfs_atomic_sub(int i, cfs_atomic_t *v);
/* Subtract i; returns non-zero when the result is zero (Linux semantics
 * implied by the name -- TODO confirm against the implementation). */
75 int FASTCALL cfs_atomic_sub_and_test(int i, cfs_atomic_t *v);
/* Increment / decrement by one. */
77 void FASTCALL cfs_atomic_inc(cfs_atomic_t *v);
78 void FASTCALL cfs_atomic_dec(cfs_atomic_t *v);
/* Inc/dec and test the result against zero (see note above). */
80 int FASTCALL cfs_atomic_dec_and_test(cfs_atomic_t *v);
81 int FASTCALL cfs_atomic_inc_and_test(cfs_atomic_t *v);
/* Add/subtract and return the new counter value. */
83 int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v);
84 int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);
86 #define cfs_atomic_inc_return(v) cfs_atomic_add_return(1, v)
87 #define cfs_atomic_dec_return(v) cfs_atomic_sub_return(1, v)
/* Decrement; if the counter reaches zero, acquire *lock and return
 * non-zero with the lock held (Linux atomic_dec_and_lock contract --
 * TODO confirm in the out-of-line implementation). */
89 int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock);
/* event_t: thin alias for the NT kernel KEVENT dispatcher object. */
93 typedef KEVENT event_t;
/*
 * cfs_init_event
 */
97 * To initialize the event object
100 * event: pointer to the event object
101 * type: Non Zero: SynchronizationEvent
102 * Zero: NotificationEvent
103 * status: the initial state of the event (signaled when non-zero)
/* Wraps KeInitializeEvent; the type flag selects auto-reset
 * (SynchronizationEvent) vs manual-reset (NotificationEvent). */
114 cfs_init_event(event_t *event, int type, int status)
118 (type) ? SynchronizationEvent: NotificationEvent,
119 (status) ? TRUE : FALSE
124 * cfs_wait_event_internal
125 * To wait on an event to synchronize the process
128 * event: pointer to the event object
129 * timeout: the timeout for waiting, or 0 meaning infinite wait.
132 * Zero: waiting timed out
133 * Non Zero: event signaled ...
139 static inline int64_t
140 cfs_wait_event_internal(event_t * event, int64_t timeout)
143 LARGE_INTEGER TimeOut;
/* Convert a timeout in CFS_HZ-based ticks into a relative NT timeout:
 * negative QuadPart means relative time, in 100ns units
 * (10000000/CFS_HZ 100ns units per tick). */
145 TimeOut.QuadPart = -1 * (10000000/CFS_HZ) * timeout;
147 Status = KeWaitForSingleObject(
/* A zero timeout selects a NULL NT timeout, i.e. wait forever. */
152 (timeout != 0) ? (&TimeOut) : (NULL)
155 if (Status == STATUS_TIMEOUT) {
159 return TRUE; // signaled case
/*
 * cfs_wake_event
 */
164 * To signal the event object
167 * event: pointer to the event object
/* Returns non-zero if the event was previously signaled
 * (KeSetEvent returns the previous state). */
177 cfs_wake_event(event_t * event)
179 return (KeSetEvent(event, 0, FALSE) != 0);
/*
 * cfs_clear_event
 */
184 * To clear/reset the status of the event object
187 * event: pointer to the event object
197 cfs_clear_event(event_t * event)
203 * spin lock definitions / routines
209 * for spinlock operations, try to grab nesting acquisition of
210 * spinlock will cause dead-lock in MP system and current irql
211 * overwritten for UP system. (UP system could allow nesting spin
212 * acquisition, because it's not spin at all, just raising the irql.)
/* Holds the NT KSPIN_LOCK plus the IRQL saved at acquire time so that
 * cfs_spin_unlock() can restore it. */
216 struct cfs_spin_lock {
221 #define CFS_DECL_SPIN(name) cfs_spinlock_t name;
222 #define CFS_DECL_SPIN_EXTERN(name) extern cfs_spinlock_t name;
/* Zero-initializer; matches KeInitializeSpinLock's initial state. */
224 #define CFS_SPIN_LOCK_UNLOCKED {0}
226 static inline void cfs_spin_lock_init(cfs_spinlock_t *lock)
228 KeInitializeSpinLock(&(lock->lock));
/* Acquire: raises IRQL to DISPATCH_LEVEL and stashes the old IRQL
 * inside the lock itself. */
231 static inline void cfs_spin_lock(cfs_spinlock_t *lock)
233 KeAcquireSpinLock(&(lock->lock), &(lock->irql));
/* Lockdep subclass is meaningless on Windows; the subclass argument
 * is ignored and this is identical to cfs_spin_lock(). */
236 static inline void cfs_spin_lock_nested(cfs_spinlock_t *lock, unsigned subclass)
238 KeAcquireSpinLock(&(lock->lock), &(lock->irql));
/* Release: read the saved IRQL before releasing, since lock->irql may
 * be overwritten by another acquirer once the lock is dropped. */
241 static inline void cfs_spin_unlock(cfs_spinlock_t *lock)
243 KIRQL irql = lock->irql;
244 KeReleaseSpinLock(&(lock->lock), irql);
/* No separate irq flags on this port: flags is zeroed on save and
 * ignored on restore. */
248 #define cfs_spin_lock_irqsave(lock, flags) \
249 do {(flags) = 0; cfs_spin_lock(lock);} while(0)
251 #define cfs_spin_unlock_irqrestore(lock, flags) \
252 do {cfs_spin_unlock(lock);} while(0)
255 /* There's no corresponding routine in windows kernel.
256 We must realize a light one of our own. But there's
257 no way to identify the system is MP build or UP build
258 on the runtime. We just use a workaround for it. */
/* Set at runtime to non-zero on multiprocessor systems. */
260 extern int libcfs_mp_system;
/* Try to take the lock without spinning.  Raises IRQL first, then on MP
 * systems attempts an atomic test-and-set of the raw lock word via
 * inline x86 assembly (32-bit only -- note the ebp-relative operand). */
262 static int cfs_spin_trylock(cfs_spinlock_t *lock)
267 ASSERT(lock != NULL);
269 KeRaiseIrql(DISPATCH_LEVEL, &Irql);
271 if (libcfs_mp_system) {
272 if (0 == (ulong_ptr_t)lock->lock) {
275 mov edx, dword ptr [ebp + 8]
276 lock bts dword ptr[edx], 0
/* NOTE(review): peeking at the raw lock word is inherently racy; callers
 * must treat the result as advisory only. */
299 static int cfs_spin_is_locked(cfs_spinlock_t *lock)
301 #if _WIN32_WINNT >= 0x502
302 /* KeTestSpinLock only available on 2k3 server or later */
303 return (!KeTestSpinLock(&lock->lock));
305 return (int) (lock->lock);
309 /* synchronization between cpus: it will disable all DPCs
310 kernel task scheduler on the CPU */
/* Bottom-half variants map directly onto the plain spinlock ops:
 * holding an NT spinlock already runs at DISPATCH_LEVEL, which blocks
 * DPC delivery on this CPU. */
311 #define cfs_spin_lock_bh(x) cfs_spin_lock(x)
312 #define cfs_spin_unlock_bh(x) cfs_spin_unlock(x)
313 #define cfs_spin_lock_bh_init(x) cfs_spin_lock_init(x)
316 * cfs_rw_semaphore (using ERESOURCE)
/* Reader/writer semaphore backed by an NT executive resource. */
320 typedef struct cfs_rw_semaphore {
322 } cfs_rw_semaphore_t;
325 #define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name
326 #define CFS_DECLARE_RWSEM_EXTERN(name) extern cfs_rw_semaphore_t name
/*
 * cfs_init_rwsem
 */
330 * To initialize the cfs_rw_semaphore_t structure
333 * rwsem: pointer to the cfs_rw_semaphore_t structure
342 static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
344 ExInitializeResourceLite(&s->rwsem);
346 #define rwsem_init cfs_init_rwsem
/*
 * cfs_fini_rwsem
 */
350 * To finalize/destroy the cfs_rw_semaphore_t structure
353 * rwsem: pointer to the cfs_rw_semaphore_t structure
359 * For winnt system, we need this routine to delete the ERESOURCE.
360 * Just define it NULL for other systems.
363 static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
365 ExDeleteResourceLite(&s->rwsem);
/*
 * cfs_down_read
 */
370 * To acquire read-lock of the cfs_rw_semaphore
373 * rwsem: pointer to the cfs_rw_semaphore_t structure
/* Blocking shared acquire: Wait == TRUE, so this cannot fail. */
382 static inline void cfs_down_read(cfs_rw_semaphore_t *s)
384 ExAcquireResourceSharedLite(&s->rwsem, TRUE);
/* Lockdep nesting is a no-op on this port. */
386 #define cfs_down_read_nested cfs_down_read
390 * cfs_down_read_trylock
391 * To acquire read-lock of the cfs_rw_semaphore without blocking
394 * rwsem: pointer to the cfs_rw_semaphore_t structure
397 * Zero: failed to acquire the read lock
398 * Non-Zero: succeeded to acquire the read lock
401 * This routine will return immediately without waiting.
/* Non-blocking shared acquire: Wait == FALSE. */
404 static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
406 return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
/*
 * cfs_down_write
 */
412 * To acquire write-lock of the cfs_rw_semaphore
415 * rwsem: pointer to the cfs_rw_semaphore_t structure
/* Blocking exclusive acquire: Wait == TRUE, so this cannot fail. */
424 static inline void cfs_down_write(cfs_rw_semaphore_t *s)
426 ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
/* Lockdep nesting is a no-op on this port. */
428 #define cfs_down_write_nested cfs_down_write
/*
 * cfs_down_write_trylock
 */
432 * To acquire write-lock of the cfs_rw_semaphore without blocking
435 * rwsem: pointer to the cfs_rw_semaphore_t structure
438 * Zero: failed to acquire the write lock
439 * Non-Zero: succeeded to acquire the write lock
442 * This routine will return immediately without waiting.
/* Non-blocking exclusive acquire: Wait == FALSE. */
445 static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
447 return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
/*
 * cfs_up_read
 */
453 * To release read-lock of the cfs_rw_semaphore
456 * rwsem: pointer to the cfs_rw_semaphore_t structure
/* ERESOURCE releases are per-thread, so the owning thread id must be
 * supplied explicitly. */
465 static inline void cfs_up_read(cfs_rw_semaphore_t *s)
467 ExReleaseResourceForThreadLite(
469 ExGetCurrentResourceThread());
/*
 * cfs_up_write
 */
475 * To release write-lock of the cfs_rw_semaphore
478 * rwsem: pointer to the cfs_rw_semaphore_t structure
/* Same release path as cfs_up_read: ERESOURCE tracks shared vs
 * exclusive ownership internally. */
487 static inline void cfs_up_write(cfs_rw_semaphore_t *s)
489 ExReleaseResourceForThreadLite(
491 ExGetCurrentResourceThread());
495 * rwlock_t (using semaphore)
/* Internal guard spinlock of the rwlock -- full struct definition is
 * not visible in this chunk. */
505 cfs_spinlock_t guard;
/* Out-of-line rwlock operations (implemented elsewhere). */
509 void cfs_rwlock_init(cfs_rwlock_t * rwlock);
510 void cfs_rwlock_fini(cfs_rwlock_t * rwlock);
512 void cfs_read_lock(cfs_rwlock_t * rwlock);
513 void cfs_read_unlock(cfs_rwlock_t * rwlock);
514 void cfs_write_lock(cfs_rwlock_t * rwlock);
515 void cfs_write_unlock(cfs_rwlock_t * rwlock);
/*
 * irq-save/restore wrappers for the rwlock primitives.  This port has no
 * separate irq-flags state, so the flags argument is simply zeroed on
 * save and ignored on restore (mirrors cfs_spin_lock_irqsave above).
 *
 * Fix: cfs_read_lock_irqsave was missing the closing parenthesis of its
 * macro parameter list ("(l, f do {...}"), which made every use of the
 * macro a syntax error.
 */
#define cfs_write_lock_irqsave(l, f)      do {f = 0; cfs_write_lock(l);} while(0)
#define cfs_write_unlock_irqrestore(l, f) do {cfs_write_unlock(l);} while(0)
#define cfs_read_lock_irqsave(l, f)       do {f = 0; cfs_read_lock(l);} while(0)
#define cfs_read_unlock_irqrestore(l, f)  do {cfs_read_unlock(l);} while(0)

/* Bottom-half variants are identical to the plain lock/unlock ops. */
#define cfs_write_lock_bh                 cfs_write_lock
#define cfs_write_unlock_bh               cfs_write_unlock
/* Lockdep support does not exist on Windows; the key type is an empty
 * placeholder and all lockdep hooks below are no-ops. */
525 typedef struct cfs_lock_class_key {
527 } cfs_lock_class_key_t;
529 #define cfs_lockdep_set_class(lock, class) do {} while(0)
531 static inline void cfs_lockdep_off(void)
535 static inline void cfs_lockdep_on(void)
/* Counting semaphore backed by the NT kernel KSEMAPHORE. */
547 typedef struct cfs_semaphore {
/* Initialize with both the initial count and the limit set to val. */
551 static inline void cfs_sema_init(cfs_semaphore_t *s, int val)
553 KeInitializeSemaphore(&s->sem, val, val);
/* Uninterruptible wait (Alertable == FALSE, no timeout). */
556 static inline void __down(cfs_semaphore_t *s)
558 KeWaitForSingleObject( &(s->sem), Executive,
559 KernelMode, FALSE, NULL );
/* Release one count; priority increment 0, no wait after release. */
562 static inline void __up(cfs_semaphore_t *s)
564 KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
/* Non-blocking down: a zero timeout makes KeWaitForSingleObject poll.
 * NOTE(review): return convention not visible in this chunk --
 * presumably Linux down_trylock semantics (0 == acquired); confirm. */
567 static inline int down_trylock(cfs_semaphore_t *s)
569 LARGE_INTEGER timeout = {0};
571 KeWaitForSingleObject( &(s->sem), Executive,
572 KernelMode, FALSE, &timeout);
574 if (status == STATUS_SUCCESS) {
585 * - init_mutex_locked(x)
/* Mutex implemented as a binary cfs_semaphore (count 1). */
590 typedef struct cfs_semaphore cfs_mutex_t;
592 #define CFS_DECLARE_MUTEX(x) cfs_mutex_t x
/*
 * cfs_init_mutex
 */
596 * To initialize a mutex_t structure
599 * mutex: pointer to the mutex_t structure
607 #define cfs_mutex_init cfs_init_mutex
/* Initial count of 1 == unlocked. */
608 static inline void cfs_init_mutex(cfs_mutex_t *mutex)
610 cfs_sema_init(mutex, 1);
/*
 * cfs_mutex_down
 */
615 * To acquire the mutex lock
618 * mutex: pointer to the mutex_t structure
627 static inline void cfs_mutex_down(cfs_mutex_t *mutex)
/* NOTE(review): NT kernel waits here are not interruptible; presumably
 * this just waits and returns 0 -- body not visible in this chunk. */
632 static inline int cfs_mutex_down_interruptible(cfs_mutex_t *mutex)
/* Linux-compatible aliases for the mutex/semaphore API. */
638 #define cfs_mutex_lock(m) cfs_mutex_down(m)
639 #define cfs_mutex_trylock(s) down_trylock(s)
640 #define cfs_mutex_lock_nested(m) cfs_mutex_down(m)
641 #define cfs_down(m) cfs_mutex_down(m)
642 #define cfs_down_interruptible(m) cfs_mutex_down_interruptible(m)
/*
 * cfs_mutex_up
 */
646 * To release the mutex lock (acquired already)
649 * mutex: pointer to the mutex_t structure
658 static inline void cfs_mutex_up(cfs_mutex_t *mutex)
663 #define cfs_mutex_unlock(m) cfs_mutex_up(m)
664 #define cfs_up(m) cfs_mutex_up(m)
/*
 * cfs_init_mutex_locked
 */
668 * To initialize the mutex as acquired state
671 * mutex: pointer to the mutex_t structure
/* Init to unlocked, then immediately take it. */
680 static inline void cfs_init_mutex_locked(cfs_mutex_t *mutex)
682 cfs_init_mutex(mutex);
683 cfs_mutex_down(mutex);
/* Nothing to tear down for a KSEMAPHORE-based mutex. */
686 static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
693 * - init_completion(c)
695 * - wait_for_completion(c)
/*
 * cfs_init_completion
 */
705 * To initialize the completion object
708 * c: pointer to the completion structure
/* type == 1 selects an auto-reset (SynchronizationEvent), initially
 * un-signaled: a waiter consumes exactly one cfs_complete(). */
717 static inline void cfs_init_completion(cfs_completion_t *c)
719 cfs_init_event(&(c->event), 1, FALSE);
/*
 * cfs_complete
 */
725 * To complete/signal the completion object
728 * c: pointer to the completion structure
737 static inline void cfs_complete(cfs_completion_t *c)
739 cfs_wake_event(&(c->event));
743 * wait_for_completion
744 * To wait on the completion object. If the event is signaled,
745 * this function will return to the caller with the event un-signaled
746 * (the auto-reset event is consumed by the wait).
748 * c: pointer to the completion structure
/* timeout 0 == wait forever (see cfs_wait_event_internal). */
757 static inline void cfs_wait_for_completion(cfs_completion_t *c)
759 cfs_wait_event_internal(&(c->event), 0);
/* NOTE(review): NT kernel waits are not interruptible; this presumably
 * always returns 0 -- the return statement is not visible here. */
762 static inline int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
764 cfs_wait_event_internal(&(c->event), 0);
768 #else /* !__KERNEL__ */
769 #endif /* !__KERNEL__ */