1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/include/libcfs/winnt/winnt-lock.h
38 * Basic library routines.
41 #ifndef __LIBCFS_WINNT_CFS_LOCK_H__
42 #define __LIBCFS_WINNT_CFS_LOCK_H__
44 #ifndef __LIBCFS_LIBCFS_H__
45 #error Do not #include this file directly. #include <libcfs/libcfs.h> instead
52 * nt specific part ...
/*
 * Atomic integer type (NT port of the Linux atomic_t API).
 * The counter is volatile so plain reads/writes are not cached away;
 * arithmetic updates go through the FASTCALL routines declared below.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { i }           /* static initializer: atomic_t a = ATOMIC_INIT(0); */

#define atomic_read(v)  ((v)->counter)          /* plain load, no barrier implied */
#define atomic_set(v,i) (((v)->counter) = (i))  /* plain store, no barrier implied */
65 void FASTCALL atomic_add(int i, atomic_t *v);
66 void FASTCALL atomic_sub(int i, atomic_t *v);
68 int FASTCALL atomic_sub_and_test(int i, atomic_t *v);
70 void FASTCALL atomic_inc(atomic_t *v);
71 void FASTCALL atomic_dec(atomic_t *v);
73 int FASTCALL atomic_dec_and_test(atomic_t *v);
74 int FASTCALL atomic_inc_and_test(atomic_t *v);
79 typedef KEVENT event_t;
83 * To initialize the event object
86 * event: pointer to the event object
87 * type: Non Zero: SynchronizationEvent
88 * Zero: NotificationEvent
89 * status: the initial stats of the event
100 cfs_init_event(event_t *event, int type, int status)
104 (type) ? SynchronizationEvent: NotificationEvent,
105 (status) ? TRUE : FALSE
111 * To wait on an event to syncrhonize the process
114 * event: pointer to the event object
115 * timeout: the timeout for waitting or 0 means infinite time.
118 * Zero: waiting timeouts
119 * Non Zero: event signaled ...
125 static inline int64_t
126 cfs_wait_event(event_t * event, int64_t timeout)
129 LARGE_INTEGER TimeOut;
131 TimeOut.QuadPart = -1 * (10000000/HZ) * timeout;
133 Status = KeWaitForSingleObject(
138 (timeout != 0) ? (&TimeOut) : (NULL)
141 if (Status == STATUS_TIMEOUT) {
145 return TRUE; // signaled case
150 * To signal the event object
153 * event: pointer to the event object
163 cfs_wake_event(event_t * event)
165 return (KeSetEvent(event, 0, FALSE) != 0);
170 * To clear/reset the status of the event object
173 * event: pointer to the event object
183 cfs_clear_event(event_t * event)
 * All locks' declarations are not guaranteed to be initialized,
 * although some of them are initialized in Linux. All locks
 * declared by CFS_DECL_* should be initialized explicitly.

 * spin lock definitions / routines

 * for spinlock operations, trying to grab a nesting acquisition of a
 * spinlock will cause dead-lock on an MP system and the current irql to
 * be overwritten on a UP system. (A UP system could allow nesting spin
 * acquisition, because it does not spin at all, just raises the irql.)
212 typedef struct spin_lock {
220 #define CFS_DECL_SPIN(name) spinlock_t name;
221 #define CFS_DECL_SPIN_EXTERN(name) extern spinlock_t name;
224 static inline void spin_lock_init(spinlock_t *lock)
226 KeInitializeSpinLock(&(lock->lock));
230 static inline void spin_lock(spinlock_t *lock)
232 KeAcquireSpinLock(&(lock->lock), &(lock->irql));
235 static inline void spin_unlock(spinlock_t *lock)
237 KIRQL irql = lock->irql;
238 KeReleaseSpinLock(&(lock->lock), irql);
/* irq "flags" carry no information on NT: the save form just zeroes
 * flags and takes the lock; the restore form only drops the lock
 * (the real irql is stashed inside the spinlock_t itself) */
#define spin_lock_irqsave(lock, flags)          do { (flags) = 0; spin_lock(lock); } while (0)
#define spin_unlock_irqrestore(lock, flags)     do { spin_unlock(lock); } while (0)
246 /* There's no corresponding routine in windows kernel.
247 We must realize a light one of our own. But there's
248 no way to identify the system is MP build or UP build
249 on the runtime. We just uses a workaround for it. */

/* NOTE(review): this chunk is truncated — only the head of spin_trylock
 * is visible (declarations, the tail of the asm block and the return
 * path are missing). What is visible: raise to DISPATCH_LEVEL as a real
 * acquire would, fast-check the lock word, then on x86 use `lock bts`
 * as the atomic try-acquire. */
253 static int spin_trylock(spinlock_t *lock)

258 ASSERT(lock != NULL);

/* raise to DISPATCH_LEVEL so we behave like KeAcquireSpinLock while trying */
260 KeRaiseIrql(DISPATCH_LEVEL, &Irql);

/* fast path: only attempt the atomic set when the lock word reads free (0) */
263 if (0 == (ulong_ptr)lock->lock) {

266 mov edx, dword ptr [ebp + 8]
/* atomically test-and-set bit 0 of the lock word; CF receives the old bit */
267 lock bts dword ptr[edx], 0
/* "bottom half" variants: a spin_lock on NT already raises the irql to
 * DISPATCH_LEVEL, which disables DPC delivery and the kernel task
 * scheduler on this CPU, so the _bh forms map straight onto the plain
 * spinlock operations */
#define spin_lock_bh(x)         spin_lock(x)
#define spin_unlock_bh(x)       spin_unlock(x)
#define spin_lock_bh_init(x)    spin_lock_init(x)
297 * rw_semaphore (using ERESOURCE)
301 typedef struct rw_semaphore {
306 #define CFS_DECL_RWSEM(name) rw_semaphore_t name
307 #define CFS_DECL_RWSEM_EXTERN(name) extern rw_semaphore_t name
312 * To initialize the the rw_semaphore_t structure
315 * rwsem: pointer to the rw_semaphore_t structure
324 static inline void init_rwsem(rw_semaphore_t *s)
326 ExInitializeResourceLite(&s->rwsem);
332 * To finilize/destroy the the rw_semaphore_t structure
335 * rwsem: pointer to the rw_semaphore_t structure
341 * For winnt system, we need this routine to delete the ERESOURCE.
342 * Just define it NULL for other systems.
345 static inline void fini_rwsem(rw_semaphore_t *s)
347 ExDeleteResourceLite(&s->rwsem);
352 * To acquire read-lock of the rw_semahore
355 * rwsem: pointer to the rw_semaphore_t structure
364 static inline void down_read(struct rw_semaphore *s)
366 ExAcquireResourceSharedLite(&s->rwsem, TRUE);
372 * To acquire read-lock of the rw_semahore without blocking
375 * rwsem: pointer to the rw_semaphore_t structure
378 * Zero: failed to acquire the read lock
379 * Non-Zero: succeeded to acquire the read lock
382 * This routine will return immediately without waiting.
385 static inline int down_read_trylock(struct rw_semaphore *s)
387 return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
393 * To acquire write-lock of the rw_semahore
396 * rwsem: pointer to the rw_semaphore_t structure
405 static inline void down_write(struct rw_semaphore *s)
407 ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
413 * To acquire write-lock of the rw_semahore without blocking
416 * rwsem: pointer to the rw_semaphore_t structure
419 * Zero: failed to acquire the write lock
420 * Non-Zero: succeeded to acquire the read lock
423 * This routine will return immediately without waiting.
426 static inline int down_write_trylock(struct rw_semaphore *s)
428 return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
434 * To release read-lock of the rw_semahore
437 * rwsem: pointer to the rw_semaphore_t structure
446 static inline void up_read(struct rw_semaphore *s)
448 ExReleaseResourceForThreadLite(
450 ExGetCurrentResourceThread());
456 * To release write-lock of the rw_semahore
459 * rwsem: pointer to the rw_semaphore_t structure
468 static inline void up_write(struct rw_semaphore *s)
470 ExReleaseResourceForThreadLite(
472 ExGetCurrentResourceThread());
476 * rwlock_t (using sempahore)
490 void rwlock_init(rwlock_t * rwlock);
491 void rwlock_fini(rwlock_t * rwlock);
493 void read_lock(rwlock_t * rwlock);
494 void read_unlock(rwlock_t * rwlock);
495 void write_lock(rwlock_t * rwlock);
496 void write_unlock(rwlock_t * rwlock);
498 #define write_lock_irqsave(l, f) do {f = 0; write_lock(l);} while(0)
499 #define write_unlock_irqrestore(l, f) do {write_unlock(l);} while(0)
500 #define read_lock_irqsave(l, f) do {f=0; read_lock(l);} while(0)
501 #define read_unlock_irqrestore(l, f) do {read_unlock(l);} while(0)
512 typedef struct semaphore {
516 static inline void sema_init(struct semaphore *s, int val)
518 KeInitializeSemaphore(&s->sem, val, val);
521 static inline void __down(struct semaphore *s)
523 KeWaitForSingleObject( &(s->sem), Executive,
524 KernelMode, FALSE, NULL );
528 static inline void __up(struct semaphore *s)
530 KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
537 * - init_mutex_locked(x)
545 * To initialize a mutex_t structure
548 * mutex: pointer to the mutex_t structure
557 static inline void init_mutex(mutex_t *mutex)
565 * To acquire the mutex lock
568 * mutex: pointer to the mutex_t structure
577 static inline void mutex_down(mutex_t *mutex)
585 * To release the mutex lock (acquired already)
588 * mutex: pointer to the mutex_t structure
597 static inline void mutex_up(mutex_t *mutex)
605 * To initialize the mutex as acquired state
608 * mutex: pointer to the mutex_t structure
617 static inline init_mutex_locked(mutex_t *mutex)
626 * - init_complition(c)
628 * - wait_for_completion(c)
638 * To initialize the completion object
641 * c: pointer to the completion structure
650 static inline void init_completion(struct completion *c)
652 cfs_init_event(&(c->event), 1, FALSE);
658 * To complete/signal the completion object
661 * c: pointer to the completion structure
670 static inline void complete(struct completion *c)
672 cfs_wake_event(&(c->event));
676 * wait_for_completion
677 * To wait on the completion object. If the event is signaled,
678 * this function will return to the call with the event un-singled.
681 * c: pointer to the completion structure
690 static inline void wait_for_completion(struct completion *c)
692 cfs_wait_event(&(c->event), 0);
698 #include "../user-lock.h"