/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=4:tabstop=4:
 *
 * Copyright (C) 2001 Cluster File Systems, Inc. <braam@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Basic library routines.
 */
#ifndef __LIBCFS_WINNT_CFS_LOCK_H__
#define __LIBCFS_WINNT_CFS_LOCK_H__

#ifndef __LIBCFS_LIBCFS_H__
#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
#endif

/*
 * nt specific part ...
 */
/*
 * Atomic integer counter (NT port of the Linux atomic_t API).
 * Plain reads/writes go through the macros below; arithmetic that must
 * be atomic goes through the FASTCALL helpers declared further down.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)      { (i) }

#define atomic_read(v)      ((v)->counter)
#define atomic_set(v, i)    (((v)->counter) = (i))
49 void FASTCALL atomic_add(int i, atomic_t *v);
50 void FASTCALL atomic_sub(int i, atomic_t *v);
52 int FASTCALL atomic_sub_and_test(int i, atomic_t *v);
54 void FASTCALL atomic_inc(atomic_t *v);
55 void FASTCALL atomic_dec(atomic_t *v);
57 int FASTCALL atomic_dec_and_test(atomic_t *v);
58 int FASTCALL atomic_inc_and_test(atomic_t *v);
63 typedef KEVENT event_t;
67 * To initialize the event object
70 * event: pointer to the event object
71 * type: Non Zero: SynchronizationEvent
72 * Zero: NotificationEvent
73 * status: the initial stats of the event
84 cfs_init_event(event_t *event, int type, int status)
88 (type) ? SynchronizationEvent: NotificationEvent,
89 (status) ? TRUE : FALSE
95 * To wait on an event to syncrhonize the process
98 * event: pointer to the event object
99 * timeout: the timeout for waitting or 0 means infinite time.
102 * Zero: waiting timeouts
103 * Non Zero: event signaled ...
109 static inline int64_t
110 cfs_wait_event(event_t * event, int64_t timeout)
113 LARGE_INTEGER TimeOut;
115 TimeOut.QuadPart = -1 * (10000000/HZ) * timeout;
117 Status = KeWaitForSingleObject(
122 (timeout != 0) ? (&TimeOut) : (NULL)
125 if (Status == STATUS_TIMEOUT) {
129 return TRUE; // signaled case
134 * To signal the event object
137 * event: pointer to the event object
147 cfs_wake_event(event_t * event)
149 return (KeSetEvent(event, 0, FALSE) != 0);
154 * To clear/reset the status of the event object
157 * event: pointer to the event object
167 cfs_clear_event(event_t * event)
/*
 * IMPORTANT !!!!!!!!
 *
 * All locks' declaration are not guaranteed to be initialized,
 * although some of them are initialized in Linux.  All locks
 * declared by CFS_DECL_* should be initialized explicitly.
 */
183 * spin lock defintions / routines
189 * for spinlock operations, try to grab nesting acquisition of
190 * spinlock will cause dead-lock in MP system and current irql
191 * overwritten for UP system. (UP system could allow nesting spin
192 * acqisition, because it's not spin at all just raising the irql.)
196 typedef struct spin_lock {
204 #define CFS_DECL_SPIN(name) spinlock_t name;
205 #define CFS_DECL_SPIN_EXTERN(name) extern spinlock_t name;
208 static inline void spin_lock_init(spinlock_t *lock)
210 KeInitializeSpinLock(&(lock->lock));
214 static inline void spin_lock(spinlock_t *lock)
216 KeAcquireSpinLock(&(lock->lock), &(lock->irql));
219 static inline void spin_unlock(spinlock_t *lock)
221 KIRQL irql = lock->irql;
222 KeReleaseSpinLock(&(lock->lock), irql);
/* irqsave variants: irql handling is folded into spin_lock/spin_unlock on
 * NT, so flags is only zeroed to keep Linux-style callers compiling. */
#define spin_lock_irqsave(lock, flags)       do { (flags) = 0; spin_lock(lock); } while (0)
#define spin_unlock_irqrestore(lock, flags)  do { spin_unlock(lock); } while (0)
230 /* There's no corresponding routine in windows kernel.
231 We must realize a light one of our own. But there's
232 no way to identify the system is MP build or UP build
233 on the runtime. We just uses a workaround for it. */
237 static int spin_trylock(spinlock_t *lock)
242 ASSERT(lock != NULL);
244 KeRaiseIrql(DISPATCH_LEVEL, &Irql);
247 if (0 == (ulong_ptr)lock->lock) {
250 mov edx, dword ptr [ebp + 8]
251 lock bts dword ptr[edx], 0
/* synchronization between cpus: it will disable all DPCs
   kernel task scheduler on the CPU.  On NT the plain spin_lock already
   raises to DISPATCH_LEVEL, so the _bh variants are straight aliases. */
#define spin_lock_bh(x)         spin_lock(x)
#define spin_unlock_bh(x)       spin_unlock(x)
#define spin_lock_bh_init(x)    spin_lock_init(x)
281 * rw_semaphore (using ERESOURCE)
285 typedef struct rw_semaphore {
290 #define CFS_DECL_RWSEM(name) rw_semaphore_t name
291 #define CFS_DECL_RWSEM_EXTERN(name) extern rw_semaphore_t name
296 * To initialize the the rw_semaphore_t structure
299 * rwsem: pointer to the rw_semaphore_t structure
308 static inline void init_rwsem(rw_semaphore_t *s)
310 ExInitializeResourceLite(&s->rwsem);
316 * To finilize/destroy the the rw_semaphore_t structure
319 * rwsem: pointer to the rw_semaphore_t structure
325 * For winnt system, we need this routine to delete the ERESOURCE.
326 * Just define it NULL for other systems.
329 static inline void fini_rwsem(rw_semaphore_t *s)
331 ExDeleteResourceLite(&s->rwsem);
336 * To acquire read-lock of the rw_semahore
339 * rwsem: pointer to the rw_semaphore_t structure
348 static inline void down_read(struct rw_semaphore *s)
350 ExAcquireResourceSharedLite(&s->rwsem, TRUE);
356 * To acquire read-lock of the rw_semahore without blocking
359 * rwsem: pointer to the rw_semaphore_t structure
362 * Zero: failed to acquire the read lock
363 * Non-Zero: succeeded to acquire the read lock
366 * This routine will return immediately without waiting.
369 static inline int down_read_trylock(struct rw_semaphore *s)
371 return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
377 * To acquire write-lock of the rw_semahore
380 * rwsem: pointer to the rw_semaphore_t structure
389 static inline void down_write(struct rw_semaphore *s)
391 ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
397 * To acquire write-lock of the rw_semahore without blocking
400 * rwsem: pointer to the rw_semaphore_t structure
403 * Zero: failed to acquire the write lock
404 * Non-Zero: succeeded to acquire the read lock
407 * This routine will return immediately without waiting.
410 static inline int down_write_trylock(struct rw_semaphore *s)
412 return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
418 * To release read-lock of the rw_semahore
421 * rwsem: pointer to the rw_semaphore_t structure
430 static inline void up_read(struct rw_semaphore *s)
432 ExReleaseResourceForThreadLite(
434 ExGetCurrentResourceThread());
440 * To release write-lock of the rw_semahore
443 * rwsem: pointer to the rw_semaphore_t structure
452 static inline void up_write(struct rw_semaphore *s)
454 ExReleaseResourceForThreadLite(
456 ExGetCurrentResourceThread());
460 * rwlock_t (using sempahore)
474 void rwlock_init(rwlock_t * rwlock);
475 void rwlock_fini(rwlock_t * rwlock);
477 void read_lock(rwlock_t * rwlock);
478 void read_unlock(rwlock_t * rwlock);
479 void write_lock(rwlock_t * rwlock);
480 void write_unlock(rwlock_t * rwlock);
482 #define write_lock_irqsave(l, f) do {f = 0; write_lock(l);} while(0)
483 #define write_unlock_irqrestore(l, f) do {write_unlock(l);} while(0)
484 #define read_lock_irqsave(l, f) do {f=0; read_lock(l);} while(0)
485 #define read_unlock_irqrestore(l, f) do {read_unlock(l);} while(0)
496 typedef struct semaphore {
500 static inline void sema_init(struct semaphore *s, int val)
502 KeInitializeSemaphore(&s->sem, val, val);
505 static inline void __down(struct semaphore *s)
507 KeWaitForSingleObject( &(s->sem), Executive,
508 KernelMode, FALSE, NULL );
512 static inline void __up(struct semaphore *s)
514 KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
521 * - init_mutex_locked(x)
529 * To initialize a mutex_t structure
532 * mutex: pointer to the mutex_t structure
541 static inline void init_mutex(mutex_t *mutex)
549 * To acquire the mutex lock
552 * mutex: pointer to the mutex_t structure
561 static inline void mutex_down(mutex_t *mutex)
569 * To release the mutex lock (acquired already)
572 * mutex: pointer to the mutex_t structure
581 static inline void mutex_up(mutex_t *mutex)
589 * To initialize the mutex as acquired state
592 * mutex: pointer to the mutex_t structure
601 static inline init_mutex_locked(mutex_t *mutex)
610 * - init_complition(c)
612 * - wait_for_completion(c)
622 * To initialize the completion object
625 * c: pointer to the completion structure
634 static inline void init_completion(struct completion *c)
636 cfs_init_event(&(c->event), 1, FALSE);
642 * To complete/signal the completion object
645 * c: pointer to the completion structure
654 static inline void complete(struct completion *c)
656 cfs_wake_event(&(c->event));
660 * wait_for_completion
661 * To wait on the completion object. If the event is signaled,
662 * this function will return to the call with the event un-singled.
665 * c: pointer to the completion structure
674 static inline void wait_for_completion(struct completion *c)
676 cfs_wait_event(&(c->event), 0);
#include "../user-lock.h"