/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/user-lock.h
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 */

#ifndef __LIBCFS_USER_LOCK_H__
#define __LIBCFS_USER_LOCK_H__

#ifndef __LIBCFS_LIBCFS_H__
#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
#endif

/* Implementations of portable synchronization APIs for liblustre */

/*
 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
 *
 * XXX Liang: several branches share lnet with b_hd_newconfig; if we define
 * lock APIs here, they will conflict with liblustre in other branches.
 */

/*
 * The userspace implementations of linux/spinlock.h vary; we just
 * include our own for all of them.
 */
#define __LINUX_SPINLOCK_H

/*
 * Optional debugging (magic stamping and ownership checking) can be added.
 */

/*
 * spin_lock
 *
 * - spin_lock_init(x)
 * - spin_lock(x)
 * - spin_unlock(x)
 * - spin_trylock(x)
 * - spin_lock_bh_init(x)
 * - spin_lock_bh(x)
 * - spin_unlock_bh(x)
 *
 * - spin_is_locked(x)
 * - spin_lock_irqsave(x, f)
 * - spin_unlock_irqrestore(x, f)
 *
 * No-op implementation.
 */
struct spin_lock { int foo; };

typedef struct spin_lock spinlock_t;

#define DEFINE_SPINLOCK(lock)		spinlock_t lock = { }
#define LASSERT_SPIN_LOCKED(lock)	do { (void)sizeof(lock); } while (0)
#define LINVRNT_SPIN_LOCKED(lock)	do { (void)sizeof(lock); } while (0)
#define LASSERT_SEM_LOCKED(sem)		do { (void)sizeof(sem); } while (0)
#define LASSERT_MUTEX_LOCKED(x)		do { (void)sizeof(x); } while (0)

void spin_lock_init(spinlock_t *lock);
void spin_lock(spinlock_t *lock);
void spin_unlock(spinlock_t *lock);
int spin_trylock(spinlock_t *lock);
void spin_lock_bh_init(spinlock_t *lock);
void spin_lock_bh(spinlock_t *lock);
void spin_unlock_bh(spinlock_t *lock);

static inline int spin_is_locked(spinlock_t *l) { return 1; }
static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f) {}
static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f) {}
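
/*
 * Usage sketch (illustrative only; "lock" and the critical section are
 * hypothetical). The calls compile to no-ops in this single-threaded
 * build, but callers keep the same locking discipline as the kernel API:
 *
 *	spinlock_t lock;
 *
 *	spin_lock_init(&lock);
 *	if (spin_trylock(&lock)) {
 *		... critical section ...
 *		spin_unlock(&lock);
 *	}
 */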

/*
 * Semaphore:
 *
 * - sema_init(x, v)
 * - __up(x)
 * - __down(x)
 * - __down_interruptible(x)
 */
struct semaphore {
	int foo;
};

void sema_init(struct semaphore *s, int val);
void __up(struct semaphore *s);
void __down(struct semaphore *s);
int __down_interruptible(struct semaphore *s);

#define DEFINE_SEMAPHORE(name)		struct semaphore name = { 1 }

#define up(s)				__up(s)
#define down(s)				__down(s)
#define down_interruptible(s)		__down_interruptible(s)

static inline int down_trylock(struct semaphore *sem)
{
	return 0;	/* 0 == acquired, matching kernel down_trylock() */
}
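
/*
 * Usage sketch (illustrative only; "sem" is hypothetical): a semaphore
 * initialized to 1 acts as a mutex; in this single-threaded build
 * down()/up() cannot block.
 *
 *	DEFINE_SEMAPHORE(sem);
 *
 *	down(&sem);
 *	... exclusive section ...
 *	up(&sem);
 */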

/*
 * Completion:
 *
 * - init_completion_module(c)
 * - call_wait_handler(t)
 * - init_completion(c)
 * - complete(c)
 * - wait_for_completion(c)
 * - wait_for_completion_interruptible(c)
 */
struct completion {
	unsigned int done;
	cfs_waitq_t wait;
};

typedef int (*wait_handler_t) (int timeout);
void init_completion_module(wait_handler_t handler);
int call_wait_handler(int timeout);
void init_completion(struct completion *c);
void complete(struct completion *c);
void wait_for_completion(struct completion *c);
int wait_for_completion_interruptible(struct completion *c);

#define COMPLETION_INITIALIZER(work) \
	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

#define INIT_COMPLETION(x)	((x).done = 0)
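
/*
 * Usage sketch (illustrative only; the producer side is hypothetical --
 * in liblustre the wait handler registered via init_completion_module()
 * drives progress while waiting):
 *
 *	struct completion c;
 *
 *	init_completion(&c);
 *	... start work that will eventually call complete(&c) ...
 *	wait_for_completion(&c);
 */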

/*
 * rw_semaphore:
 *
 * - init_rwsem(x)
 * - down_read(x)
 * - down_read_trylock(x)
 * - down_write(x)
 * - down_write_trylock(x)
 * - up_read(x)
 * - up_write(x)
 * - fini_rwsem(x)
 */
struct rw_semaphore {
	int foo;
};

void init_rwsem(struct rw_semaphore *s);
void down_read(struct rw_semaphore *s);
int down_read_trylock(struct rw_semaphore *s);
void down_write(struct rw_semaphore *s);
int down_write_trylock(struct rw_semaphore *s);
void up_read(struct rw_semaphore *s);
void up_write(struct rw_semaphore *s);
void fini_rwsem(struct rw_semaphore *s);
#define DECLARE_RWSEM(name)	struct rw_semaphore name = { }
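
/*
 * Usage sketch (illustrative only; "sem" is hypothetical): readers and
 * writers follow the usual kernel discipline even though the liblustre
 * implementation is trivial.
 *
 *	DECLARE_RWSEM(sem);
 *
 *	down_read(&sem);
 *	... read shared state ...
 *	up_read(&sem);
 *
 *	down_write(&sem);
 *	... modify shared state ...
 *	up_write(&sem);
 */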

/*
 * read-write lock: needs more investigation!
 * XXX nikita: for now, let rwlock_t be identical to rw_semaphore
 *
 * - rwlock_init(x)
 * - read_lock(x)
 * - read_unlock(x)
 * - write_lock(x)
 * - write_unlock(x)
 * - write_lock_irqsave(x, f)
 * - write_unlock_irqrestore(x, f)
 * - read_lock_irqsave(x, f)
 * - read_unlock_irqrestore(x, f)
 */
#define rwlock_t		struct rw_semaphore
#define DEFINE_RWLOCK(lock)	rwlock_t lock = { }

#define rwlock_init(pl)		init_rwsem(pl)

#define read_lock(l)		down_read(l)
#define read_unlock(l)		up_read(l)
#define write_lock(l)		down_write(l)
#define write_unlock(l)		up_write(l)

static inline void write_lock_irqsave(rwlock_t *l, unsigned long f)
{
	write_lock(l);
}

static inline void write_unlock_irqrestore(rwlock_t *l, unsigned long f)
{
	write_unlock(l);
}

static inline void read_lock_irqsave(rwlock_t *l, unsigned long f)
{
	read_lock(l);
}

static inline void read_unlock_irqrestore(rwlock_t *l, unsigned long f)
{
	read_unlock(l);
}
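
/*
 * Design note (sketch; "rw" and "flags" are hypothetical): there are no
 * interrupts to mask in user space, so the _irqsave/_irqrestore variants
 * ignore the flags argument and reduce to the plain lock/unlock calls:
 *
 *	unsigned long flags;
 *
 *	write_lock_irqsave(&rw, flags);
 *	... writer section ...
 *	write_unlock_irqrestore(&rw, flags);
 */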

/*
 * Atomic operations for single-threaded user space.
 */
typedef struct { volatile int counter; } cfs_atomic_t;

#define CFS_ATOMIC_INIT(i)		{ (i) }

#define cfs_atomic_read(a)		((a)->counter)
#define cfs_atomic_set(a, b)		do { (a)->counter = (b); } while (0)
#define cfs_atomic_dec_and_test(a)	(--((a)->counter) == 0)
#define cfs_atomic_dec_and_lock(a, b)	(--((a)->counter) == 0)
#define cfs_atomic_inc(a)		(((a)->counter)++)
#define cfs_atomic_dec(a)		do { (a)->counter--; } while (0)
#define cfs_atomic_add(b, a)		do { (a)->counter += (b); } while (0)
#define cfs_atomic_add_return(n, a)	((a)->counter += (n))
#define cfs_atomic_inc_return(a)	cfs_atomic_add_return(1, a)
#define cfs_atomic_sub(b, a)		do { (a)->counter -= (b); } while (0)
#define cfs_atomic_sub_return(n, a)	((a)->counter -= (n))
#define cfs_atomic_dec_return(a)	cfs_atomic_sub_return(1, a)
#define cfs_atomic_add_unless(v, a, u) \
	((v)->counter != (u) ? ((v)->counter += (a)) : 0)
#define cfs_atomic_inc_not_zero(v)	cfs_atomic_add_unless((v), 1, 0)
#define cfs_atomic_cmpxchg(v, ov, nv) \
	((v)->counter == (ov) ? ((v)->counter = (nv), (ov)) : (v)->counter)

#ifdef HAVE_LIBPTHREAD
#include <pthread.h>

/*
 * Multi-threaded user space completion APIs
 */

typedef struct {
	int		c_done;
	pthread_cond_t	c_cond;
	pthread_mutex_t	c_mut;
} mt_completion_t;

void mt_init_completion(mt_completion_t *c);
void mt_fini_completion(mt_completion_t *c);
void mt_complete(mt_completion_t *c);
void mt_wait_for_completion(mt_completion_t *c);
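
/*
 * Usage sketch (illustrative only; the worker thread is hypothetical):
 * one thread waits while another signals.
 *
 *	mt_completion_t c;
 *
 *	mt_init_completion(&c);
 *	... spawn a worker that calls mt_complete(&c) when done ...
 *	mt_wait_for_completion(&c);
 *	mt_fini_completion(&c);
 */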

/*
 * Multi-threaded user space atomic APIs
 */

typedef struct { volatile int counter; } mt_atomic_t;

int mt_atomic_read(mt_atomic_t *a);
void mt_atomic_set(mt_atomic_t *a, int b);
int mt_atomic_dec_and_test(mt_atomic_t *a);
void mt_atomic_inc(mt_atomic_t *a);
void mt_atomic_dec(mt_atomic_t *a);
void mt_atomic_add(int b, mt_atomic_t *a);
void mt_atomic_sub(int b, mt_atomic_t *a);
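
/*
 * Usage sketch (illustrative only; "ref" is hypothetical): the same
 * refcount pattern as above, presumably safe across pthreads because the
 * mt_ variants are out-of-line and can serialize internally.
 *
 *	mt_atomic_t ref;
 *
 *	mt_atomic_set(&ref, 1);
 *	mt_atomic_inc(&ref);
 *	if (mt_atomic_dec_and_test(&ref))
 *		... last reference dropped ...
 */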

#endif /* HAVE_LIBPTHREAD */

/**************************************************************************
 *
 * Mutex interface.
 *
 **************************************************************************/

#define mutex semaphore

#define DEFINE_MUTEX(m)		DEFINE_SEMAPHORE(m)

static inline void mutex_init(struct mutex *mutex)
{
	sema_init(mutex, 1);
}

static inline void mutex_lock(struct mutex *mutex)
{
	down(mutex);
}

static inline void mutex_unlock(struct mutex *mutex)
{
	up(mutex);
}

static inline int mutex_lock_interruptible(struct mutex *mutex)
{
	return down_interruptible(mutex);
}

/**
 * Try-lock this mutex.
 *
 * Note that the return values are the negation of what down_trylock() and
 * pthread_mutex_trylock() return.
 *
 * \retval 1 try-lock succeeded (lock acquired).
 * \retval 0 indicates lock contention.
 */
static inline int mutex_trylock(struct mutex *mutex)
{
	return !down_trylock(mutex);
}

static inline void mutex_destroy(struct mutex *lock)
{
	sema_init(lock, 1);
}

/**
 * This is for use in assertions _only_, i.e., this function should always
 * return 1.
 *
 * \retval 1 mutex is locked.
 *
 * \retval 0 mutex is not locked. This should never happen.
 */
static inline int mutex_is_locked(struct mutex *lock)
{
	return 1;
}
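
/*
 * Usage sketch (illustrative only; "m" is hypothetical): the mutex is a
 * semaphore initialized to 1, so the familiar lock/unlock pairing applies.
 *
 *	DEFINE_MUTEX(m);
 *
 *	mutex_lock(&m);
 *	LASSERT(mutex_is_locked(&m));
 *	... exclusive section ...
 *	mutex_unlock(&m);
 */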

/**************************************************************************
 *
 * Lockdep "implementation". Also see lustre_compat25.h
 *
 **************************************************************************/

struct lock_class_key {
	int foo;
};

static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

#define mutex_lock_nested(mutex, subclass)	mutex_lock(mutex)
#define spin_lock_nested(lock, subclass)	spin_lock(lock)
#define down_read_nested(lock, subclass)	down_read(lock)
#define down_write_nested(lock, subclass)	down_write(lock)
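
/*
 * Usage sketch (illustrative only; "child->d_mutex" is hypothetical): the
 * _nested variants simply drop the lockdep subclass annotation, so
 * kernel-style call sites compile unchanged:
 *
 *	mutex_lock_nested(&child->d_mutex, SINGLE_DEPTH_NESTING);
 *
 * Any subclass argument is ignored in user space.
 */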

#endif /* __LIBCFS_USER_LOCK_H__ */

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 80
 * scroll-step: 1
 * End:
 */