/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/user-lock.h
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 */
#ifndef __LIBCFS_USER_LOCK_H__
#define __LIBCFS_USER_LOCK_H__
#ifndef __LIBCFS_LIBCFS_H__
#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
#endif
/* Implementations of portable synchronization APIs for liblustre */

/*
 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
 *
 * XXX Liang: several branches share lnet with b_hd_newconfig; if we define
 * the lock APIs here, they will conflict with liblustre.
 */
/*
 * The userspace implementations of linux/spinlock.h vary; we just
 * include our own for all of them.
 */
#define __LINUX_SPINLOCK_H
/*
 * spin_lock
 *
 * - spin_lock_init(x)
 * - spin_lock(x)
 * - spin_unlock(x)
 * - spin_trylock(x)
 * - spin_lock_bh_init(x)
 * - spin_lock_bh(x)
 * - spin_unlock_bh(x)
 * - spin_is_locked(x)
 * - spin_lock_irqsave(x, f)
 * - spin_unlock_irqrestore(x, f)
 *
 * Optional debugging (magic stamping and checking ownership) can be added.
 *
 * No-op implementation.
 */
struct spin_lock { int foo; };

typedef struct spin_lock spinlock_t;

#define DEFINE_SPINLOCK(lock)		spinlock_t lock = { }
#define LASSERT_SPIN_LOCKED(lock)	do { (void)sizeof(lock); } while (0)
#define LINVRNT_SPIN_LOCKED(lock)	do { (void)sizeof(lock); } while (0)
#define LASSERT_SEM_LOCKED(sem)		do { (void)sizeof(sem); } while (0)
#define LASSERT_MUTEX_LOCKED(x)		do { (void)sizeof(x); } while (0)
void spin_lock_init(spinlock_t *lock);
void spin_lock(spinlock_t *lock);
void spin_unlock(spinlock_t *lock);
int spin_trylock(spinlock_t *lock);
void spin_lock_bh_init(spinlock_t *lock);
void spin_lock_bh(spinlock_t *lock);
void spin_unlock_bh(spinlock_t *lock);
static inline int spin_is_locked(spinlock_t *l) { return 1; }
static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f) {}
static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f) {}
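
/*
 * Usage sketch (illustrative only; `lock' is a hypothetical variable).
 * In liblustre these calls compile to no-ops, but callers keep the kernel
 * locking discipline so the same code builds against the real primitives:
 *
 *	spinlock_t lock;
 *
 *	spin_lock_init(&lock);
 *	spin_lock(&lock);
 *	... critical section ...
 *	spin_unlock(&lock);
 */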
/*
 * Semaphore (trivial in the single-threaded case; the struct mirrors the
 * dummy lock types above).
 */
struct semaphore { int foo; };

void sema_init(struct semaphore *s, int val);
void __up(struct semaphore *s);
void __down(struct semaphore *s);
int __down_interruptible(struct semaphore *s);
#define DEFINE_SEMAPHORE(name)	struct semaphore name = { 1 }

#define up(s)			__up(s)
#define down(s)			__down(s)
#define down_interruptible(s)	__down_interruptible(s)
static inline int down_trylock(struct semaphore *sem)
{
	return 0;	/* kernel convention: 0 means the lock was taken */
}
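
/*
 * Usage sketch (illustrative; `sem' is hypothetical): a counting semaphore
 * initialized to 1 acts as a simple lock:
 *
 *	struct semaphore sem;
 *
 *	sema_init(&sem, 1);
 *	down(&sem);
 *	... critical section ...
 *	up(&sem);
 */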
/*
 * Completion:
 *
 * - init_completion_module(handler)
 * - call_wait_handler(timeout)
 * - init_completion(c)
 * - fini_completion(c)
 * - complete(c)
 * - wait_for_completion(c)
 * - wait_for_completion_interruptible(c)
 */
#ifdef HAVE_LIBPTHREAD

/*
 * Multi-threaded user space completion APIs
 */
struct completion {
	int		c_done;
	pthread_cond_t	c_cond;
	pthread_mutex_t	c_mut;
};

#else /* !HAVE_LIBPTHREAD */

struct completion {
	unsigned int	done;
	cfs_waitq_t	wait;
};

#endif /* HAVE_LIBPTHREAD */
typedef int (*wait_handler_t) (int timeout);
void init_completion_module(wait_handler_t handler);
int call_wait_handler(int timeout);
void init_completion(struct completion *c);
void fini_completion(struct completion *c);
void complete(struct completion *c);
void wait_for_completion(struct completion *c);
int wait_for_completion_interruptible(struct completion *c);
#define COMPLETION_INITIALIZER(work) \
	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

#define INIT_COMPLETION(x)	((x).done = 0)
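
/*
 * Usage sketch (illustrative; `event' is hypothetical). With
 * HAVE_LIBPTHREAD a waiter blocks until another thread signals:
 *
 *	struct completion event;
 *
 *	init_completion(&event);
 *	... start worker, which eventually calls complete(&event) ...
 *	wait_for_completion(&event);
 *	fini_completion(&event);
 */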
/*
 * rw_semaphore:
 *
 * - init_rwsem(x)
 * - down_read(x)
 * - down_read_trylock(x)
 * - down_write(x)
 * - down_write_trylock(x)
 * - up_read(x)
 * - up_write(x)
 * - fini_rwsem(x)
 */
struct rw_semaphore { int foo; };
void init_rwsem(struct rw_semaphore *s);
void down_read(struct rw_semaphore *s);
int down_read_trylock(struct rw_semaphore *s);
void down_write(struct rw_semaphore *s);
int down_write_trylock(struct rw_semaphore *s);
void up_read(struct rw_semaphore *s);
void up_write(struct rw_semaphore *s);
void fini_rwsem(struct rw_semaphore *s);
#define DECLARE_RWSEM(name)	struct rw_semaphore name = { }
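
/*
 * Usage sketch (illustrative; `rws' is hypothetical): readers can share
 * the semaphore, while a writer holds it exclusively:
 *
 *	struct rw_semaphore rws;
 *
 *	init_rwsem(&rws);
 *	down_read(&rws);
 *	... read shared state ...
 *	up_read(&rws);
 *	fini_rwsem(&rws);
 */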
/*
 * read-write lock: needs more investigation!
 *
 * XXX nikita: for now, rwlock_t is identical to rw_semaphore
 *
 * - rwlock_init(x)
 * - read_lock(x)
 * - read_unlock(x)
 * - write_lock(x)
 * - write_unlock(x)
 * - write_lock_irqsave(x, f)
 * - write_unlock_irqrestore(x, f)
 * - read_lock_irqsave(x, f)
 * - read_unlock_irqrestore(x, f)
 */
#define rwlock_t		struct rw_semaphore
#define DEFINE_RWLOCK(lock)	rwlock_t lock = { }

#define rwlock_init(pl)		init_rwsem(pl)

#define read_lock(l)		down_read(l)
#define read_unlock(l)		up_read(l)
#define write_lock(l)		down_write(l)
#define write_unlock(l)		up_write(l)
static inline void write_lock_irqsave(rwlock_t *l, unsigned long f)	{ write_lock(l); }
static inline void write_unlock_irqrestore(rwlock_t *l, unsigned long f) { write_unlock(l); }
static inline void read_lock_irqsave(rwlock_t *l, unsigned long f)	{ read_lock(l); }
static inline void read_unlock_irqrestore(rwlock_t *l, unsigned long f) { read_unlock(l); }
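
/*
 * Usage sketch (illustrative; `rwl' and `flags' are hypothetical). The
 * flags argument exists only for kernel-API compatibility and is ignored
 * here:
 *
 *	rwlock_t rwl;
 *	unsigned long flags;
 *
 *	rwlock_init(&rwl);
 *	write_lock_irqsave(&rwl, flags);
 *	... modify shared state ...
 *	write_unlock_irqrestore(&rwl, flags);
 */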
/*
 * Atomic operations for single-threaded user space.
 */
typedef struct { volatile int counter; } cfs_atomic_t;

#define CFS_ATOMIC_INIT(i)		{ (i) }

#define cfs_atomic_read(a)		((a)->counter)
#define cfs_atomic_set(a, b)		do { (a)->counter = (b); } while (0)
#define cfs_atomic_dec_and_test(a)	(--((a)->counter) == 0)
/* single-threaded, so the lock argument is ignored: decrement and test */
#define cfs_atomic_dec_and_lock(a, b)	(--((a)->counter) == 0)
#define cfs_atomic_inc(a)		(((a)->counter)++)
#define cfs_atomic_dec(a)		do { (a)->counter--; } while (0)
#define cfs_atomic_add(b, a)		do { (a)->counter += (b); } while (0)
#define cfs_atomic_add_return(n, a)	((a)->counter += (n))
#define cfs_atomic_inc_return(a)	cfs_atomic_add_return(1, a)
#define cfs_atomic_sub(b, a)		do { (a)->counter -= (b); } while (0)
#define cfs_atomic_sub_return(n, a)	((a)->counter -= (n))
#define cfs_atomic_dec_return(a)	cfs_atomic_sub_return(1, a)
/* returns 1 if the add was performed, 0 if counter already equalled u */
#define cfs_atomic_add_unless(v, a, u)	\
	((v)->counter != (u) ? ((v)->counter += (a), 1) : 0)
#define cfs_atomic_inc_not_zero(v)	cfs_atomic_add_unless((v), 1, 0)
/* returns the old value; stores nv only when the old value equalled ov */
#define cfs_atomic_cmpxchg(v, ov, nv)	\
	((v)->counter == (ov) ? ((v)->counter = (nv), (ov)) : (v)->counter)
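
/*
 * Usage sketch (illustrative; `refs' is hypothetical): single-threaded
 * reference counting:
 *
 *	cfs_atomic_t refs = CFS_ATOMIC_INIT(1);
 *
 *	cfs_atomic_inc(&refs);			take an extra reference
 *	if (cfs_atomic_dec_and_test(&refs))
 *		... last reference gone, free the object ...
 */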
#ifdef HAVE_LIBPTHREAD

/*
 * Multi-threaded user space atomic APIs
 */
typedef struct { volatile int counter; } mt_atomic_t;

int mt_atomic_read(mt_atomic_t *a);
void mt_atomic_set(mt_atomic_t *a, int b);
int mt_atomic_dec_and_test(mt_atomic_t *a);
void mt_atomic_inc(mt_atomic_t *a);
void mt_atomic_dec(mt_atomic_t *a);
void mt_atomic_add(int b, mt_atomic_t *a);
void mt_atomic_sub(int b, mt_atomic_t *a);

#endif /* HAVE_LIBPTHREAD */
/**************************************************************************
 *
 * Mutex interface.
 *
 **************************************************************************/
#define mutex			semaphore

#define DEFINE_MUTEX(m)		DEFINE_SEMAPHORE(m)
static inline void mutex_init(struct mutex *mutex)	{ sema_init(mutex, 1); }
static inline void mutex_lock(struct mutex *mutex)	{ down(mutex); }
static inline void mutex_unlock(struct mutex *mutex)	{ up(mutex); }

static inline int mutex_lock_interruptible(struct mutex *mutex)
{
	return down_interruptible(mutex);
}
/**
 * Try-lock this mutex.
 *
 * Note: the return values are the negation of those of down_trylock() and
 * pthread_mutex_trylock().
 *
 * \retval 1 try-lock succeeded (lock acquired).
 * \retval 0 indicates lock contention.
 */
static inline int mutex_trylock(struct mutex *mutex)
{
	return !down_trylock(mutex);
}
static inline void mutex_destroy(struct mutex *lock) { }
/**
 * This is for use in assertions _only_, i.e., this function should always
 * return 1.
 *
 * \retval 1 mutex is locked.
 * \retval 0 mutex is not locked. This should never happen.
 */
static inline int mutex_is_locked(struct mutex *lock)
{
	return 1;
}
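
/*
 * Usage sketch (illustrative; `mtx' is hypothetical). Note that
 * mutex_trylock() returns 1 on success, unlike down_trylock():
 *
 *	struct mutex mtx;
 *
 *	mutex_init(&mtx);
 *	if (mutex_trylock(&mtx)) {
 *		LASSERT(mutex_is_locked(&mtx));
 *		... critical section ...
 *		mutex_unlock(&mtx);
 *	}
 */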
/**************************************************************************
 *
 * Lockdep "implementation". Also see lustre_compat25.h
 *
 **************************************************************************/
struct lock_class_key { int foo; };
static inline void lockdep_set_class(void *lock, struct lock_class_key *key) { }
static inline void lockdep_off(void) { }
static inline void lockdep_on(void) { }
#define mutex_lock_nested(mutex, subclass)	mutex_lock(mutex)
#define spin_lock_nested(lock, subclass)	spin_lock(lock)
#define down_read_nested(lock, subclass)	down_read(lock)
#define down_write_nested(lock, subclass)	down_write(lock)
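
/*
 * Usage sketch (illustrative; `child' and the subclass value are
 * hypothetical). The _nested variants only preserve kernel lockdep
 * annotations; the subclass argument is discarded here:
 *
 *	down_write_nested(&child->rwsem, 1);	same as down_write()
 */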
#endif /* __LIBCFS_USER_LOCK_H__ */
/*
 * Local variables:
 * c-indentation-style: "K&R"
 * End:
 */