4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/include/libcfs/user-lock.h
38 * Author: Nikita Danilov <nikita@clusterfs.com>
41 #ifndef __LIBCFS_USER_LOCK_H__
42 #define __LIBCFS_USER_LOCK_H__
44 #ifndef __LIBCFS_LIBCFS_H__
45 #error Do not #include this file directly. #include <libcfs/libcfs.h> instead
48 /* Implementations of portable synchronization APIs for liblustre */
51 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
53 * XXX Liang: There are several branches share lnet with b_hd_newconfig,
54 * if we define lock APIs at here, there will be conflict with liblustre
61 * The userspace implementations of linux/spinlock.h vary; we just
62 * include our own for all of them
64 #define __LINUX_SPINLOCK_H
67 * Optional debugging (magic stamping and checking ownership) can be added.
77 * - spin_lock_bh_init(x)
82 * - spin_lock_irqsave(x, f)
83 * - spin_unlock_irqrestore(x, f)
85 * No-op implementation.
/*
 * Dummy spinlock: liblustre is single-threaded (see the note above), so no
 * real locking is required.  The struct carries a single unused member
 * because an empty struct is not legal ISO C.
 */
struct spin_lock { int foo; };

typedef struct spin_lock spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = { }
/*
 * "Is locked" assertions compile down to nothing; (void)sizeof(x)
 * type-checks the argument at compile time without evaluating it.
 */
#define LASSERT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
#define LINVRNT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
#define LASSERT_SEM_LOCKED(sem) do { (void)sizeof(sem); } while (0)
#define LASSERT_MUTEX_LOCKED(x) do { (void)sizeof(x); } while (0)

/* No-op lock primitives; implemented out of line in the user-lock source. */
void spin_lock_init(spinlock_t *lock);
void spin_lock(spinlock_t *lock);
void spin_unlock(spinlock_t *lock);
int spin_trylock(spinlock_t *lock);
void spin_lock_bh_init(spinlock_t *lock);
void spin_lock_bh(spinlock_t *lock);
void spin_unlock_bh(spinlock_t *lock);
105 static inline int spin_is_locked(spinlock_t *l) { return 1; }
106 static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f) {}
107 static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f) {}
/*
 * Semaphore API; struct semaphore is defined earlier in this header.
 * sema_init() presumably sets the initial count to "val" -- confirm in
 * the implementation.
 */
void sema_init(struct semaphore *s, int val);
void __up(struct semaphore *s);
void __down(struct semaphore *s);
int __down_interruptible(struct semaphore *s);

/* { 1 } initializes the first member (presumably the count) to 1,
 * i.e. an unlocked binary semaphore -- confirm against the struct. */
#define DEFINE_SEMAPHORE(name) struct semaphore name = { 1 }

/* Map the kernel names onto the liblustre implementations above. */
#define up(s) __up(s)
#define down(s) __down(s)
#define down_interruptible(s) __down_interruptible(s)
131 static inline int down_trylock(struct semaphore *sem)
139 * - init_completion_module(c)
140 * - call_wait_handler(t)
141 * - init_completion(c)
143 * - wait_for_completion(c)
144 * - wait_for_completion_interruptible(c)
/*
 * Completion API.
 *
 * A pluggable wait handler lets the application decide how to wait:
 * init_completion_module() installs the handler and call_wait_handler()
 * invokes it with a timeout.
 */
typedef int (*wait_handler_t) (int timeout);
void init_completion_module(wait_handler_t handler);
int call_wait_handler(int timeout);
void init_completion(struct completion *c);
void complete(struct completion *c);
void wait_for_completion(struct completion *c);
int wait_for_completion_interruptible(struct completion *c);

/* First member is the "done" count (see INIT_COMPLETION below), followed
 * by the embedded wait queue head. */
#define COMPLETION_INITIALIZER(work) \
	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

/* Reset an already-initialized completion for reuse. */
#define INIT_COMPLETION(x) ((x).done = 0)
171 * - down_read_trylock(x)
172 * - down_write(struct rw_semaphore *s);
173 * - down_write_trylock(struct rw_semaphore *s);
178 struct rw_semaphore {
/* Reader/writer semaphore operations; trivial in single-threaded mode and
 * implemented out of line. */
void init_rwsem(struct rw_semaphore *s);
void down_read(struct rw_semaphore *s);
int down_read_trylock(struct rw_semaphore *s);
void down_write(struct rw_semaphore *s);
int down_write_trylock(struct rw_semaphore *s);
void up_read(struct rw_semaphore *s);
void up_write(struct rw_semaphore *s);
void fini_rwsem(struct rw_semaphore *s);
#define DECLARE_RWSEM(name) struct rw_semaphore name = { }
193 * read-write lock : Need to be investigated more!!
194 * XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
201 * - write_lock_irqsave(x)
202 * - write_unlock_irqrestore(x)
203 * - read_lock_irqsave(x)
204 * - read_unlock_irqrestore(x)
/*
 * rwlock_t is implemented on top of rw_semaphore (see the XXX note above):
 * read_lock/read_unlock map to down_read/up_read, and write_lock/
 * write_unlock map to down_write/up_write.
 */
#define rwlock_t struct rw_semaphore
#define DEFINE_RWLOCK(lock) rwlock_t lock = { }

#define rwlock_init(pl) init_rwsem(pl)

#define read_lock(l) down_read(l)
#define read_unlock(l) up_read(l)
#define write_lock(l) down_write(l)
#define write_unlock(l) up_write(l)
216 static inline void write_lock_irqsave(rwlock_t *l, unsigned long f)
221 static inline void write_unlock_irqrestore(rwlock_t *l, unsigned long f)
226 static inline void read_lock_irqsave(rwlock_t *l, unsigned long f)
231 static inline void read_unlock_irqrestore(rwlock_t *l, unsigned long f)
237 * Atomic for single-threaded user-space
/*
 * Atomic operations for single-threaded user space.
 *
 * No actual atomicity is provided (or needed) here.  All macro parameter
 * uses are parenthesized so that compound argument expressions keep their
 * intended precedence; note that arguments may still be evaluated more
 * than once, so side-effecting arguments must be avoided.
 */
typedef struct { volatile int counter; } cfs_atomic_t;

#define CFS_ATOMIC_INIT(i) { (i) }

#define cfs_atomic_read(a) ((a)->counter)
#define cfs_atomic_set(a, b) do { (a)->counter = (b); } while (0)
/* decrement and report whether the counter reached zero */
#define cfs_atomic_dec_and_test(a) ((--((a)->counter)) == 0)
/* single-threaded: the lock argument "b" is ignored */
#define cfs_atomic_dec_and_lock(a, b) ((--((a)->counter)) == 0)
#define cfs_atomic_inc(a) (((a)->counter)++)
#define cfs_atomic_dec(a) do { (a)->counter--; } while (0)
#define cfs_atomic_add(b, a) do { (a)->counter += (b); } while (0)
#define cfs_atomic_add_return(n, a) ((a)->counter += (n))
#define cfs_atomic_inc_return(a) cfs_atomic_add_return(1, a)
#define cfs_atomic_sub(b, a) do { (a)->counter -= (b); } while (0)
#define cfs_atomic_sub_return(n, a) ((a)->counter -= (n))
#define cfs_atomic_dec_return(a) cfs_atomic_sub_return(1, a)
/* add "a" to the counter unless it equals "u"; evaluates to the updated
 * counter when the add happened, 0 otherwise */
#define cfs_atomic_add_unless(v, a, u) \
	((v)->counter != (u) ? (v)->counter += (a) : 0)
#define cfs_atomic_inc_not_zero(v) cfs_atomic_add_unless((v), 1, 0)
/* if counter == ov, set it to nv and yield ov; else yield current value */
#define cfs_atomic_cmpxchg(v, ov, nv) \
	((v)->counter == (ov) ? ((v)->counter = (nv), (ov)) : (v)->counter)
261 #ifdef HAVE_LIBPTHREAD
265 * Multi-threaded user space completion APIs
270 pthread_cond_t c_cond;
271 pthread_mutex_t c_mut;
/* Completion operations backed by the pthread condvar/mutex pair shown in
 * the members above; implemented out of line. */
void mt_init_completion(mt_completion_t *c);
void mt_fini_completion(mt_completion_t *c);
void mt_complete(mt_completion_t *c);
void mt_wait_for_completion(mt_completion_t *c);
280 * Multi-threaded user space atomic APIs
/* Atomic counter for multi-threaded use; operations are implemented out of
 * line (presumably serialized with pthread locking -- confirm in the
 * implementation). */
typedef struct { volatile int counter; } mt_atomic_t;
int mt_atomic_read(mt_atomic_t *a);
void mt_atomic_set(mt_atomic_t *a, int b);
int mt_atomic_dec_and_test(mt_atomic_t *a);
void mt_atomic_inc(mt_atomic_t *a);
void mt_atomic_dec(mt_atomic_t *a);
void mt_atomic_add(int b, mt_atomic_t *a);
void mt_atomic_sub(int b, mt_atomic_t *a);
293 #endif /* HAVE_LIBPTHREAD */
295 /**************************************************************************
299 **************************************************************************/
/*
 * Mutex is emulated with a semaphore: the token "mutex" is rewritten to
 * "semaphore", so "struct mutex" becomes struct semaphore, and
 * DEFINE_MUTEX() expands to DEFINE_SEMAPHORE().
 */
#define mutex semaphore

#define DEFINE_MUTEX(m) DEFINE_SEMAPHORE(m)
304 static inline void mutex_init(struct mutex *mutex)
309 static inline void mutex_lock(struct mutex *mutex)
314 static inline void mutex_unlock(struct mutex *mutex)
319 static inline int mutex_lock_interruptible(struct mutex *mutex)
321 return down_interruptible(mutex);
325 * Try-lock this mutex.
327 * Note, return values are negation of what is expected from down_trylock() or
328 * pthread_mutex_trylock().
330 * \retval 1 try-lock succeeded (lock acquired).
331 * \retval 0 indicates lock contention.
333 static inline int mutex_trylock(struct mutex *mutex)
335 return !down_trylock(mutex);
338 static inline void mutex_destroy(struct mutex *lock)
343 * This is for use in assertions _only_, i.e., this function should always
346 * \retval 1 mutex is locked.
348 * \retval 0 mutex is not locked. This should never happen.
350 static inline int mutex_is_locked(struct mutex *lock)
356 /**************************************************************************
358 * Lockdep "implementation". Also see lustre_compat25.h
360 **************************************************************************/
362 struct lock_class_key {
366 static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
370 static inline void lockdep_off(void)
374 static inline void lockdep_on(void)
/* Lockdep subclass annotations carry no meaning here: the _nested forms
 * discard the subclass argument and take the plain lock. */
#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
#define spin_lock_nested(lock, subclass) spin_lock(lock)
#define down_read_nested(lock, subclass) down_read(lock)
#define down_write_nested(lock, subclass) down_write(lock)
387 /* __LIBCFS_USER_LOCK_H__ */
391 * c-indentation-style: "K&R"