1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/include/libcfs/user-lock.h
38 * Author: Nikita Danilov <nikita@clusterfs.com>
41 #ifndef __LIBCFS_USER_LOCK_H__
42 #define __LIBCFS_USER_LOCK_H__
44 #ifndef __LIBCFS_LIBCFS_H__
45 #error Do not #include this file directly. #include <libcfs/libcfs.h> instead
48 /* Implementations of portable synchronization APIs for liblustre */
51 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
 * XXX Liang: several branches share lnet with b_hd_newconfig; if we
 * define lock APIs here, they will conflict with liblustre
61 * The userspace implementations of linux/spinlock.h vary; we just
62 * include our own for all of them
64 #define __LINUX_SPINLOCK_H
67 * Optional debugging (magic stamping and checking ownership) can be added.
73 * - cfs_spin_lock_init(x)
75 * - cfs_spin_unlock(x)
76 * - cfs_spin_trylock(x)
77 * - cfs_spin_lock_bh_init(x)
78 * - cfs_spin_lock_bh(x)
79 * - cfs_spin_unlock_bh(x)
81 * - cfs_spin_is_locked(x)
82 * - cfs_spin_lock_irqsave(x, f)
83 * - cfs_spin_unlock_irqrestore(x, f)
85 * No-op implementation.
/* No-op spinlock: liblustre is single-threaded, so mutual exclusion is never
 * actually needed.  The dummy member keeps the struct non-empty (empty
 * structs are a GNU extension in C). */
struct cfs_spin_lock {int foo;};
typedef struct cfs_spin_lock cfs_spinlock_t;
/* NOTE(review): the empty initializer list is a GNU extension — confirm all
 * consumers build with gcc-compatible compilers. */
#define CFS_SPIN_LOCK_UNLOCKED (cfs_spinlock_t) { }
/* Locking assertions compile to nothing: (void)sizeof(x) type-checks the
 * argument without evaluating it, so side effects in callers never run. */
#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
#define LASSERT_MUTEX_LOCKED(x) do {(void)sizeof(x);} while(0)
/* Spinlock operations, provided as real (out-of-line) functions so every
 * caller links against one canonical no-op implementation. */
void cfs_spin_lock_init(cfs_spinlock_t *lock);
void cfs_spin_lock(cfs_spinlock_t *lock);
void cfs_spin_unlock(cfs_spinlock_t *lock);
/* Trylock returns an int like the kernel API; presumably always succeeds in
 * the single-threaded build — confirm against user-lock.c. */
int cfs_spin_trylock(cfs_spinlock_t *lock);
/* "_bh" (bottom-half) variants: no softirqs in user space, same no-ops. */
void cfs_spin_lock_bh_init(cfs_spinlock_t *lock);
void cfs_spin_lock_bh(cfs_spinlock_t *lock);
void cfs_spin_unlock_bh(cfs_spinlock_t *lock);
105 static inline int cfs_spin_is_locked(cfs_spinlock_t *l) {return 1;}
106 static inline void cfs_spin_lock_irqsave(cfs_spinlock_t *l, unsigned long f){}
107 static inline void cfs_spin_unlock_irqrestore(cfs_spinlock_t *l,
113 * - cfs_sema_init(x, v)
117 typedef struct cfs_semaphore {
/* Counting semaphore API; the cfs_* names below are thin macro wrappers over
 * the double-underscore implementations. */
void cfs_sema_init(cfs_semaphore_t *s, int val);
/* NOTE(review): __up/__down are reserved identifiers (double leading
 * underscore belongs to the implementation) — kept for compatibility with
 * existing callers. */
void __up(cfs_semaphore_t *s);
void __down(cfs_semaphore_t *s);
int __down_interruptible(cfs_semaphore_t *s);
/* Statically define a semaphore that starts available (count == 1). */
#define CFS_DEFINE_SEMAPHORE(name) cfs_semaphore_t name = { 1 }
#define cfs_up(s) __up(s)
#define cfs_down(s) __down(s)
#define cfs_down_interruptible(s) __down_interruptible(s)
132 static inline int cfs_down_trylock(cfs_semaphore_t *sem)
140 * - cfs_init_completion_module(c)
141 * - cfs_call_wait_handler(t)
142 * - cfs_init_completion(c)
144 * - cfs_wait_for_completion(c)
145 * - cfs_wait_for_completion_interruptible(c)
/* Completion API.  A pluggable wait handler lets the hosting environment
 * decide how a "wait" is actually serviced (e.g. polling the network). */
typedef int (*cfs_wait_handler_t) (int timeout);
/* Install the process-wide wait handler used by cfs_call_wait_handler(). */
void cfs_init_completion_module(cfs_wait_handler_t handler);
int cfs_call_wait_handler(int timeout);
void cfs_init_completion(cfs_completion_t *c);
void cfs_complete(cfs_completion_t *c);
void cfs_wait_for_completion(cfs_completion_t *c);
int cfs_wait_for_completion_interruptible(cfs_completion_t *c);
/* Static initializer: done-count 0 plus an initialized wait queue head. */
#define CFS_COMPLETION_INITIALIZER(work) \
        { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#define CFS_DECLARE_COMPLETION(work) \
        cfs_completion_t work = CFS_COMPLETION_INITIALIZER(work)
/* Re-arm an already-initialized completion by clearing its done count. */
#define CFS_INIT_COMPLETION(x) ((x).done = 0)
172 * - cfs_init_rwsem(x)
174 * - cfs_down_read_trylock(x)
175 * - cfs_down_write(struct cfs_rw_semaphore *s);
176 * - cfs_down_write_trylock(struct cfs_rw_semaphore *s);
179 * - cfs_fini_rwsem(x)
181 typedef struct cfs_rw_semaphore {
183 } cfs_rw_semaphore_t;
/* Reader/writer semaphore operations; trivially granted in the
 * single-threaded build. */
void cfs_init_rwsem(cfs_rw_semaphore_t *s);
void cfs_down_read(cfs_rw_semaphore_t *s);
int cfs_down_read_trylock(cfs_rw_semaphore_t *s);
void cfs_down_write(cfs_rw_semaphore_t *s);
int cfs_down_write_trylock(cfs_rw_semaphore_t *s);
void cfs_up_read(cfs_rw_semaphore_t *s);
void cfs_up_write(cfs_rw_semaphore_t *s);
void cfs_fini_rwsem(cfs_rw_semaphore_t *s);
/* NOTE(review): empty initializer list is a GNU extension. */
#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name = { }
 * read-write lock: needs further investigation!
 * XXX nikita: for now, let rwlock_t be identical to rw_semaphore
199 * - cfs_rwlock_init(x)
201 * - cfs_read_unlock(x)
202 * - cfs_write_lock(x)
203 * - cfs_write_unlock(x)
204 * - cfs_write_lock_irqsave(x)
205 * - cfs_write_unlock_irqrestore(x)
206 * - cfs_read_lock_irqsave(x)
207 * - cfs_read_unlock_irqrestore(x)
/* rwlock_t is aliased to the (no-op) rw_semaphore: each rwlock operation
 * maps 1:1 onto the corresponding rwsem operation. */
typedef cfs_rw_semaphore_t cfs_rwlock_t;
#define CFS_RW_LOCK_UNLOCKED (cfs_rwlock_t) { }
#define cfs_rwlock_init(pl) cfs_init_rwsem(pl)
#define cfs_read_lock(l) cfs_down_read(l)
#define cfs_read_unlock(l) cfs_up_read(l)
#define cfs_write_lock(l) cfs_down_write(l)
#define cfs_write_unlock(l) cfs_up_write(l)
220 cfs_write_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_write_lock(l); }
222 cfs_write_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_write_unlock(l); }
225 cfs_read_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_read_lock(l); }
227 cfs_read_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_read_unlock(l); }
230 * Atomic for single-threaded user-space
/*
 * Atomic counter for single-threaded user space: plain (volatile) int
 * arithmetic — no CPU atomics or memory barriers are needed.
 *
 * Fix: every macro argument is now parenthesized in the expansion, so an
 * argument containing a low-precedence operator (e.g. "x | y") can no longer
 * bind incorrectly against the surrounding expression.
 */
typedef struct { volatile int counter; } cfs_atomic_t;

#define CFS_ATOMIC_INIT(i) { (i) }

#define cfs_atomic_read(a) ((a)->counter)
#define cfs_atomic_set(a,b) do {(a)->counter = (b); } while (0)
/* Non-zero ("true") exactly when the decrement reaches zero. */
#define cfs_atomic_dec_and_test(a) ((--((a)->counter)) == 0)
/* Second argument is the (ignored) lock: nothing to take single-threaded. */
#define cfs_atomic_dec_and_lock(a,b) ((--((a)->counter)) == 0)
#define cfs_atomic_inc(a) (((a)->counter)++)
#define cfs_atomic_dec(a) do { (a)->counter--; } while (0)
#define cfs_atomic_add(b,a) do {(a)->counter += (b);} while (0)
#define cfs_atomic_add_return(n,a) ((a)->counter += (n))
#define cfs_atomic_inc_return(a) cfs_atomic_add_return(1,a)
#define cfs_atomic_sub(b,a) do {(a)->counter -= (b);} while (0)
#define cfs_atomic_sub_return(n,a) ((a)->counter -= (n))
#define cfs_atomic_dec_return(a) cfs_atomic_sub_return(1,a)
/* Add "a" to "v" unless the counter equals "u".  Evaluates to the new
 * counter value when the add happens, 0 otherwise (note: the new value can
 * itself be 0, matching the original's semantics). */
#define cfs_atomic_add_unless(v, a, u) \
        ((v)->counter != (u) ? ((v)->counter += (a)) : 0)
#define cfs_atomic_inc_not_zero(v) cfs_atomic_add_unless((v), 1, 0)
252 #ifdef HAVE_LIBPTHREAD
256 * Multi-threaded user space completion APIs
261 pthread_cond_t c_cond;
262 pthread_mutex_t c_mut;
263 } cfs_mt_completion_t;
/* Multi-threaded (pthread-backed) completion operations; the condvar/mutex
 * pair lives inside cfs_mt_completion_t. */
void cfs_mt_init_completion(cfs_mt_completion_t *c);
void cfs_mt_fini_completion(cfs_mt_completion_t *c);
void cfs_mt_complete(cfs_mt_completion_t *c);
void cfs_mt_wait_for_completion(cfs_mt_completion_t *c);
/* Multi-threaded user space atomic APIs — out-of-line functions, presumably
 * serialized by a pthread lock in the implementation; confirm in the .c. */
typedef struct { volatile int counter; } cfs_mt_atomic_t;
int cfs_mt_atomic_read(cfs_mt_atomic_t *a);
void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b);
int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a);
void cfs_mt_atomic_inc(cfs_mt_atomic_t *a);
void cfs_mt_atomic_dec(cfs_mt_atomic_t *a);
void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a);
void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);
284 #endif /* HAVE_LIBPTHREAD */
286 /**************************************************************************
290 **************************************************************************/
/* A mutex is just a binary semaphore here: count 1 means unlocked. */
typedef struct cfs_semaphore cfs_mutex_t;
#define CFS_DEFINE_MUTEX(m) CFS_DEFINE_SEMAPHORE(m)
295 static inline void cfs_mutex_init(cfs_mutex_t *mutex)
297 cfs_sema_init(mutex, 1);
300 static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
305 static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
310 static inline int cfs_mutex_lock_interruptible(cfs_mutex_t *mutex)
312 return cfs_down_interruptible(mutex);
316 * Try-lock this mutex.
318 * Note, return values are negation of what is expected from down_trylock() or
319 * pthread_mutex_trylock().
321 * \retval 1 try-lock succeeded (lock acquired).
322 * \retval 0 indicates lock contention.
324 static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
326 return !cfs_down_trylock(mutex);
329 static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
334 * This is for use in assertions _only_, i.e., this function should always
337 * \retval 1 mutex is locked.
339 * \retval 0 mutex is not locked. This should never happen.
341 static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
347 /**************************************************************************
349 * Lockdep "implementation". Also see lustre_compat25.h
351 **************************************************************************/
353 typedef struct cfs_lock_class_key {
355 } cfs_lock_class_key_t;
357 static inline void cfs_lockdep_set_class(void *lock,
358 cfs_lock_class_key_t *key)
362 static inline void cfs_lockdep_off(void)
366 static inline void cfs_lockdep_on(void)
/* Lock-nesting (lockdep subclass) annotations are meaningless without a real
 * lock validator: map the _nested variants onto the plain operations and
 * discard the subclass argument. */
#define cfs_mutex_lock_nested(mutex, subclass) cfs_mutex_lock(mutex)
#define cfs_spin_lock_nested(lock, subclass) cfs_spin_lock(lock)
#define cfs_down_read_nested(lock, subclass) cfs_down_read(lock)
#define cfs_down_write_nested(lock, subclass) cfs_down_write(lock)
379 /* __LIBCFS_USER_LOCK_H__ */
383 * c-indentation-style: "K&R"