/* Captured from Whamcloud gitweb
 * b=20878: change kernelcomms from netlink to pipes
 * fs/lustre-release.git: libcfs/include/libcfs/user-lock.h
 */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * libcfs/include/libcfs/user-lock.h
37  *
38  * Author: Nikita Danilov <nikita@clusterfs.com>
39  */
40
41 #ifndef __LIBCFS_USER_LOCK_H__
42 #define __LIBCFS_USER_LOCK_H__
43
44 #ifndef __LIBCFS_LIBCFS_H__
45 #error Do not #include this file directly. #include <libcfs/libcfs.h> instead
46 #endif
47
48 /* Implementations of portable synchronization APIs for liblustre */
49
50 /*
51  * liblustre is single-threaded, so most "synchronization" APIs are trivial.
52  *
 * XXX Liang: several branches share LNET with b_hd_newconfig; if we
 * define the lock APIs here, they will conflict with liblustre in
 * those other branches.
56  */
57
58 #ifndef __KERNEL__
59
60 /*
61  * The userspace implementations of linux/spinlock.h vary; we just
62  * include our own for all of them
63  */
64 #define __LINUX_SPINLOCK_H
65
66 /*
67  * Optional debugging (magic stamping and checking ownership) can be added.
68  */
69
70 /*
71  * cfs_spin_lock
72  *
73  * - cfs_spin_lock_init(x)
74  * - cfs_spin_lock(x)
75  * - cfs_spin_unlock(x)
76  * - cfs_spin_trylock(x)
77  * - cfs_spin_lock_bh_init(x)
78  * - cfs_spin_lock_bh(x)
79  * - cfs_spin_unlock_bh(x)
80  *
81  * - cfs_spin_is_locked(x)
82  * - cfs_spin_lock_irqsave(x, f)
83  * - cfs_spin_unlock_irqrestore(x, f)
84  *
85  * No-op implementation.
86  */
/* Trivial spin lock: liblustre is single-threaded, so no mutual
 * exclusion is required; the struct only exists so the API has a
 * concrete type to point at. */
struct cfs_spin_lock {int foo;};

typedef struct cfs_spin_lock cfs_spinlock_t;

/* NB: the empty-brace compound literal is a GCC extension (pre-C23). */
#define CFS_SPIN_LOCK_UNLOCKED (cfs_spinlock_t) { }
/* Locking assertions compile the argument away (sizeof is unevaluated);
 * they can never fire in user-space. */
#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)

/* Prototypes only; the (no-op) definitions live outside this header. */
void cfs_spin_lock_init(cfs_spinlock_t *lock);
void cfs_spin_lock(cfs_spinlock_t *lock);
void cfs_spin_unlock(cfs_spinlock_t *lock);
int cfs_spin_trylock(cfs_spinlock_t *lock);
void cfs_spin_lock_bh_init(cfs_spinlock_t *lock);
void cfs_spin_lock_bh(cfs_spinlock_t *lock);
void cfs_spin_unlock_bh(cfs_spinlock_t *lock);

/* Always reports "locked": the single thread is by definition the
 * holder.  The irqsave/irqrestore flag arguments are ignored — there
 * are no interrupts to mask in user-space. */
static inline int cfs_spin_is_locked(cfs_spinlock_t *l) {return 1;}
static inline void cfs_spin_lock_irqsave(cfs_spinlock_t *l, unsigned long f){}
static inline void cfs_spin_unlock_irqrestore(cfs_spinlock_t *l,
                                              unsigned long f){}
108
109 /*
110  * Semaphore
111  *
112  * - cfs_sema_init(x, v)
113  * - __down(x)
114  * - __up(x)
115  */
/* Trivial counting semaphore.  In single-threaded liblustre __down()
 * can never actually block; prototypes only, definitions elsewhere. */
typedef struct cfs_semaphore {
    int foo;    /* placeholder member; initialized to the count by
                 * CFS_DECLARE_MUTEX() below */
} cfs_semaphore_t;

void cfs_sema_init(cfs_semaphore_t *s, int val);
void __down(cfs_semaphore_t *s);
void __up(cfs_semaphore_t *s);
123
124 /*
125  * Completion:
126  *
127  * - cfs_init_completion_module(c)
128  * - cfs_call_wait_handler(t)
129  * - cfs_init_completion(c)
130  * - cfs_complete(c)
131  * - cfs_wait_for_completion(c)
132  * - cfs_wait_for_completion_interruptible(c)
133  */
/* Completion built on an application-registered wait handler; the
 * handler is installed with cfs_init_completion_module() and invoked
 * through cfs_call_wait_handler(). */
typedef struct {
        unsigned int done;      /* completion count; re-armed (set to 0)
                                 * by CFS_INIT_COMPLETION() */
        cfs_waitq_t wait;       /* wait queue; cfs_waitq_t is a
                                 * project-declared type */
} cfs_completion_t;

/* Application-supplied wait callback; takes a timeout argument. */
typedef int (*cfs_wait_handler_t) (int timeout);
void cfs_init_completion_module(cfs_wait_handler_t handler);
int  cfs_call_wait_handler(int timeout);
void cfs_init_completion(cfs_completion_t *c);
void cfs_complete(cfs_completion_t *c);
void cfs_wait_for_completion(cfs_completion_t *c);
int cfs_wait_for_completion_interruptible(cfs_completion_t *c);

/* Static initializer mirroring the kernel COMPLETION_INITIALIZER shape. */
#define CFS_COMPLETION_INITIALIZER(work) \
        { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

#define CFS_DECLARE_COMPLETION(work) \
        cfs_completion_t work = CFS_COMPLETION_INITIALIZER(work)

/* Re-arm an already-initialized completion for reuse. */
#define CFS_INIT_COMPLETION(x)      ((x).done = 0)
154
155
156 /*
157  * cfs_rw_semaphore:
158  *
159  * - cfs_init_rwsem(x)
160  * - cfs_down_read(x)
161  * - cfs_down_read_trylock(x)
162  * - cfs_down_write(struct cfs_rw_semaphore *s);
163  * - cfs_down_write_trylock(struct cfs_rw_semaphore *s);
164  * - cfs_up_read(x)
165  * - cfs_up_write(x)
166  * - cfs_fini_rwsem(x)
167  */
/* Trivial reader-writer semaphore: single-threaded, so acquisition can
 * never contend.  Prototypes only; definitions live elsewhere. */
typedef struct cfs_rw_semaphore {
        int foo;        /* placeholder so the struct is non-empty */
} cfs_rw_semaphore_t;

void cfs_init_rwsem(cfs_rw_semaphore_t *s);
void cfs_down_read(cfs_rw_semaphore_t *s);
int cfs_down_read_trylock(cfs_rw_semaphore_t *s);
void cfs_down_write(cfs_rw_semaphore_t *s);
int cfs_down_write_trylock(cfs_rw_semaphore_t *s);
void cfs_up_read(cfs_rw_semaphore_t *s);
void cfs_up_write(cfs_rw_semaphore_t *s);
void cfs_fini_rwsem(cfs_rw_semaphore_t *s);
/* NB: empty-brace initializer is a GCC extension (pre-C23). */
#define CFS_DECLARE_RWSEM(name)  cfs_rw_semaphore_t name = { }
181
182 /*
183  * read-write lock : Need to be investigated more!!
184  * XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
185  *
186  * - cfs_rwlock_init(x)
187  * - cfs_read_lock(x)
188  * - cfs_read_unlock(x)
189  * - cfs_write_lock(x)
190  * - cfs_write_unlock(x)
191  * - cfs_write_lock_irqsave(x)
192  * - cfs_write_unlock_irqrestore(x)
193  * - cfs_read_lock_irqsave(x)
194  * - cfs_read_unlock_irqrestore(x)
195  */
/* As noted in the XXX above, the rwlock is simply an alias for the
 * (trivial) rw_semaphore type, with the lock ops forwarded. */
typedef cfs_rw_semaphore_t cfs_rwlock_t;
#define CFS_RW_LOCK_UNLOCKED        (cfs_rwlock_t) { }

#define cfs_rwlock_init(pl)         cfs_init_rwsem(pl)

#define cfs_read_lock(l)            cfs_down_read(l)
#define cfs_read_unlock(l)          cfs_up_read(l)
#define cfs_write_lock(l)           cfs_down_write(l)
#define cfs_write_unlock(l)         cfs_up_write(l)

/* The irqsave/irqrestore variants ignore the flags argument: there are
 * no interrupts to mask in user-space. */
static inline void
cfs_write_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_write_lock(l); }
static inline void
cfs_write_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_write_unlock(l); }

static inline void
cfs_read_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_read_lock(l); }
static inline void
cfs_read_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_read_unlock(l); }
215
216 /*
217  * Atomic for single-threaded user-space
218  */
/*
 * Atomic counter for single-threaded user-space: plain integer
 * arithmetic suffices, no architecture-specific atomics needed.
 *
 * NOTE: every macro parameter is parenthesized in the expansion so that
 * expression arguments (e.g. "x | y") cannot be re-associated by
 * operator precedence.  Arguments may still be evaluated more than
 * once — do not pass expressions with side effects.
 */
typedef struct { volatile int counter; } cfs_atomic_t;

#define CFS_ATOMIC_INIT(i) { (i) }

#define cfs_atomic_read(a) ((a)->counter)
#define cfs_atomic_set(a, b) do { (a)->counter = (b); } while (0)
#define cfs_atomic_dec_and_test(a) ((--((a)->counter)) == 0)
/* second argument (the lock) is ignored: nothing to lock single-threaded */
#define cfs_atomic_dec_and_lock(a, b) ((--((a)->counter)) == 0)
#define cfs_atomic_inc(a)  (((a)->counter)++)
#define cfs_atomic_dec(a)  do { (a)->counter--; } while (0)
#define cfs_atomic_add(b, a)  do { (a)->counter += (b); } while (0)
#define cfs_atomic_add_return(n, a) ((a)->counter += (n))
#define cfs_atomic_inc_return(a) cfs_atomic_add_return(1, a)
#define cfs_atomic_sub(b, a)  do { (a)->counter -= (b); } while (0)
#define cfs_atomic_sub_return(n, a) ((a)->counter -= (n))
#define cfs_atomic_dec_return(a)  cfs_atomic_sub_return(1, a)
/* Add "a" to *v unless the counter equals "u".  Evaluates to the new
 * counter value when the add happened, else 0 — callers should rely on
 * zero/non-zero only (NOTE(review): if counter + a lands exactly on 0
 * this reads as "not added"; matches the original macro's behavior). */
#define cfs_atomic_add_unless(v, a, u) \
        ((v)->counter != (u) ? ((v)->counter += (a)) : 0)
#define cfs_atomic_inc_not_zero(v) cfs_atomic_add_unless((v), 1, 0)
238
#ifdef HAVE_LIBPTHREAD
#include <pthread.h>

/*
 * Multi-threaded user space completion APIs
 */

/* Completion backed by a pthread condition variable and mutex.
 * (Field roles inferred from naming; definitions are not in this
 * header — verify against the implementation.) */
typedef struct {
        int c_done;             /* completion state */
        pthread_cond_t c_cond;  /* presumably signalled on completion */
        pthread_mutex_t c_mut;  /* presumably guards c_done/c_cond */
} cfs_mt_completion_t;

void cfs_mt_init_completion(cfs_mt_completion_t *c);
void cfs_mt_fini_completion(cfs_mt_completion_t *c);
void cfs_mt_complete(cfs_mt_completion_t *c);
void cfs_mt_wait_for_completion(cfs_mt_completion_t *c);

/*
 * Multi-threaded user space atomic APIs
 */

/* Same layout as cfs_atomic_t, but accessed through functions so the
 * implementation can provide real thread-safety. */
typedef struct { volatile int counter; } cfs_mt_atomic_t;

int cfs_mt_atomic_read(cfs_mt_atomic_t *a);
void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b);
int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a);
void cfs_mt_atomic_inc(cfs_mt_atomic_t *a);
void cfs_mt_atomic_dec(cfs_mt_atomic_t *a);
void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a);
void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);

#endif /* HAVE_LIBPTHREAD */
272
/**************************************************************************
 *
 * Mutex interface.
 *
 * Built on the trivial semaphore above: a count of 1 means unlocked,
 * 0 means locked.
 *
 **************************************************************************/
/* Define a mutex that starts out unlocked (semaphore count 1). */
#define CFS_DECLARE_MUTEX(name)     \
        cfs_semaphore_t name = { 1 }

#define cfs_mutex_up(s)                     __up(s)     /* unlock */
#define cfs_up(s)                           cfs_mutex_up(s)
#define cfs_mutex_down(s)                   __down(s)   /* lock */
#define cfs_down(s)                         cfs_mutex_down(s)

#define cfs_init_mutex(x)                   cfs_sema_init(x, 1)
#define cfs_init_mutex_locked(x)            cfs_sema_init(x, 0)

/* Kernel-style mutex wrapping the semaphore. */
typedef struct cfs_mutex {
        cfs_semaphore_t m_sem;
} cfs_mutex_t;

/* No initializer needed: the no-op mutex has no meaningful state. */
#define CFS_DEFINE_MUTEX(m) cfs_mutex_t m
294
/* Initialize \a mutex in the unlocked state (semaphore count 1). */
static inline void cfs_mutex_init(cfs_mutex_t *mutex)
{
        cfs_init_mutex(&mutex->m_sem);
}

/* Acquire \a mutex; cannot contend in single-threaded liblustre. */
static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
{
        cfs_mutex_down(&mutex->m_sem);
}

/* Release \a mutex. */
static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
{
        cfs_mutex_up(&mutex->m_sem);
}
309
310 /**
311  * Try-lock this mutex.
312  *
313  *
314  * \retval 0 try-lock succeeded (lock acquired).
315  * \retval errno indicates lock contention.
316  */
317 static inline int cfs_mutex_down_trylock(cfs_mutex_t *mutex)
318 {
319         return 0;
320 }
321
322 /**
323  * Try-lock this mutex.
324  *
325  * Note, return values are negation of what is expected from down_trylock() or
326  * pthread_mutex_trylock().
327  *
328  * \retval 1 try-lock succeeded (lock acquired).
329  * \retval 0 indicates lock contention.
330  */
331 static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
332 {
333         return !cfs_mutex_down_trylock(mutex);
334 }
335
336 static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
337 {
338 }
339
/*
 * For use in assertions _only_: the no-op mutex is always considered
 * held by the (single) running thread, so this must always return 1.
 *
 * \retval 1 mutex is locked.
 *
 * \retval 0 mutex is not locked. This should never happen.
 */
static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
{
        return 1;
}
352
353
/**************************************************************************
 *
 * Lockdep "implementation". Also see lustre_compat25.h
 *
 * All lock-dependency tracking is a no-op in user-space; the types and
 * stubs exist only so kernel-shared code compiles unchanged.
 *
 **************************************************************************/

typedef struct cfs_lock_class_key {
        int foo;        /* placeholder so the struct is non-empty */
} cfs_lock_class_key_t;

/* Class assignment is discarded. */
static inline void cfs_lockdep_set_class(void *lock,
                                         cfs_lock_class_key_t *key)
{
}

static inline void cfs_lockdep_off(void)
{
}

static inline void cfs_lockdep_on(void)
{
}

/* The _nested variants simply drop the subclass annotation. */
#define cfs_mutex_lock_nested(mutex, subclass) cfs_mutex_lock(mutex)
#define cfs_spin_lock_nested(lock, subclass) cfs_spin_lock(lock)
#define cfs_down_read_nested(lock, subclass) cfs_down_read(lock)
#define cfs_down_write_nested(lock, subclass) cfs_down_write(lock)
381
382
383 /* !__KERNEL__ */
384 #endif
385
386 /* __LIBCFS_USER_LOCK_H__ */
387 #endif
388 /*
389  * Local variables:
390  * c-indentation-style: "K&R"
391  * c-basic-offset: 8
392  * tab-width: 8
393  * fill-column: 80
394  * scroll-step: 1
395  * End:
396  */