/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/user-lock.h
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 */

#ifndef __LIBCFS_USER_LOCK_H__
#define __LIBCFS_USER_LOCK_H__

#ifndef __LIBCFS_LIBCFS_H__
#error Do not #include this file directly. #include <libcfs/libcfs.h> instead
#endif

/* Implementations of portable synchronization APIs for liblustre */

/*
 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
 *
 * XXX Liang: several branches share lnet with b_hd_newconfig; if we define
 * the lock APIs here, they will conflict with liblustre in those branches.
 */

#ifndef __KERNEL__

/*
 * User-space implementations of linux/spinlock.h vary, so we provide our
 * own for all of them; defining __LINUX_SPINLOCK_H keeps the system header
 * from being pulled in.
 */
#define __LINUX_SPINLOCK_H

/*
 * Optional debugging (magic stamping and ownership checking) can be added.
 */

/*
 * cfs_spin_lock
 *
 * - cfs_spin_lock_init(x)
 * - cfs_spin_lock(x)
 * - cfs_spin_unlock(x)
 * - cfs_spin_trylock(x)
 * - cfs_spin_lock_bh_init(x)
 * - cfs_spin_lock_bh(x)
 * - cfs_spin_unlock_bh(x)
 *
 * - cfs_spin_is_locked(x)
 * - cfs_spin_lock_irqsave(x, f)
 * - cfs_spin_unlock_irqrestore(x, f)
 *
 * No-op implementation.
 */
struct cfs_spin_lock { int foo; };

typedef struct cfs_spin_lock cfs_spinlock_t;

#define DEFINE_SPINLOCK(lock)           cfs_spinlock_t lock = { }
#define LASSERT_SPIN_LOCKED(lock)       do { (void)sizeof(lock); } while (0)
#define LINVRNT_SPIN_LOCKED(lock)       do { (void)sizeof(lock); } while (0)
#define LASSERT_SEM_LOCKED(sem)         do { (void)sizeof(sem); } while (0)
#define LASSERT_MUTEX_LOCKED(x)         do { (void)sizeof(x); } while (0)

void cfs_spin_lock_init(cfs_spinlock_t *lock);
void cfs_spin_lock(cfs_spinlock_t *lock);
void cfs_spin_unlock(cfs_spinlock_t *lock);
int cfs_spin_trylock(cfs_spinlock_t *lock);
void cfs_spin_lock_bh_init(cfs_spinlock_t *lock);
void cfs_spin_lock_bh(cfs_spinlock_t *lock);
void cfs_spin_unlock_bh(cfs_spinlock_t *lock);

static inline int cfs_spin_is_locked(cfs_spinlock_t *l) { return 1; }
static inline void cfs_spin_lock_irqsave(cfs_spinlock_t *l, unsigned long f) {}
static inline void cfs_spin_unlock_irqrestore(cfs_spinlock_t *l,
                                              unsigned long f) {}
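
/*
 * Usage sketch (illustrative only; "guard" is a hypothetical caller's lock):
 * the kernel-style locking discipline is kept even though the
 * single-threaded implementation reduces to no-ops:
 *
 *        cfs_spinlock_t guard;
 *
 *        cfs_spin_lock_init(&guard);
 *        cfs_spin_lock(&guard);
 *        ... touch the shared state ...
 *        cfs_spin_unlock(&guard);
 */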

/*
 * Semaphore
 *
 * - cfs_sema_init(x, v)
 * - __down(x)
 * - __up(x)
 */
typedef struct cfs_semaphore {
        int foo;
} cfs_semaphore_t;

void cfs_sema_init(cfs_semaphore_t *s, int val);
void __up(cfs_semaphore_t *s);
void __down(cfs_semaphore_t *s);
int __down_interruptible(cfs_semaphore_t *s);

#define CFS_DEFINE_SEMAPHORE(name)      cfs_semaphore_t name = { 1 }

#define cfs_up(s)                       __up(s)
#define cfs_down(s)                     __down(s)
#define cfs_down_interruptible(s)       __down_interruptible(s)

static inline int cfs_down_trylock(cfs_semaphore_t *sem)
{
        return 0;
}

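/*
 * Usage sketch (illustrative only): the classic counted-semaphore pattern;
 * initialized with val == 1 it behaves as a mutex.  Note cfs_down_trylock()
 * follows the kernel convention: 0 means the semaphore was taken.
 *
 *        cfs_semaphore_t sem;
 *
 *        cfs_sema_init(&sem, 1);
 *        cfs_down(&sem);
 *        ... exclusive section ...
 *        cfs_up(&sem);
 */
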
/*
 * Completion:
 *
 * - cfs_init_completion_module(c)
 * - cfs_call_wait_handler(t)
 * - cfs_init_completion(c)
 * - cfs_complete(c)
 * - cfs_wait_for_completion(c)
 * - cfs_wait_for_completion_interruptible(c)
 */
typedef struct {
        unsigned int done;
        cfs_waitq_t wait;
} cfs_completion_t;

typedef int (*cfs_wait_handler_t)(int timeout);
void cfs_init_completion_module(cfs_wait_handler_t handler);
int  cfs_call_wait_handler(int timeout);
void cfs_init_completion(cfs_completion_t *c);
void cfs_complete(cfs_completion_t *c);
void cfs_wait_for_completion(cfs_completion_t *c);
int cfs_wait_for_completion_interruptible(cfs_completion_t *c);

#define CFS_COMPLETION_INITIALIZER(work) \
        { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

#define CFS_DECLARE_COMPLETION(work) \
        cfs_completion_t work = CFS_COMPLETION_INITIALIZER(work)

#define CFS_INIT_COMPLETION(x)      ((x).done = 0)

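/*
 * Usage sketch (illustrative only): one side signals, the other waits:
 *
 *        CFS_DECLARE_COMPLETION(done);
 *
 *        ... the producer finishes its work and calls ...
 *        cfs_complete(&done);
 *
 *        ... while the consumer blocks until then via ...
 *        cfs_wait_for_completion(&done);
 */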

/*
 * cfs_rw_semaphore:
 *
 * - cfs_init_rwsem(x)
 * - cfs_down_read(x)
 * - cfs_down_read_trylock(x)
 * - cfs_down_write(struct cfs_rw_semaphore *s);
 * - cfs_down_write_trylock(struct cfs_rw_semaphore *s);
 * - cfs_up_read(x)
 * - cfs_up_write(x)
 * - cfs_fini_rwsem(x)
 */
typedef struct cfs_rw_semaphore {
        int foo;
} cfs_rw_semaphore_t;

void cfs_init_rwsem(cfs_rw_semaphore_t *s);
void cfs_down_read(cfs_rw_semaphore_t *s);
int cfs_down_read_trylock(cfs_rw_semaphore_t *s);
void cfs_down_write(cfs_rw_semaphore_t *s);
int cfs_down_write_trylock(cfs_rw_semaphore_t *s);
void cfs_up_read(cfs_rw_semaphore_t *s);
void cfs_up_write(cfs_rw_semaphore_t *s);
void cfs_fini_rwsem(cfs_rw_semaphore_t *s);
#define CFS_DECLARE_RWSEM(name)  cfs_rw_semaphore_t name = { }

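/*
 * Usage sketch (illustrative only): readers may nest, writers are exclusive:
 *
 *        CFS_DECLARE_RWSEM(rws);
 *
 *        cfs_down_read(&rws);
 *        ... read the shared data ...
 *        cfs_up_read(&rws);
 *
 *        cfs_down_write(&rws);
 *        ... modify the shared data ...
 *        cfs_up_write(&rws);
 */
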
/*
 * read-write lock: needs more investigation!
 * XXX nikita: for now, let rwlock_t be identical to rw_semaphore
 *
 * - cfs_rwlock_init(x)
 * - cfs_read_lock(x)
 * - cfs_read_unlock(x)
 * - cfs_write_lock(x)
 * - cfs_write_unlock(x)
 * - cfs_write_lock_irqsave(x)
 * - cfs_write_unlock_irqrestore(x)
 * - cfs_read_lock_irqsave(x)
 * - cfs_read_unlock_irqrestore(x)
 */
typedef cfs_rw_semaphore_t cfs_rwlock_t;
#define DEFINE_RWLOCK(lock)     cfs_rwlock_t lock = { }

#define cfs_rwlock_init(pl)         cfs_init_rwsem(pl)

#define cfs_read_lock(l)            cfs_down_read(l)
#define cfs_read_unlock(l)          cfs_up_read(l)
#define cfs_write_lock(l)           cfs_down_write(l)
#define cfs_write_unlock(l)         cfs_up_write(l)

static inline void
cfs_write_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_write_lock(l); }
static inline void
cfs_write_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_write_unlock(l); }

static inline void
cfs_read_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_read_lock(l); }
static inline void
cfs_read_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_read_unlock(l); }

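/*
 * Usage sketch (illustrative only): the irqsave variants accept a flags
 * word for kernel API compatibility but ignore it here:
 *
 *        DEFINE_RWLOCK(rwl);
 *        unsigned long flags;
 *
 *        cfs_read_lock_irqsave(&rwl, flags);
 *        ... read ...
 *        cfs_read_unlock_irqrestore(&rwl, flags);
 */
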
/*
 * Atomic for single-threaded user-space
 */
typedef struct { volatile int counter; } cfs_atomic_t;

#define CFS_ATOMIC_INIT(i) { (i) }

#define cfs_atomic_read(a) ((a)->counter)
#define cfs_atomic_set(a, b) do { (a)->counter = (b); } while (0)
#define cfs_atomic_dec_and_test(a) ((--((a)->counter)) == 0)
#define cfs_atomic_dec_and_lock(a, b) ((--((a)->counter)) == 0)
#define cfs_atomic_inc(a)  (((a)->counter)++)
#define cfs_atomic_dec(a)  do { (a)->counter--; } while (0)
#define cfs_atomic_add(b, a)  do { (a)->counter += (b); } while (0)
#define cfs_atomic_add_return(n, a) ((a)->counter += (n))
#define cfs_atomic_inc_return(a) cfs_atomic_add_return(1, a)
#define cfs_atomic_sub(b, a)  do { (a)->counter -= (b); } while (0)
#define cfs_atomic_sub_return(n, a) ((a)->counter -= (n))
#define cfs_atomic_dec_return(a)  cfs_atomic_sub_return(1, a)
/* as in the kernel: returns non-zero iff the add was performed */
#define cfs_atomic_add_unless(v, a, u) \
        ((v)->counter != (u) ? ((v)->counter += (a), 1) : 0)
#define cfs_atomic_inc_not_zero(v) cfs_atomic_add_unless((v), 1, 0)
#define cfs_atomic_cmpxchg(v, ov, nv) \
        ((v)->counter == (ov) ? ((v)->counter = (nv), (ov)) : (v)->counter)

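/*
 * Usage sketch (illustrative only; put_object() is a hypothetical release
 * hook): a reference count, safe here only because this build is
 * single-threaded:
 *
 *        cfs_atomic_t refcount = CFS_ATOMIC_INIT(1);
 *
 *        cfs_atomic_inc(&refcount);
 *        ...
 *        if (cfs_atomic_dec_and_test(&refcount))
 *                put_object();
 */
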
#ifdef HAVE_LIBPTHREAD
#include <pthread.h>

/*
 * Multi-threaded user space completion APIs
 */

typedef struct {
        int c_done;
        pthread_cond_t c_cond;
        pthread_mutex_t c_mut;
} cfs_mt_completion_t;

void cfs_mt_init_completion(cfs_mt_completion_t *c);
void cfs_mt_fini_completion(cfs_mt_completion_t *c);
void cfs_mt_complete(cfs_mt_completion_t *c);
void cfs_mt_wait_for_completion(cfs_mt_completion_t *c);

/*
 * Multi-threaded user space atomic APIs
 */

typedef struct { volatile int counter; } cfs_mt_atomic_t;

int cfs_mt_atomic_read(cfs_mt_atomic_t *a);
void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b);
int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a);
void cfs_mt_atomic_inc(cfs_mt_atomic_t *a);
void cfs_mt_atomic_dec(cfs_mt_atomic_t *a);
void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a);
void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);

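/*
 * Usage sketch (illustrative only): a hand-off between two pthreads;
 * cfs_mt_completion_t pairs a done flag with a condition variable:
 *
 *        cfs_mt_completion_t c;
 *
 *        cfs_mt_init_completion(&c);
 *        ... a worker thread eventually calls cfs_mt_complete(&c) ...
 *        cfs_mt_wait_for_completion(&c);
 *        cfs_mt_fini_completion(&c);
 */
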
#endif /* HAVE_LIBPTHREAD */

/**************************************************************************
 *
 * Mutex interface.
 *
 **************************************************************************/
typedef struct cfs_semaphore cfs_mutex_t;

#define CFS_DEFINE_MUTEX(m) CFS_DEFINE_SEMAPHORE(m)

static inline void cfs_mutex_init(cfs_mutex_t *mutex)
{
        cfs_sema_init(mutex, 1);
}

static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
{
        cfs_down(mutex);
}

static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
{
        cfs_up(mutex);
}

static inline int cfs_mutex_lock_interruptible(cfs_mutex_t *mutex)
{
        return cfs_down_interruptible(mutex);
}

/**
 * Try-lock this mutex.
 *
 * Note: the return values are the negation of what down_trylock() and
 * pthread_mutex_trylock() return.
 *
 * \retval 1 try-lock succeeded (lock acquired).
 * \retval 0 indicates lock contention.
 */
static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
{
        return !cfs_down_trylock(mutex);
}

static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
{
}

/*
 * This is for use in assertions _only_, i.e., this function should always
 * return 1.
 *
 * \retval 1 mutex is locked.
 *
 * \retval 0 mutex is not locked. This should never happen.
 */
static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
{
        return 1;
}

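/*
 * Usage sketch (illustrative only): note the trylock convention documented
 * above, where non-zero means the lock was acquired:
 *
 *        CFS_DEFINE_MUTEX(m);
 *
 *        cfs_mutex_lock(&m);
 *        ... critical section ...
 *        cfs_mutex_unlock(&m);
 *
 *        if (cfs_mutex_trylock(&m)) {
 *                ... non-blocking path ...
 *                cfs_mutex_unlock(&m);
 *        }
 */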

/**************************************************************************
 *
 * Lockdep "implementation". Also see lustre_compat25.h
 *
 **************************************************************************/

typedef struct cfs_lock_class_key {
        int foo;
} cfs_lock_class_key_t;

static inline void cfs_lockdep_set_class(void *lock,
                                         cfs_lock_class_key_t *key)
{
}

static inline void cfs_lockdep_off(void)
{
}

static inline void cfs_lockdep_on(void)
{
}

#define cfs_mutex_lock_nested(mutex, subclass) cfs_mutex_lock(mutex)
#define cfs_spin_lock_nested(lock, subclass) cfs_spin_lock(lock)
#define cfs_down_read_nested(lock, subclass) cfs_down_read(lock)
#define cfs_down_write_nested(lock, subclass) cfs_down_write(lock)

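/*
 * Usage sketch (illustrative only; "key" is a hypothetical class key): the
 * annotations compile to nothing here, but keep source compatibility with
 * lockdep-enabled kernel builds:
 *
 *        static cfs_lock_class_key_t key;
 *        cfs_mutex_t m;
 *
 *        cfs_mutex_init(&m);
 *        cfs_lockdep_set_class(&m, &key);
 *        cfs_mutex_lock_nested(&m, 1);
 *        cfs_mutex_unlock(&m);
 */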

#endif /* !__KERNEL__ */

#endif /* __LIBCFS_USER_LOCK_H__ */
/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 80
 * scroll-step: 1
 * End:
 */