/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/user-lock.c
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 */

/* Implementations of portable synchronization APIs for liblustre */

/*
 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
 *
 * XXX Liang: Several branches share lnet with b_hd_newconfig; if we define
 * the lock APIs here, they will conflict with liblustre in those branches.
 */

#ifndef __KERNEL__

#include <libcfs/libcfs.h>

/*
 * Optional debugging (magic stamping and checking ownership) can be added.
 */
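
/*
 * Illustrative only: one way such debugging could look.  The struct, the
 * CFS_LOCK_MAGIC value and the owner field below are hypothetical, not part
 * of the current cfs_spinlock_t definition; a debug build might stamp each
 * lock at init time and record its owner so that mismatched lock/unlock
 * pairs trip an assertion:
 *
 *      struct dbg_spinlock {
 *              unsigned int   magic;   set to CFS_LOCK_MAGIC in init
 *              cfs_task_t    *owner;   task currently holding the lock
 *      };
 *
 *      on lock:   LASSERT(l->magic == CFS_LOCK_MAGIC); l->owner = cfs_current();
 *      on unlock: LASSERT(l->owner == cfs_current());  l->owner = NULL;
 */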

/*
 * spin_lock
 *
 * - spin_lock_init(x)
 * - spin_lock(x)
 * - spin_lock_nested(x, subclass)
 * - spin_unlock(x)
 * - spin_trylock(x)
 *
 * - spin_lock_irqsave(x, f)
 * - spin_unlock_irqrestore(x, f)
 *
 * No-op implementation.
 */
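
/*
 * Usage sketch (illustrative only): callers use the cfs_-prefixed wrappers
 * below just as they would kernel spinlocks; in the single-threaded
 * liblustre build every call is a no-op and cfs_spin_trylock() always
 * returns 1.
 *
 *      cfs_spinlock_t lock;
 *
 *      cfs_spin_lock_init(&lock);
 *      cfs_spin_lock(&lock);
 *      ... critical section ...
 *      cfs_spin_unlock(&lock);
 */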

void cfs_spin_lock_init(cfs_spinlock_t *lock)
{
        LASSERT(lock != NULL);
        (void)lock;
}

void cfs_spin_lock(cfs_spinlock_t *lock)
{
        (void)lock;
}

void cfs_spin_unlock(cfs_spinlock_t *lock)
{
        (void)lock;
}

int cfs_spin_trylock(cfs_spinlock_t *lock)
{
        (void)lock;
        return 1;
}

void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
{
        LASSERT(lock != NULL);
        (void)lock;
}

void cfs_spin_lock_bh(cfs_spinlock_t *lock)
{
        LASSERT(lock != NULL);
        (void)lock;
}

void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
{
        LASSERT(lock != NULL);
        (void)lock;
}

/*
 * Semaphore
 *
 * - sema_init(x, v)
 * - __down(x)
 * - __up(x)
 */
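
/*
 * Usage sketch (illustrative only): in liblustre these are stubs, so
 * __down() never blocks and __down_interruptible() always returns 0.
 *
 *      cfs_semaphore_t sem;
 *
 *      cfs_sema_init(&sem, 1);
 *      __down(&sem);
 *      ... protected section ...
 *      __up(&sem);
 */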

void cfs_sema_init(cfs_semaphore_t *s, int val)
{
        LASSERT(s != NULL);
        (void)s;
        (void)val;
}

void __down(cfs_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
}

int __down_interruptible(cfs_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
        return 0;
}

void __up(cfs_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
}


/*
 * Completion:
 *
 * - init_completion(c)
 * - complete(c)
 * - wait_for_completion(c)
 */
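
/*
 * Usage sketch (illustrative only): single-threaded liblustre cannot block,
 * so cfs_wait_for_completion() below repeatedly calls the registered wait
 * handler (see cfs_call_wait_handler()) until ->done is set.
 *
 *      cfs_completion_t c;
 *
 *      cfs_init_completion(&c);
 *      ... arrange for cfs_complete(&c) to be called later ...
 *      cfs_wait_for_completion(&c);
 */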

static cfs_wait_handler_t wait_handler;

void cfs_init_completion_module(cfs_wait_handler_t handler)
{
        wait_handler = handler;
}

int cfs_call_wait_handler(int timeout)
{
        if (!wait_handler)
                return -ENOSYS;
        return wait_handler(timeout);
}
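
/*
 * Illustrative only: my_wait_events() is a hypothetical handler, shown just
 * to demonstrate the registration hook.  Whoever initialises the library is
 * expected to register an event-polling routine of this shape:
 *
 *      static int my_wait_events(int timeout)
 *      {
 *              ... poll for events for up to timeout, return < 0 on error ...
 *              return 0;
 *      }
 *
 *      cfs_init_completion_module(my_wait_events);
 */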

void cfs_init_completion(cfs_completion_t *c)
{
        LASSERT(c != NULL);
        c->done = 0;
        cfs_waitq_init(&c->wait);
}

void cfs_complete(cfs_completion_t *c)
{
        LASSERT(c != NULL);
        c->done = 1;
        cfs_waitq_signal(&c->wait);
}

void cfs_wait_for_completion(cfs_completion_t *c)
{
        LASSERT(c != NULL);
        do {
                if (cfs_call_wait_handler(1000) < 0)
                        break;
        } while (c->done == 0);
}

int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
{
        LASSERT(c != NULL);
        do {
                if (cfs_call_wait_handler(1000) < 0)
                        break;
        } while (c->done == 0);
        return 0;
}

/*
 * rw_semaphore:
 *
 * - DECLARE_RWSEM(x)
 * - init_rwsem(x)
 * - down_read(x)
 * - up_read(x)
 * - down_write(x)
 * - up_write(x)
 */
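
/*
 * Usage sketch (illustrative only): with no concurrency these are stubs and
 * both trylock variants always return 1 (success).
 *
 *      cfs_rw_semaphore_t sem;
 *
 *      cfs_init_rwsem(&sem);
 *
 *      cfs_down_read(&sem);
 *      ... readers ...
 *      cfs_up_read(&sem);
 *
 *      cfs_down_write(&sem);
 *      ... writer ...
 *      cfs_up_write(&sem);
 *
 *      cfs_fini_rwsem(&sem);
 */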

void cfs_init_rwsem(cfs_rw_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
}

void cfs_down_read(cfs_rw_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
}

int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
        return 1;
}

void cfs_down_write(cfs_rw_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
}

int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
        return 1;
}

void cfs_up_read(cfs_rw_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
}

void cfs_up_write(cfs_rw_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
}

void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
{
        LASSERT(s != NULL);
        (void)s;
}

#ifdef HAVE_LIBPTHREAD

/*
 * Multi-threaded user space completion
 */
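
/*
 * Usage sketch (illustrative only): unlike the single-threaded variant
 * above, this one really blocks on a pthread condition variable, and each
 * cfs_mt_complete() lets one waiter through (c_done is a counter).
 *
 *      cfs_mt_completion_t c;
 *
 *      cfs_mt_init_completion(&c);
 *      ... in the signalling thread:  cfs_mt_complete(&c); ...
 *      cfs_mt_wait_for_completion(&c);
 *      cfs_mt_fini_completion(&c);
 */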

void cfs_mt_init_completion(cfs_mt_completion_t *c)
{
        LASSERT(c != NULL);
        c->c_done = 0;
        pthread_mutex_init(&c->c_mut, NULL);
        pthread_cond_init(&c->c_cond, NULL);
}

void cfs_mt_fini_completion(cfs_mt_completion_t *c)
{
        LASSERT(c != NULL);
        pthread_mutex_destroy(&c->c_mut);
        pthread_cond_destroy(&c->c_cond);
}

void cfs_mt_complete(cfs_mt_completion_t *c)
{
        LASSERT(c != NULL);
        pthread_mutex_lock(&c->c_mut);
        c->c_done++;
        pthread_cond_signal(&c->c_cond);
        pthread_mutex_unlock(&c->c_mut);
}

void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
{
        LASSERT(c != NULL);
        pthread_mutex_lock(&c->c_mut);
        while (c->c_done == 0)
                pthread_cond_wait(&c->c_cond, &c->c_mut);
        c->c_done--;
        pthread_mutex_unlock(&c->c_mut);
}

/*
 * Multi-threaded user space atomic primitives
 */
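
/*
 * Usage sketch (illustrative only): all of these serialise on a single
 * process-wide mutex (atomic_guard_lock below), which is simple but means
 * every "atomic" in the process contends on the same lock.
 *
 *      cfs_mt_atomic_t refcount;
 *
 *      cfs_mt_atomic_set(&refcount, 1);
 *      cfs_mt_atomic_inc(&refcount);
 *      if (cfs_mt_atomic_dec_and_test(&refcount))
 *              ... last reference dropped ...
 */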

static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;

int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
{
        int r;

        pthread_mutex_lock(&atomic_guard_lock);
        r = a->counter;
        pthread_mutex_unlock(&atomic_guard_lock);
        return r;
}

void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
{
        pthread_mutex_lock(&atomic_guard_lock);
        a->counter = b;
        pthread_mutex_unlock(&atomic_guard_lock);
}

int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
{
        int r;

        pthread_mutex_lock(&atomic_guard_lock);
        r = --a->counter;
        pthread_mutex_unlock(&atomic_guard_lock);
        return (r == 0);
}

void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
{
        pthread_mutex_lock(&atomic_guard_lock);
        ++a->counter;
        pthread_mutex_unlock(&atomic_guard_lock);
}

void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
{
        pthread_mutex_lock(&atomic_guard_lock);
        --a->counter;
        pthread_mutex_unlock(&atomic_guard_lock);
}

void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
{
        pthread_mutex_lock(&atomic_guard_lock);
        a->counter += b;
        pthread_mutex_unlock(&atomic_guard_lock);
}

void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
{
        pthread_mutex_lock(&atomic_guard_lock);
        a->counter -= b;
        pthread_mutex_unlock(&atomic_guard_lock);
}

#endif /* HAVE_LIBPTHREAD */


/* !__KERNEL__ */
#endif

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 80
 * scroll-step: 1
 * End:
 */