4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/libcfs/user-lock.c
38 * Author: Nikita Danilov <nikita@clusterfs.com>
41 /* Implementations of portable synchronization APIs for liblustre */
44 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
46 * XXX Liang: There are several branches share lnet with b_hd_newconfig,
47 * if we define lock APIs at here, there will be conflict with liblustre
53 #include <libcfs/libcfs.h>
56 * Optional debugging (magic stamping and checking ownership) can be added.
64 * - spin_lock_nested(x, subclass)
68 * - spin_lock_irqsave(x, f)
69 * - spin_unlock_irqrestore(x, f)
71 * No-op implementation.
74 void spin_lock_init(spinlock_t *lock)
76 LASSERT(lock != NULL);
80 void spin_lock(spinlock_t *lock)
85 void spin_unlock(spinlock_t *lock)
90 int spin_trylock(spinlock_t *lock)
96 void spin_lock_bh_init(spinlock_t *lock)
98 LASSERT(lock != NULL);
102 void spin_lock_bh(spinlock_t *lock)
104 LASSERT(lock != NULL);
108 void spin_unlock_bh(spinlock_t *lock)
110 LASSERT(lock != NULL);
122 void sema_init(struct semaphore *s, int val)
129 void __down(struct semaphore *s)
135 int __down_interruptible(struct semaphore *s)
142 void __up(struct semaphore *s)
152 * - init_completion(c)
154 * - wait_for_completion(c)
156 static wait_handler_t wait_handler;
158 void init_completion_module(wait_handler_t handler)
160 wait_handler = handler;
163 int call_wait_handler(int timeout)
167 return wait_handler(timeout);
170 #ifndef HAVE_LIBPTHREAD
171 void init_completion(struct completion *c)
175 cfs_waitq_init(&c->wait);
/* Tear down a completion.  Nothing to release in the single-threaded
 * implementation. */
void fini_completion(struct completion *c)
{
	(void)c;
}
182 void complete(struct completion *c)
186 cfs_waitq_signal(&c->wait);
189 void wait_for_completion(struct completion *c)
193 if (call_wait_handler(1000) < 0)
195 } while (c->done == 0);
198 int wait_for_completion_interruptible(struct completion *c)
202 if (call_wait_handler(1000) < 0)
204 } while (c->done == 0);
207 #endif /* HAVE_LIBPTHREAD */
220 void init_rwsem(struct rw_semaphore *s)
226 void down_read(struct rw_semaphore *s)
232 int down_read_trylock(struct rw_semaphore *s)
239 void down_write(struct rw_semaphore *s)
245 int down_write_trylock(struct rw_semaphore *s)
252 void up_read(struct rw_semaphore *s)
258 void up_write(struct rw_semaphore *s)
264 void fini_rwsem(struct rw_semaphore *s)
270 #ifdef HAVE_LIBPTHREAD
273 * Multi-threaded user space completion
276 void init_completion(struct completion *c)
280 pthread_mutex_init(&c->c_mut, NULL);
281 pthread_cond_init(&c->c_cond, NULL);
284 void fini_completion(struct completion *c)
287 pthread_mutex_destroy(&c->c_mut);
288 pthread_cond_destroy(&c->c_cond);
291 void complete(struct completion *c)
294 pthread_mutex_lock(&c->c_mut);
296 pthread_cond_signal(&c->c_cond);
297 pthread_mutex_unlock(&c->c_mut);
300 void wait_for_completion(struct completion *c)
303 pthread_mutex_lock(&c->c_mut);
304 while (c->c_done == 0)
305 pthread_cond_wait(&c->c_cond, &c->c_mut);
307 pthread_mutex_unlock(&c->c_mut);
311 * Multi-threaded user space atomic primitives
314 static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;
316 int mt_atomic_read(mt_atomic_t *a)
320 pthread_mutex_lock(&atomic_guard_lock);
322 pthread_mutex_unlock(&atomic_guard_lock);
326 void mt_atomic_set(mt_atomic_t *a, int b)
328 pthread_mutex_lock(&atomic_guard_lock);
330 pthread_mutex_unlock(&atomic_guard_lock);
333 int mt_atomic_dec_and_test(mt_atomic_t *a)
337 pthread_mutex_lock(&atomic_guard_lock);
339 pthread_mutex_unlock(&atomic_guard_lock);
343 void mt_atomic_inc(mt_atomic_t *a)
345 pthread_mutex_lock(&atomic_guard_lock);
347 pthread_mutex_unlock(&atomic_guard_lock);
350 void mt_atomic_dec(mt_atomic_t *a)
352 pthread_mutex_lock(&atomic_guard_lock);
354 pthread_mutex_unlock(&atomic_guard_lock);
356 void mt_atomic_add(int b, mt_atomic_t *a)
359 pthread_mutex_lock(&atomic_guard_lock);
361 pthread_mutex_unlock(&atomic_guard_lock);
364 void mt_atomic_sub(int b, mt_atomic_t *a)
366 pthread_mutex_lock(&atomic_guard_lock);
368 pthread_mutex_unlock(&atomic_guard_lock);
371 #endif /* HAVE_LIBPTHREAD */
379 * c-indentation-style: "K&R"