/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/user-lock.c
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 */

/* Implementations of portable synchronization APIs for liblustre */

/*
 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
 *
 * XXX Liang: several branches share lnet with b_hd_newconfig; if we define
 * the lock APIs here, they will conflict with liblustre in other branches.
 */

#include <libcfs/libcfs.h>

/*
 * Optional debugging (magic stamping and checking ownership) can be added.
 */

/*
 * spin_lock
 *
 * - spin_lock_init(x), spin_lock(x), spin_unlock(x), spin_trylock(x)
 * - spin_lock_nested(x, subclass)
 * - spin_lock_bh_init(x), spin_lock_bh(x), spin_unlock_bh(x)
 * - spin_lock_irqsave(x, f), spin_unlock_irqrestore(x, f)
 *
 * No-op implementation.
 */

void spin_lock_init(spinlock_t *lock)
{
        LASSERT(lock != NULL);
}

void spin_lock(spinlock_t *lock)
{
        (void)lock;
}

void spin_unlock(spinlock_t *lock)
{
        (void)lock;
}

int spin_trylock(spinlock_t *lock)
{
        (void)lock;
        return 1;
}

void spin_lock_bh_init(spinlock_t *lock)
{
        LASSERT(lock != NULL);
}

void spin_lock_bh(spinlock_t *lock)
{
        LASSERT(lock != NULL);
}

void spin_unlock_bh(spinlock_t *lock)
{
        LASSERT(lock != NULL);
}

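/*
 * Example usage (illustrative sketch, not from the original file; "foo_lock"
 * and "foo_count" are made-up names): callers keep the normal locking
 * discipline even though the calls above compile to no-ops in the
 * single-threaded liblustre build.
 *
 *      spinlock_t foo_lock;
 *      int foo_count;
 *
 *      spin_lock_init(&foo_lock);
 *      spin_lock(&foo_lock);
 *      foo_count++;
 *      spin_unlock(&foo_lock);
 */
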
void sema_init(struct semaphore *s, int val)
{
        LASSERT(s != NULL);
        (void)val;
}

void __down(struct semaphore *s)
{
        LASSERT(s != NULL);
}

int __down_interruptible(struct semaphore *s)
{
        LASSERT(s != NULL);
        return 0;
}

void __up(struct semaphore *s)
{
        LASSERT(s != NULL);
}

/*
 * Completion:
 *
 * - init_completion(c)
 * - complete(c)
 * - wait_for_completion(c)
 * - wait_for_completion_interruptible(c)
 */

static wait_handler_t wait_handler;

void init_completion_module(wait_handler_t handler)
{
        wait_handler = handler;
}

int call_wait_handler(int timeout)
{
        LASSERT(wait_handler != NULL);
        return wait_handler(timeout);
}

void init_completion(struct completion *c)
{
        c->done = 0;
        cfs_waitq_init(&c->wait);
}

void complete(struct completion *c)
{
        c->done = 1;
        cfs_waitq_signal(&c->wait);
}

void wait_for_completion(struct completion *c)
{
        do {
                if (call_wait_handler(1000) < 0)
                        break;
        } while (c->done == 0);
}

int wait_for_completion_interruptible(struct completion *c)
{
        do {
                if (call_wait_handler(1000) < 0)
                        return -EINTR;
        } while (c->done == 0);
        return 0;
}

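/*
 * Example usage (illustrative sketch, not from the original file;
 * "my_wait_handler" is a hypothetical application callback): a wait handler
 * must be registered first, since wait_for_completion() polls it in 1000 ms
 * slices until ->done has been set by complete().
 *
 *      static int my_wait_handler(int timeout)
 *      {
 *              // drive event/network processing for up to 'timeout' ms;
 *              // a negative return aborts the wait loop
 *              return 0;
 *      }
 *
 *      struct completion c;
 *
 *      init_completion_module(my_wait_handler);
 *      init_completion(&c);
 *      ...                     // event processing eventually calls complete(&c)
 *      wait_for_completion(&c);
 */
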
void init_rwsem(struct rw_semaphore *s)
{
        LASSERT(s != NULL);
}

void down_read(struct rw_semaphore *s)
{
        LASSERT(s != NULL);
}

int down_read_trylock(struct rw_semaphore *s)
{
        LASSERT(s != NULL);
        return 1;
}

void down_write(struct rw_semaphore *s)
{
        LASSERT(s != NULL);
}

int down_write_trylock(struct rw_semaphore *s)
{
        LASSERT(s != NULL);
        return 1;
}

void up_read(struct rw_semaphore *s)
{
        LASSERT(s != NULL);
}

void up_write(struct rw_semaphore *s)
{
        LASSERT(s != NULL);
}

void fini_rwsem(struct rw_semaphore *s)
{
        LASSERT(s != NULL);
}

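/*
 * Example usage (illustrative sketch, not from the original file): the
 * trylock variants follow the kernel convention of returning 1 on success,
 * which in the single-threaded case is always.
 *
 *      struct rw_semaphore sem;
 *
 *      init_rwsem(&sem);
 *      down_read(&sem);
 *      ...                     // shared readers
 *      up_read(&sem);
 *      if (down_write_trylock(&sem)) {
 *              ...             // exclusive writer
 *              up_write(&sem);
 *      }
 *      fini_rwsem(&sem);
 */
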
#ifdef HAVE_LIBPTHREAD

/*
 * Multi-threaded user space completion
 */

void mt_init_completion(mt_completion_t *c)
{
        LASSERT(c != NULL);
        c->c_done = 0;
        pthread_mutex_init(&c->c_mut, NULL);
        pthread_cond_init(&c->c_cond, NULL);
}

void mt_fini_completion(mt_completion_t *c)
{
        LASSERT(c != NULL);
        pthread_mutex_destroy(&c->c_mut);
        pthread_cond_destroy(&c->c_cond);
}

void mt_complete(mt_completion_t *c)
{
        LASSERT(c != NULL);
        pthread_mutex_lock(&c->c_mut);
        c->c_done++;
        pthread_cond_signal(&c->c_cond);
        pthread_mutex_unlock(&c->c_mut);
}

void mt_wait_for_completion(mt_completion_t *c)
{
        LASSERT(c != NULL);
        pthread_mutex_lock(&c->c_mut);
        while (c->c_done == 0)
                pthread_cond_wait(&c->c_cond, &c->c_mut);
        c->c_done--;
        pthread_mutex_unlock(&c->c_mut);
}

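/*
 * Example usage (illustrative sketch, not from the original file; "worker"
 * and "tid" are made-up names, error handling omitted): unlike the
 * single-threaded completion above, mt_completion_t really sleeps, so it can
 * hand work between pthreads.
 *
 *      static mt_completion_t done;
 *
 *      static void *worker(void *arg)
 *      {
 *              ...                     // do the work
 *              mt_complete(&done);     // wake the waiter
 *              return NULL;
 *      }
 *
 *      pthread_t tid;
 *
 *      mt_init_completion(&done);
 *      pthread_create(&tid, NULL, worker, NULL);
 *      mt_wait_for_completion(&done);  // blocks until worker signals
 *      pthread_join(tid, NULL);
 *      mt_fini_completion(&done);
 */
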
/*
 * Multi-threaded user space atomic primitives
 */

static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;

int mt_atomic_read(mt_atomic_t *a)
{
        int r;

        pthread_mutex_lock(&atomic_guard_lock);
        r = a->counter;
        pthread_mutex_unlock(&atomic_guard_lock);
        return r;
}

void mt_atomic_set(mt_atomic_t *a, int b)
{
        pthread_mutex_lock(&atomic_guard_lock);
        a->counter = b;
        pthread_mutex_unlock(&atomic_guard_lock);
}

int mt_atomic_dec_and_test(mt_atomic_t *a)
{
        int r;

        pthread_mutex_lock(&atomic_guard_lock);
        r = --a->counter;
        pthread_mutex_unlock(&atomic_guard_lock);
        return (r == 0);
}

void mt_atomic_inc(mt_atomic_t *a)
{
        pthread_mutex_lock(&atomic_guard_lock);
        a->counter++;
        pthread_mutex_unlock(&atomic_guard_lock);
}

void mt_atomic_dec(mt_atomic_t *a)
{
        pthread_mutex_lock(&atomic_guard_lock);
        a->counter--;
        pthread_mutex_unlock(&atomic_guard_lock);
}

void mt_atomic_add(int b, mt_atomic_t *a)
{
        pthread_mutex_lock(&atomic_guard_lock);
        a->counter += b;
        pthread_mutex_unlock(&atomic_guard_lock);
}

void mt_atomic_sub(int b, mt_atomic_t *a)
{
        pthread_mutex_lock(&atomic_guard_lock);
        a->counter -= b;
        pthread_mutex_unlock(&atomic_guard_lock);
}

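/*
 * Example usage (illustrative sketch, not from the original file; "refcount"
 * and "free_the_object()" are made-up names): every operation serializes on
 * the single atomic_guard_lock mutex, which is correct but not scalable.  A
 * typical use is a reference count shared between threads.
 *
 *      static mt_atomic_t refcount;
 *
 *      mt_atomic_set(&refcount, 1);            // creator holds a reference
 *      mt_atomic_inc(&refcount);               // another user takes one
 *      ...
 *      if (mt_atomic_dec_and_test(&refcount))  // each user drops its own;
 *              free_the_object();              // the last drop frees
 */
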
#endif /* HAVE_LIBPTHREAD */

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * End:
 */