1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/libcfs/user-lock.c
38 * Author: Nikita Danilov <nikita@clusterfs.com>
41 /* Implementations of portable synchronization APIs for liblustre */
44 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
46 * XXX Liang: There are several branches share lnet with b_hd_newconfig,
47 * if we define lock APIs at here, there will be conflict with liblustre
53 #include <libcfs/libcfs.h>
56 * Optional debugging (magic stamping and checking ownership) can be added.
64 * - spin_lock_nested(x, subclass)
68 * - spin_lock_irqsave(x, f)
69 * - spin_unlock_irqrestore(x, f)
71 * No-op implementation.
/*
 * Single-threaded (liblustre) spinlock stubs.  Per the section comment
 * above, these are no-op implementations: liblustre runs single-threaded,
 * so no real mutual exclusion is required.  NOTE(review): this extract
 * elides braces and parts of each body; only the lines below are shown.
 */
/* Initialize a spinlock stub; the visible body only validates the pointer. */
74 void cfs_spin_lock_init(cfs_spinlock_t *lock)
76 LASSERT(lock != NULL);
/* Acquire — no-op (no body visible beyond the signature in this extract). */
80 void cfs_spin_lock(cfs_spinlock_t *lock)
/* Release — no-op counterpart of cfs_spin_lock(). */
85 void cfs_spin_unlock(cfs_spinlock_t *lock)
/* Try-acquire — returns int; presumably always "acquired" since locking
 * is a no-op here — TODO confirm against the elided body. */
90 int cfs_spin_trylock(cfs_spinlock_t *lock)
/* Bottom-half-safe variants: same no-op pattern, pointer asserted only. */
96 void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
98 LASSERT(lock != NULL);
102 void cfs_spin_lock_bh(cfs_spinlock_t *lock)
104 LASSERT(lock != NULL);
108 void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
110 LASSERT(lock != NULL);
/*
 * Semaphore stubs for single-threaded liblustre.  Bodies are elided in
 * this extract; per the file's section comment these "synchronization"
 * APIs are trivial in the single-threaded case.
 */
/* Initialize semaphore @s with initial count @val (body elided here). */
122 void cfs_sema_init(cfs_semaphore_t *s, int val)
/* P operation (decrement/wait) — body elided in this extract. */
129 void __down(cfs_semaphore_t *s)
/* V operation (increment/signal) — body elided in this extract. */
135 void __up(cfs_semaphore_t *s)
145 * - init_completion(c)
147 * - wait_for_completion(c)
/*
 * Pluggable wait callback used by the single-threaded completion code
 * below.  The application registers a handler once at startup; completion
 * waits then poll via that handler instead of blocking on a real
 * condition variable.
 */
/* File-scope registered handler; NULL until cfs_init_completion_module()
 * runs.  NOTE(review): the NULL-handler guard before the call in
 * cfs_call_wait_handler(), if any, is elided from this extract — confirm
 * in the full source. */
150 static cfs_wait_handler_t wait_handler;
/* Register the application's wait callback. */
152 void cfs_init_completion_module(cfs_wait_handler_t handler)
154 wait_handler = handler;
/* Invoke the registered handler with @timeout; propagates its return
 * value (negative on error, judging by the caller's `< 0` checks below). */
157 int cfs_call_wait_handler(int timeout)
161 return wait_handler(timeout);
/*
 * Single-threaded completion: a done-flag plus a waitqueue, with waiting
 * implemented by polling the registered wait handler (see above) rather
 * than blocking.  Braces and some statements are elided in this extract.
 */
/* Initialize completion @c: set up its waitqueue (c->done reset is
 * presumably in the elided lines — TODO confirm). */
164 void cfs_init_completion(cfs_completion_t *c)
168 cfs_waitq_init(&c->wait);
/* Mark @c complete and wake any waiter on its waitqueue. */
171 void cfs_complete(cfs_completion_t *c)
175 cfs_waitq_signal(&c->wait);
/* Wait (by polling the wait handler with a 1000 timeout) until c->done
 * becomes non-zero.  Behavior on handler error (< 0) is in elided lines. */
178 void cfs_wait_for_completion(cfs_completion_t *c)
182 if (cfs_call_wait_handler(1000) < 0)
184 } while (c->done == 0);
/* Interruptible variant: same polling loop; the int return presumably
 * reports interruption/error — TODO confirm against the elided body. */
187 int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
191 if (cfs_call_wait_handler(1000) < 0)
193 } while (c->done == 0);
/*
 * Reader/writer semaphore stubs for single-threaded liblustre.  All
 * bodies are elided in this extract; per the file's section comment
 * these are trivial when only one thread exists.
 */
208 void cfs_init_rwsem(cfs_rw_semaphore_t *s)
/* Acquire for reading — no contention possible single-threaded. */
214 void cfs_down_read(cfs_rw_semaphore_t *s)
/* Try-acquire for reading; int return — presumably always succeeds,
 * body elided here. */
220 int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
/* Acquire for writing. */
227 void cfs_down_write(cfs_rw_semaphore_t *s)
/* Try-acquire for writing; int return — body elided in this extract. */
233 int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
/* Release a read acquisition. */
240 void cfs_up_read(cfs_rw_semaphore_t *s)
/* Release a write acquisition. */
246 void cfs_up_write(cfs_rw_semaphore_t *s)
/* Tear down @s (counterpart of cfs_init_rwsem). */
252 void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
258 #ifdef HAVE_LIBPTHREAD
/*
 * Multi-threaded user space completion, backed by a pthread mutex plus
 * condition variable.  Braces and some statements (e.g. the c_done
 * updates) are elided in this extract.
 */
261 * Multi-threaded user space completion
/* Initialize @c: create its mutex and condvar (default attributes).
 * c_done initialization is presumably in the elided lines — TODO confirm. */
264 void cfs_mt_init_completion(cfs_mt_completion_t *c)
268 pthread_mutex_init(&c->c_mut, NULL);
269 pthread_cond_init(&c->c_cond, NULL);
/* Destroy @c's mutex and condvar; @c must have no waiters at this point. */
272 void cfs_mt_fini_completion(cfs_mt_completion_t *c)
275 pthread_mutex_destroy(&c->c_mut);
276 pthread_cond_destroy(&c->c_cond);
/* Signal completion: under c_mut, wake one waiter via the condvar.
 * (The c_done store is in an elided line — presumably set before the
 * signal; confirm in the full source.) */
279 void cfs_mt_complete(cfs_mt_completion_t *c)
282 pthread_mutex_lock(&c->c_mut);
284 pthread_cond_signal(&c->c_cond);
285 pthread_mutex_unlock(&c->c_mut);
/* Block until c_done becomes non-zero.  The while-loop re-check guards
 * against spurious condvar wakeups, as POSIX requires. */
288 void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
291 pthread_mutex_lock(&c->c_mut);
292 while (c->c_done == 0)
293 pthread_cond_wait(&c->c_cond, &c->c_mut);
295 pthread_mutex_unlock(&c->c_mut);
/*
 * Multi-threaded user space "atomic" primitives.  Every operation takes
 * one process-wide mutex (atomic_guard_lock), so all cfs_mt_atomic_t
 * updates serialize against each other — simple, but a global
 * serialization point.  The actual counter reads/writes between the
 * lock/unlock pairs are elided in this extract.
 */
299 * Multi-threaded user space atomic primitives
/* Single guard mutex shared by every cfs_mt_atomic_* operation below. */
302 static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;
/* Read @a's value under the guard lock. */
304 int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
308 pthread_mutex_lock(&atomic_guard_lock);
310 pthread_mutex_unlock(&atomic_guard_lock);
/* Set @a to @b under the guard lock. */
314 void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
316 pthread_mutex_lock(&atomic_guard_lock);
318 pthread_mutex_unlock(&atomic_guard_lock);
/* Decrement @a; int return presumably reports whether it hit zero
 * (kernel atomic_dec_and_test convention) — TODO confirm, body elided. */
321 int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
325 pthread_mutex_lock(&atomic_guard_lock);
327 pthread_mutex_unlock(&atomic_guard_lock);
/* Increment @a under the guard lock. */
331 void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
333 pthread_mutex_lock(&atomic_guard_lock);
335 pthread_mutex_unlock(&atomic_guard_lock);
/* Decrement @a under the guard lock. */
338 void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
340 pthread_mutex_lock(&atomic_guard_lock);
342 pthread_mutex_unlock(&atomic_guard_lock);
/* Add @b to @a under the guard lock (note: operand-first argument order,
 * matching the kernel atomic_add(int, atomic_t *) convention). */
344 void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
347 pthread_mutex_lock(&atomic_guard_lock);
349 pthread_mutex_unlock(&atomic_guard_lock);
/* Subtract @b from @a under the guard lock. */
352 void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
354 pthread_mutex_lock(&atomic_guard_lock);
356 pthread_mutex_unlock(&atomic_guard_lock);
359 #endif /* HAVE_LIBPTHREAD */
367 * c-indentation-style: "K&R"