1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/libcfs/user-lock.c
38 * Author: Nikita Danilov <nikita@clusterfs.com>
41 /* Implementations of portable synchronization APIs for liblustre */
44 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
46 * XXX Liang: There are several branches share lnet with b_hd_newconfig,
47 * if we define lock APIs at here, there will be conflict with liblustre
53 #include <libcfs/libcfs.h>
56 * Optional debugging (magic stamping and checking ownership) can be added.
64 * - spin_lock_nested(x, subclass)
68 * - spin_lock_irqsave(x, f)
69 * - spin_unlock_irqrestore(x, f)
71 * No-op implementation.
/*
 * Single-threaded spinlock stubs for liblustre.  Per the section header
 * above ("No-op implementation"), these do no actual locking; the init
 * functions only assert the lock pointer is non-NULL.
 *
 * NOTE(review): this excerpt elides lines (the embedded original line
 * numbers jump and braces/return statements are missing) -- restore the
 * full bodies from upstream before compiling.
 */
74 void cfs_spin_lock_init(cfs_spinlock_t *lock)
76 LASSERT(lock != NULL);

/* No-op acquire (single-threaded build). */
80 void cfs_spin_lock(cfs_spinlock_t *lock)

/* No-op release. */
85 void cfs_spin_unlock(cfs_spinlock_t *lock)

/* Trylock stub; presumably always succeeds -- return value elided here,
 * TODO confirm against upstream. */
90 int cfs_spin_trylock(cfs_spinlock_t *lock)

/* Bottom-half variants: same no-op pattern as above. */
96 void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
98 LASSERT(lock != NULL);

102 void cfs_spin_lock_bh(cfs_spinlock_t *lock)
104 LASSERT(lock != NULL);

108 void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
110 LASSERT(lock != NULL);
/*
 * Semaphore stubs for the single-threaded liblustre build.  Bodies are
 * elided in this excerpt (embedded line numbers jump 122->129->135->142);
 * presumably trivial no-ops like the spinlock stubs above -- TODO confirm
 * against upstream user-lock.c.
 */
122 void cfs_sema_init(cfs_semaphore_t *s, int val)

/* Down (P) operation stub. */
129 void __down(cfs_semaphore_t *s)

/* Interruptible down; return value semantics not visible here. */
135 int __down_interruptible(cfs_semaphore_t *s)

/* Up (V) operation stub. */
142 void __up(cfs_semaphore_t *s)
152 * - init_completion(c)
154 * - wait_for_completion(c)

/*
 * Completion support for liblustre.  A caller-supplied wait handler
 * (registered via cfs_init_completion_module) drives the event loop
 * while a completion is pending.
 */
157 static cfs_wait_handler_t wait_handler;

/* Register the process-wide wait handler used by completions below. */
159 void cfs_init_completion_module(cfs_wait_handler_t handler)
161 wait_handler = handler;

/* Invoke the registered handler with a timeout.  NOTE(review): the
 * visible line calls through the pointer unconditionally; a NULL-handler
 * guard presumably sits on the elided lines (165-167) -- confirm. */
164 int cfs_call_wait_handler(int timeout)
168 return wait_handler(timeout);

/* Initialize a completion: c->done reset (elided) and its waitqueue set up. */
171 void cfs_init_completion(cfs_completion_t *c)
175 cfs_waitq_init(&c->wait);

/* Signal a completion: mark done (elided line) and wake one waiter. */
178 void cfs_complete(cfs_completion_t *c)
182 cfs_waitq_signal(&c->wait);

/* Busy-wait via the registered handler until c->done becomes non-zero.
 * The 1000 passed to the handler is presumably a millisecond timeout --
 * TODO confirm; the < 0 error path's body is elided here. */
185 void cfs_wait_for_completion(cfs_completion_t *c)
189 if (cfs_call_wait_handler(1000) < 0)
191 } while (c->done == 0);

/* Interruptible variant of the wait loop above; return value elided. */
194 int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
198 if (cfs_call_wait_handler(1000) < 0)
200 } while (c->done == 0);
/*
 * Reader/writer semaphore stubs for the single-threaded build.  Only the
 * signatures survive in this excerpt (all bodies elided); presumably
 * no-ops like the spinlock section above -- TODO confirm upstream.
 */
215 void cfs_init_rwsem(cfs_rw_semaphore_t *s)

221 void cfs_down_read(cfs_rw_semaphore_t *s)

/* Trylock variant; return value not visible here. */
227 int cfs_down_read_trylock(cfs_rw_semaphore_t *s)

234 void cfs_down_write(cfs_rw_semaphore_t *s)

240 int cfs_down_write_trylock(cfs_rw_semaphore_t *s)

247 void cfs_up_read(cfs_rw_semaphore_t *s)

253 void cfs_up_write(cfs_rw_semaphore_t *s)

/* Teardown counterpart to cfs_init_rwsem. */
259 void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
265 #ifdef HAVE_LIBPTHREAD

268 * Multi-threaded user space completion

/* Initialize: set up the mutex and condition variable guarding c_done
 * (c_done reset itself is on an elided line). */
271 void cfs_mt_init_completion(cfs_mt_completion_t *c)
275 pthread_mutex_init(&c->c_mut, NULL);
276 pthread_cond_init(&c->c_cond, NULL);

/* Destroy the pthread primitives created by cfs_mt_init_completion. */
279 void cfs_mt_fini_completion(cfs_mt_completion_t *c)
282 pthread_mutex_destroy(&c->c_mut);
283 pthread_cond_destroy(&c->c_cond);

/* Signal completion: under the mutex (the c_done update at elided line
 * 290 -- presumably c_done++), wake one waiter. */
286 void cfs_mt_complete(cfs_mt_completion_t *c)
289 pthread_mutex_lock(&c->c_mut);
291 pthread_cond_signal(&c->c_cond);
292 pthread_mutex_unlock(&c->c_mut);

/* Standard condvar wait loop: sleep until c_done becomes non-zero.
 * NOTE(review): elided line 301 presumably consumes the token
 * (c_done--) before unlocking -- confirm against upstream. */
295 void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
298 pthread_mutex_lock(&c->c_mut);
299 while (c->c_done == 0)
300 pthread_cond_wait(&c->c_cond, &c->c_mut);
302 pthread_mutex_unlock(&c->c_mut);
306 * Multi-threaded user space atomic primitives

/*
 * "Atomic" ops emulated with one process-wide pthread mutex: every
 * operation below takes atomic_guard_lock around the access.  The
 * actual read/modify statements fall on elided lines in this excerpt;
 * only the lock/unlock pairs are visible.
 */
309 static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;

/* Read a->counter (elided line 316) under the guard lock. */
311 int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
315 pthread_mutex_lock(&atomic_guard_lock);
317 pthread_mutex_unlock(&atomic_guard_lock);

/* Store b into the counter under the guard lock. */
321 void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
323 pthread_mutex_lock(&atomic_guard_lock);
325 pthread_mutex_unlock(&atomic_guard_lock);

/* Decrement and report whether the counter hit zero (test on an elided
 * line) -- TODO confirm the exact return expression upstream. */
328 int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
332 pthread_mutex_lock(&atomic_guard_lock);
334 pthread_mutex_unlock(&atomic_guard_lock);

/* Increment under the guard lock. */
338 void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
340 pthread_mutex_lock(&atomic_guard_lock);
342 pthread_mutex_unlock(&atomic_guard_lock);

/* Decrement under the guard lock. */
345 void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
347 pthread_mutex_lock(&atomic_guard_lock);
349 pthread_mutex_unlock(&atomic_guard_lock);

/* Add b under the guard lock.  (Kernel-style argument order: value
 * first, atomic second.) */
351 void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
354 pthread_mutex_lock(&atomic_guard_lock);
356 pthread_mutex_unlock(&atomic_guard_lock);

/* Subtract b under the guard lock. */
359 void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
361 pthread_mutex_lock(&atomic_guard_lock);
363 pthread_mutex_unlock(&atomic_guard_lock);

366 #endif /* HAVE_LIBPTHREAD */
374 * c-indentation-style: "K&R"