1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/include/libcfs/user-lock.h
38 * Author: Nikita Danilov <nikita@clusterfs.com>
41 #ifndef __LIBCFS_USER_LOCK_H__
42 #define __LIBCFS_USER_LOCK_H__
44 #ifndef __LIBCFS_LIBCFS_H__
45 #error Do not #include this file directly. #include <libcfs/libcfs.h> instead
48 /* Implementations of portable synchronization APIs for liblustre */
51 * liblustre is single-threaded, so most "synchronization" APIs are trivial.
* XXX Liang: There are several branches that share lnet with b_hd_newconfig,
54 * if we define lock APIs at here, there will be conflict with liblustre
61 * The userspace implementations of linux/spinlock.h vary; we just
62 * include our own for all of them
64 #define __LINUX_SPINLOCK_H
67 * Optional debugging (magic stamping and checking ownership) can be added.
78 * - spin_lock_irqsave(x, f)
79 * - spin_unlock_irqrestore(x, f)
81 * No-op implementation.
/*
 * No-op spinlock for single-threaded liblustre: there is no concurrency,
 * so "locking" only needs to satisfy the API.  The dummy member exists
 * because ISO C forbids empty structs.
 */
struct spin_lock {int foo;};

typedef struct spin_lock spinlock_t;

/* { 0 } rather than an empty initializer list: the empty form is a
 * GNU extension, while ISO C requires at least one initializer. */
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

/* Assertion helpers: no run-time effect, but the argument is still
 * type-checked via sizeof. */
#define LASSERT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
#define LINVRNT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
#define LASSERT_SEM_LOCKED(sem) do { (void)sizeof(sem); } while (0)

/* Out-of-line no-op implementations (see the companion .c file). */
void spin_lock_init(spinlock_t *lock);
void spin_lock(spinlock_t *lock);
void spin_unlock(spinlock_t *lock);
int spin_trylock(spinlock_t *lock);
void spin_lock_bh_init(spinlock_t *lock);
void spin_lock_bh(spinlock_t *lock);
void spin_unlock_bh(spinlock_t *lock);

/* Single-threaded: any lock is trivially "held" by the caller. */
static inline int spin_is_locked(spinlock_t *l) { (void)l; return 1; }

/* IRQ variants are no-ops; the (void) casts silence unused-parameter
 * warnings without changing behaviour. */
static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f)
{
        (void)l;
        (void)f;
}

static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f)
{
        (void)l;
        (void)f;
}
/* Counting semaphore for single-threaded liblustre.
 * NOTE(review): the struct body and closing brace are not visible in this
 * chunk of the file -- presumably just a counter; confirm upstream. */
typedef struct semaphore {
void sema_init(struct semaphore *s, int val); /* set initial count to 'val' */
void __down(struct semaphore *s); /* P operation: acquire (decrement) */
void __up(struct semaphore *s); /* V operation: release (increment) */
* - init_mutex_locked(x)
#define DECLARE_MUTEX(name) \
struct semaphore name = { 1 } /* initial count 1 == unlocked "mutex" */
#define mutex_up(s) __up(s) /* release */
#define up(s) mutex_up(s)
#define mutex_down(s) __down(s) /* acquire */
#define down(s) mutex_down(s)
/* count 1 == unlocked, count 0 == locked */
#define init_MUTEX(x) sema_init(x, 1)
#define init_MUTEX_LOCKED(x) sema_init(x, 0)
#define init_mutex(s) init_MUTEX(s)
* - init_completion(c)
* - wait_for_completion(c)
/* Pluggable wait callback: presumably invoked while blocked in
 * wait_for_completion() -- see the companion .c for semantics. */
typedef int (*cfs_wait_handler_t) (int timeout);
void init_completion_module(cfs_wait_handler_t handler); /* register handler */
int call_wait_handler(int timeout); /* invoke the registered handler */
void init_completion(struct completion *c);
void complete(struct completion *c); /* signal completion of 'c' */
void wait_for_completion(struct completion *c); /* block until complete() */
int wait_for_completion_interruptible(struct completion *c);
/* First member is 'done' (cf. INIT_COMPLETION below); second is the
 * wait queue head. */
#define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#define DECLARE_COMPLETION(work) \
struct completion work = COMPLETION_INITIALIZER(work)
#define INIT_COMPLETION(x) ((x).done = 0) /* re-arm for reuse */
/* Reader/writer semaphore API (implemented out of line).
 * NOTE(review): the struct body is not visible in this chunk. */
struct rw_semaphore {
void init_rwsem(struct rw_semaphore *s);
void down_read(struct rw_semaphore *s); /* acquire for reading */
int down_read_trylock(struct rw_semaphore *s);
void down_write(struct rw_semaphore *s); /* acquire for writing */
int down_write_trylock(struct rw_semaphore *s);
void up_read(struct rw_semaphore *s); /* release read side */
void up_write(struct rw_semaphore *s); /* release write side */
void fini_rwsem(struct rw_semaphore *s);
* read-write lock : Need to be investigated more!!
* XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
* - DECLARE_RWLOCK(l)
/* rwlock_t is mapped directly onto rw_semaphore (see XXX above). */
typedef struct rw_semaphore rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { } /* NOTE(review): empty {} is a GNU extension; {0} would be ISO C */
#define rwlock_init(pl) init_rwsem(pl)
#define read_lock(l) down_read(l)
#define read_unlock(l) up_read(l)
#define write_lock(l) down_write(l)
#define write_unlock(l) up_write(l)
write_lock_irqsave(rwlock_t *l, unsigned long f) { write_lock(l); } /* 'f' unused: no IRQs in userspace */
write_unlock_irqrestore(rwlock_t *l, unsigned long f) { write_unlock(l); }
read_lock_irqsave(rwlock_t *l, unsigned long f) { read_lock(l); }
read_unlock_irqrestore(rwlock_t *l, unsigned long f) { read_unlock(l); }
221 * Atomic for user-space
222 * Copied from liblustre
/*
 * Userspace atomic_t for single-threaded liblustre: plain (non-atomic)
 * integer operations behind the kernel atomic API.  "volatile" only
 * prevents compiler caching; it provides no SMP atomicity, which is
 * acceptable because liblustre is single-threaded.
 *
 * Every macro argument is parenthesized in the expansion (PRE01-C) so
 * that expression arguments such as "x + 1" cannot misparse.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i) { (i) }

#define atomic_read(a) ((a)->counter)
#define atomic_set(a, b) do { (a)->counter = (b); } while (0)
#define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
/* Single-threaded: the "lock" argument is accepted but ignored. */
#define atomic_dec_and_lock(a, b) ((--((a)->counter)) == 0)
#define atomic_inc(a) (((a)->counter)++)
#define atomic_dec(a) do { (a)->counter--; } while (0)
#define atomic_add(b, a) do { (a)->counter += (b); } while (0)
/* *_return variants yield the post-operation value. */
#define atomic_add_return(n, a) ((a)->counter += (n))
#define atomic_inc_return(a) atomic_add_return(1, a)
#define atomic_sub(b, a) do { (a)->counter -= (b); } while (0)
#define atomic_sub_return(n, a) ((a)->counter -= (n))
#define atomic_dec_return(a) atomic_sub_return(1, a)
#ifdef HAVE_LIBPTHREAD
/* Real (pthread-backed) completion, used when libpthread is available. */
struct cfs_completion {
pthread_cond_t c_cond; /* NOTE(review): presumably signalled by cfs_complete(); definitions are out of line */
pthread_mutex_t c_mut; /* paired with c_cond for wait/signal */
void cfs_init_completion(struct cfs_completion *c);
void cfs_fini_completion(struct cfs_completion *c);
void cfs_complete(struct cfs_completion *c);
void cfs_wait_for_completion(struct cfs_completion *c);
/* Function-based atomic counter (out-of-line definitions). */
typedef struct { volatile int counter; } cfs_atomic_t;
int cfs_atomic_read(cfs_atomic_t *a);
void cfs_atomic_set(cfs_atomic_t *a, int b);
int cfs_atomic_dec_and_test(cfs_atomic_t *a); /* cf. atomic_dec_and_test() above */
void cfs_atomic_inc(cfs_atomic_t *a);
void cfs_atomic_dec(cfs_atomic_t *a);
void cfs_atomic_add(int b, cfs_atomic_t *a);
void cfs_atomic_sub(int b, cfs_atomic_t *a);
#endif /* HAVE_LIBPTHREAD */
/**************************************************************************
 **************************************************************************/
/* struct mutex wraps a semaphore (the struct head is not visible in this
 * chunk of the file). */
struct semaphore m_sem; /* underlying semaphore; count 1 == unlocked */
#define DEFINE_MUTEX(m) struct mutex m /* declaration only -- no initializer; call mutex_init() */
static inline void mutex_init(struct mutex *mutex) /* count <- 1 (unlocked), via init_mutex() */
init_mutex(&mutex->m_sem);
static inline void mutex_lock(struct mutex *mutex) /* acquire */
mutex_down(&mutex->m_sem);
static inline void mutex_unlock(struct mutex *mutex) /* release */
mutex_up(&mutex->m_sem);
* Try-lock this mutex.
* \retval 0 try-lock succeeded (lock acquired).
* \retval errno indicates lock contention.
static inline int mutex_down_trylock(struct mutex *mutex) /* body not visible in this chunk */
* Try-lock this mutex.
* Note, return values are negation of what is expected from down_trylock() or
* pthread_mutex_trylock().
* \retval 1 try-lock succeeded (lock acquired).
* \retval 0 indicates lock contention.
static inline int mutex_trylock(struct mutex *mutex)
return !mutex_down_trylock(mutex); /* invert: 1 == acquired (see note above) */
static inline void mutex_destroy(struct mutex *lock) /* presumably a no-op -- body not visible in this chunk */
* This is for use in assertions _only_, i.e., this function should always
* \retval 1 mutex is locked.
* \retval 0 mutex is not locked. This should never happen.
static inline int mutex_is_locked(struct mutex *lock)
/**************************************************************************
 * Lockdep "implementation". Also see lustre_compat25.h
 **************************************************************************/
/* Dummy lock class key: lockdep does real work only in the kernel.
 * NOTE(review): struct body and stub function bodies are not visible in
 * this chunk of the file. */
struct lock_class_key {
static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
static inline void lockdep_off(void)
static inline void lockdep_on(void)
/* This has to be a macro, so that can be undefined in kernels that do not
 * support lockdep. */
/* The "subclass" nesting annotation is discarded; the plain lock call
 * remains. */
#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
#define spin_lock_nested(lock, subclass) spin_lock(lock)
#define down_read_nested(lock, subclass) down_read(lock)
#define down_write_nested(lock, subclass) down_write(lock)
380 /* __LIBCFS_USER_LOCK_H__ */
384 * c-indentation-style: "K&R"