Whamcloud - gitweb
f826fef26395d059a7ab7cbfb7eb68302077bae4
[fs/lustre-release.git] / lnet / include / libcfs / darwin / darwin-lock.h
1 #ifndef __LIBCFS_DARWIN_CFS_LOCK_H__
2 #define __LIBCFS_DARWIN_CFS_LOCK_H__
3
4 #ifndef __LIBCFS_LIBCFS_H__
5 #error Do not #include this file directly. #include <libcfs/libcfs.h> instead
6 #endif
7
8 #ifdef  __KERNEL__
9 #include <mach/sync_policy.h>
10 #include <mach/task.h>
11 #include <mach/semaphore.h>
12 #include <kern/assert.h>
13 #include <kern/thread.h>
14
15 #include <libcfs/darwin/darwin-types.h>
16 #include <libcfs/darwin/darwin-sync.h>
17
18 /*
19  * spin_lock (use Linux kernel's primitives)
20  * 
21  * - spin_lock_init(x)
22  * - spin_lock(x)
23  * - spin_unlock(x)
24  * - spin_trylock(x)
25  * 
26  * - spin_lock_irqsave(x, f)
27  * - spin_unlock_irqrestore(x, f)
28  */
/*
 * Spinlock, layered on the Darwin kspin primitive from darwin-sync.h.
 * Mirrors the Linux spinlock_t interface for portability.
 */
struct spin_lock {
        struct kspin spin;      /* underlying Darwin spinlock */
};

typedef struct spin_lock spinlock_t;
34
35 static inline void spin_lock_init(spinlock_t *lock)
36 {
37         kspin_init(&lock->spin);
38 }
39
40 static inline void spin_lock(spinlock_t *lock)
41 {
42         kspin_lock(&lock->spin);
43 }
44
45 static inline void spin_unlock(spinlock_t *lock)
46 {
47         kspin_unlock(&lock->spin);
48 }
49
50 static inline int spin_trylock(spinlock_t *lock)
51 {
52         return kspin_trylock(&lock->spin);
53 }
54
55 static inline void spin_lock_done(spinlock_t *lock)
56 {
57         kspin_done(&lock->spin);
58 }
59
/*
 * XXX(review): it is unclear whether mapping the _bh variants straight to
 * plain spin_lock() actually locks out timer callbacks on Darwin the way
 * Linux's _bh variants lock out softirqs — TODO: confirm.  The original
 * code carried a bare `#error "does this lock out timer callbacks?"`
 * here, which made the header uncompilable for every includer; the open
 * question is preserved as this note instead of a build break.
 */
#define spin_lock_bh(x)         spin_lock(x)
#define spin_unlock_bh(x)       spin_unlock(x)
#define spin_lock_bh_init(x)    spin_lock_init(x)
64
/* Mach/XNU primitive: sets the interrupt-enable state and returns the
 * previous state so it can be restored later. */
extern boolean_t ml_set_interrupts_enabled(boolean_t enable);
/* Disable interrupts; evaluates to the prior state for __enable_irq(). */
#define __disable_irq()         ml_set_interrupts_enabled(FALSE)
/* Restore the interrupt state previously returned by __disable_irq(). */
#define __enable_irq(x)         (void) ml_set_interrupts_enabled(x)
68
/* Disable interrupts (saving the previous state in @f), then take @s. */
#define spin_lock_irqsave(s, f)                 \
do {                                            \
        f = __disable_irq();                    \
        spin_lock(s);                           \
} while (0)

/* Release @s, then restore the interrupt state saved in @f. */
#define spin_unlock_irqrestore(s, f)            \
do {                                            \
        spin_unlock(s);                         \
        __enable_irq(f);                        \
} while (0)
76
77 /* 
78  * Semaphore
79  *
80  * - sema_init(x, v)
81  * - __down(x)
82  * - __up(x)
83  */
/* Counting semaphore, layered on the Darwin ksem primitive. */
struct semaphore {
        struct ksem sem;        /* underlying Darwin semaphore */
};
87
88 static inline void sema_init(struct semaphore *s, int val)
89 {
90         ksem_init(&s->sem, val);
91 }
92
93 static inline void __down(struct semaphore *s)
94 {
95         ksem_down(&s->sem, 1);
96 }
97
98 static inline void __up(struct semaphore *s)
99 {
100         ksem_up(&s->sem, 1);
101 }
102
103 /*
104  * Mutex:
105  *
106  * - init_mutex(x)
107  * - init_mutex_locked(x)
108  * - mutex_up(x)
109  * - mutex_down(x)
110  */
111
/* Mutexes are binary semaphores here: up == unlock, down == lock. */
#define mutex_up(s)                     __up(s)
#define mutex_down(s)                   __down(s)

/* Count 1 == start unlocked; count 0 == start locked. */
#define init_mutex(x)                   sema_init(x, 1)
#define init_mutex_locked(x)            sema_init(x, 0)
117
118 /*
119  * Completion:
120  *
121  * - init_completion(c)
122  * - complete(c)
123  * - wait_for_completion(c)
124  */
/* Completion event: wait_for_completion() blocks until complete() fires. */
struct completion {
        /*
         * Emulate completion by semaphore for now.
         *
         * XXX nikita: this is not safe if completion is used to synchronize
         * exit from kernel daemon thread and kext unloading. In this case
         * some core function (a la complete_and_exit()) is needed.
         */
        struct ksem sem;        /* starts at 0; complete() posts, wait consumes */
};
135
136 static inline void init_completion(struct completion *c)
137 {
138         ksem_init(&c->sem, 0);
139 }
140
141 static inline void complete(struct completion *c)
142 {
143         ksem_up(&c->sem, 1);
144 }
145
146 static inline void wait_for_completion(struct completion *c)
147 {
148         ksem_down(&c->sem, 1);
149 }
150
151 /*
152  * rw_semaphore:
153  *
154  * - DECLARE_RWSEM(x)
155  * - init_rwsem(x)
156  * - down_read(x)
157  * - up_read(x)
158  * - down_write(x)
159  * - up_write(x)
160  */
/* Reader/writer semaphore, layered on the Darwin krw_sem primitive. */
struct rw_semaphore {
        struct krw_sem s;       /* underlying Darwin rw-semaphore */
};
164
165 static inline void init_rwsem(struct rw_semaphore *s)
166 {
167         krw_sem_init(&s->s);
168 }
169
170 static inline void fini_rwsem(struct rw_semaphore *s)
171 {
172         krw_sem_done(&s->s);
173 }
174
175 static inline void down_read(struct rw_semaphore *s)
176 {
177         krw_sem_down_r(&s->s);
178 }
179
180 static inline int down_read_trylock(struct rw_semaphore *s)
181 {
182         int ret = krw_sem_down_r_try(&s->s);
183         return ret == 0;
184 }
185
186 static inline void down_write(struct rw_semaphore *s)
187 {
188         krw_sem_down_w(&s->s);
189 }
190
191 static inline int down_write_trylock(struct rw_semaphore *s)
192 {
193         int ret = krw_sem_down_w_try(&s->s);
194         return ret == 0;
195 }
196
197 static inline void up_read(struct rw_semaphore *s)
198 {
199         krw_sem_up_r(&s->s);
200 }
201
202 static inline void up_write(struct rw_semaphore *s)
203 {
204         krw_sem_up_w(&s->s);
205 }
206
207 /* 
208  * read-write lock : Need to be investigated more!!
209  *
210  * - DECLARE_RWLOCK(l)
211  * - rwlock_init(x)
212  * - read_lock(x)
213  * - read_unlock(x)
214  * - write_lock(x)
215  * - write_unlock(x)
216  */
/* Reader/writer spinlock, mapped directly onto the krw_spin primitive. */
typedef struct krw_spin rwlock_t;

/* Initialise @pl; must precede any lock/unlock call. */
#define rwlock_init(pl)                 krw_spin_init(pl)

#define read_lock(l)                    krw_spin_down_r(l)
#define read_unlock(l)                  krw_spin_up_r(l)
#define write_lock(l)                   krw_spin_down_w(l)
#define write_unlock(l)                 krw_spin_up_w(l)
225
/* IRQ-saving variants: disable interrupts around the lock acquisition
 * and restore the saved state after release, mirroring Linux semantics. */
#define write_lock_irqsave(l, f)                \
do {                                            \
        f = __disable_irq();                    \
        write_lock(l);                          \
} while (0)

#define write_unlock_irqrestore(l, f)           \
do {                                            \
        write_unlock(l);                        \
        __enable_irq(f);                        \
} while (0)

#define read_lock_irqsave(l, f)                 \
do {                                            \
        f = __disable_irq();                    \
        read_lock(l);                           \
} while (0)

#define read_unlock_irqrestore(l, f)            \
do {                                            \
        read_unlock(l);                         \
        __enable_irq(f);                        \
} while (0)
241 /*
242  * Funnel: 
243  *
244  * Safe funnel in/out
245  */
#ifdef __DARWIN8__

/* Darwin 8+: the kernel no longer uses funnels, so all of these are
 * no-ops.  NOTE(review): CFS_DECL_CONE_DATA/CFS_DECL_NET_DATA still
 * expand to DECLARE_FUNNEL_DATA, which is not defined in this branch —
 * presumably supplied elsewhere or unused; verify. */
#define CFS_DECL_FUNNEL_DATA
#define CFS_DECL_CONE_DATA              DECLARE_FUNNEL_DATA
#define CFS_DECL_NET_DATA               DECLARE_FUNNEL_DATA
#define CFS_CONE_IN                     do {} while(0)
#define CFS_CONE_EX                     do {} while(0)

#define CFS_NET_IN                      do {} while(0)
#define CFS_NET_EX                      do {} while(0)

#else

/* Pre-Darwin-8: declare per-scope funnel bookkeeping used by the
 * CFS_*_IN/CFS_*_EX enter/exit macros below. */
#define CFS_DECL_FUNNEL_DATA                    \
        boolean_t    __funnel_state = FALSE;    \
        funnel_t    *__funnel
#define CFS_DECL_CONE_DATA              CFS_DECL_FUNNEL_DATA
#define CFS_DECL_NET_DATA               CFS_DECL_FUNNEL_DATA

/* Enter/exit the kernel (cone) funnel; state is saved in the locals
 * declared by CFS_DECL_CONE_DATA. */
void lustre_cone_in(boolean_t *state, funnel_t **cone);
void lustre_cone_ex(boolean_t state, funnel_t *cone);

#define CFS_CONE_IN lustre_cone_in(&__funnel_state, &__funnel)
#define CFS_CONE_EX lustre_cone_ex(__funnel_state, __funnel)

/* Enter/exit the network funnel; same bookkeeping as above. */
void lustre_net_in(boolean_t *state, funnel_t **cone);
void lustre_net_ex(boolean_t state, funnel_t *cone);

#define CFS_NET_IN  lustre_net_in(&__funnel_state, &__funnel)
#define CFS_NET_EX  lustre_net_ex(__funnel_state, __funnel)

#endif
278
279 #else
280 #include <libcfs/user-lock.h>
281 #endif /* __KERNEL__ */
282
/* __LIBCFS_DARWIN_CFS_LOCK_H__ */
284 #endif