/*
 * lnet/include/libcfs/darwin/darwin-lock.h
 * (from fs/lustre-release.git; landed portals branch b_port_step)
 */
1 #ifndef __LIBCFS_DARWIN_CFS_LOCK_H__
2 #define __LIBCFS_DARWIN_CFS_LOCK_H__
3
4 #ifndef __LIBCFS_LIBCFS_H__
5 #error Do not #include this file directly. #include <libcfs/libcfs.h> instead
6 #endif
7
8 #ifdef  __KERNEL__
9 #include <mach/sync_policy.h>
10 #include <mach/task.h>
11 #include <mach/semaphore.h>
12 #include <mach/mach_traps.h>
13
14 /* spin lock types and operations */
15 #include <kern/simple_lock.h>
16 #include <kern/assert.h>
17 #include <kern/thread.h>
18
19 #include <libcfs/darwin/darwin-types.h>
20 #include <libcfs/darwin/darwin-sync.h>
21
/*
 * spin_lock (emulates the Linux kernel's spinlock API on Darwin)
 *
 * - spin_lock_init(x)
 * - spin_lock(x)
 * - spin_unlock(x)
 * - spin_trylock(x)
 *
 * - spin_lock_irqsave(x, f)
 * - spin_unlock_irqrestore(x, f)
 */
/*
 * Linux-compatible spinlock, backed by the kspin primitive from
 * darwin-sync.h.  The wrapper struct keeps the Linux type name
 * (spinlock_t) distinct from the Darwin implementation type.
 */
struct spin_lock {
        struct kspin spin;
};

typedef struct spin_lock spinlock_t;
38
39 static inline void spin_lock_init(spinlock_t *lock)
40 {
41         kspin_init(&lock->spin);
42 }
43
44 static inline void spin_lock(spinlock_t *lock)
45 {
46         kspin_lock(&lock->spin);
47 }
48
49 static inline void spin_unlock(spinlock_t *lock)
50 {
51         kspin_unlock(&lock->spin);
52 }
53
54 static inline int spin_trylock(spinlock_t *lock)
55 {
56         return kspin_trylock(&lock->spin);
57 }
58
/* No softirq/bottom-half concept on Darwin: _bh variants are plain ops. */
#define spin_lock_bh(x)         spin_lock(x)
#define spin_unlock_bh(x)       spin_unlock(x)
#define spin_lock_bh_init(x)    spin_lock_init(x)

/*
 * ml_set_interrupts_enabled() is the XNU primitive that sets the CPU
 * interrupt state and returns the previous state, which we hand back
 * to the caller as the "flags" value.
 */
extern boolean_t ml_set_interrupts_enabled(boolean_t enable);
#define __disable_irq()         (spl_t) ml_set_interrupts_enabled(FALSE)
#define __enable_irq(x)         (void) ml_set_interrupts_enabled(x)
66
/* Disable interrupts (saving prior state in @f), then take the lock. */
#define spin_lock_irqsave(s, f)         do{                     \
                                        f = __disable_irq();    \
                                        spin_lock(s);   }while(0)

/* Drop the lock, then restore the interrupt state saved in @f. */
#define spin_unlock_irqrestore(s, f)    do{                     \
                                        spin_unlock(s);         \
                                        __enable_irq(f);}while(0)
74
75 /* 
76  * Semaphore
77  *
78  * - sema_init(x, v)
79  * - __down(x)
80  * - __up(x)
81  */
/* Counting semaphore, backed by the ksem primitive from darwin-sync.h. */
struct semaphore {
        struct ksem sem;
};
85
86 static inline void sema_init(struct semaphore *s, int val)
87 {
88         ksem_init(&s->sem, val);
89 }
90
91 static inline void __down(struct semaphore *s)
92 {
93         ksem_down(&s->sem, 1);
94 }
95
96 static inline void __up(struct semaphore *s)
97 {
98         ksem_up(&s->sem, 1);
99 }
100
101 /*
102  * Mutex:
103  *
104  * - init_mutex(x)
105  * - init_mutex_locked(x)
106  * - mutex_up(x)
107  * - mutex_down(x)
108  */
109
/* A mutex is a semaphore with count 1 (or 0 for the locked variant). */
#define mutex_up(s)                     __up(s)
#define mutex_down(s)                   __down(s)

#define init_mutex(x)                   sema_init(x, 1)
#define init_mutex_locked(x)            sema_init(x, 0)
115
116 /*
117  * Completion:
118  *
119  * - init_completion(c)
120  * - complete(c)
121  * - wait_for_completion(c)
122  */
/* Completion, emulated with a semaphore whose count starts at zero. */
struct completion {
        /*
         * Emulate completion by semaphore for now.
         *
         * XXX nikita: this is not safe if completion is used to synchronize
         * exit from kernel daemon thread and kext unloading. In this case
         * some core function (a la complete_and_exit()) is needed.
         */
        struct ksem sem;
};
133
134 static inline void init_completion(struct completion *c)
135 {
136         ksem_init(&c->sem, 0);
137 }
138
139 static inline void complete(struct completion *c)
140 {
141         ksem_up(&c->sem, 1);
142 }
143
144 static inline void wait_for_completion(struct completion *c)
145 {
146         ksem_down(&c->sem, 1);
147 }
148
149 /*
150  * rw_semaphore:
151  *
152  * - DECLARE_RWSEM(x)
153  * - init_rwsem(x)
154  * - down_read(x)
155  * - up_read(x)
156  * - down_write(x)
157  * - up_write(x)
158  */
/* Reader/writer semaphore, backed by krw_sem from darwin-sync.h. */
struct rw_semaphore {
        struct krw_sem s;
};
162
163 static inline void init_rwsem(struct rw_semaphore *s)
164 {
165         krw_sem_init(&s->s);
166 }
167
168 static inline void down_read(struct rw_semaphore *s)
169 {
170         krw_sem_down_r(&s->s);
171 }
172
173 static inline int down_read_trylock(struct rw_semaphore *s)
174 {
175         int ret = krw_sem_down_r_try(&s->s);
176         return ret == 0? 1: 0;
177 }
178
179 static inline void down_write(struct rw_semaphore *s)
180 {
181         krw_sem_down_w(&s->s);
182 }
183
184 static inline int down_write_trylock(struct rw_semaphore *s)
185 {
186         int ret = krw_sem_down_w_try(&s->s);
187         return ret == 0? 1: 0;
188 }
189
190 static inline void up_read(struct rw_semaphore *s)
191 {
192         krw_sem_up_r(&s->s);
193 }
194
195 static inline void up_write(struct rw_semaphore *s)
196 {
197         krw_sem_up_w(&s->s);
198 }
199
200 /* 
201  * read-write lock : Need to be investigated more!!
202  * XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
203  *
204  * - DECLARE_RWLOCK(l)
205  * - rwlock_init(x)
206  * - read_lock(x)
207  * - read_unlock(x)
208  * - write_lock(x)
209  * - write_unlock(x)
210  */
/*
 * rwlock_t is currently just an alias for rw_semaphore (see the XXX
 * note above), so readers/writers may sleep rather than spin.
 */
typedef struct rw_semaphore rwlock_t;

#define rwlock_init(pl)         init_rwsem(pl)

#define read_lock(l)            down_read(l)
#define read_unlock(l)          up_read(l)
#define write_lock(l)           down_write(l)
#define write_unlock(l)         up_write(l)
219
/* irqsave variants: disable interrupts (saving state in @f), then lock;
 * unlock, then restore the saved state.  Mirrors spin_lock_irqsave(). */
#define write_lock_irqsave(l, f)        do{                     \
                                        f = __disable_irq();    \
                                        write_lock(l);  }while(0)

#define write_unlock_irqrestore(l, f)   do{                     \
                                        write_unlock(l);        \
                                        __enable_irq(f);}while(0)

#define read_lock_irqsave(l, f)         do{                     \
                                        f = __disable_irq();    \
                                        read_lock(l);   }while(0)

#define read_unlock_irqrestore(l, f)    do{                     \
                                        read_unlock(l);         \
                                        __enable_irq(f);}while(0)
235
236 /*
237  * Funnel: 
238  *
239  * Safe funnel in/out
240  */
241
/*
 * Declares the per-function state the CFS_*_IN/EX macros below rely on:
 * the saved funnel-held flag and the funnel pointer itself.
 */
#define CFS_DECL_FUNNEL_DATA                    \
        boolean_t    __funnel_state = FALSE;    \
        funnel_t    *__funnel
#define CFS_DECL_CONE_DATA              CFS_DECL_FUNNEL_DATA
#define CFS_DECL_NET_DATA               CFS_DECL_FUNNEL_DATA

/* Enter/exit the kernel funnel; prior state is saved/restored so the
 * pairs nest safely.  Defined in the corresponding darwin .c file. */
void lustre_cone_in(boolean_t *state, funnel_t **cone);
void lustre_cone_ex(boolean_t state, funnel_t *cone);

#define CFS_CONE_IN lustre_cone_in(&__funnel_state, &__funnel)
#define CFS_CONE_EX lustre_cone_ex(__funnel_state, __funnel)

/* Same pattern for the network funnel. */
void lustre_net_in(boolean_t *state, funnel_t **cone);
void lustre_net_ex(boolean_t state, funnel_t *cone);

#define CFS_NET_IN  lustre_net_in(&__funnel_state, &__funnel)
#define CFS_NET_EX  lustre_net_ex(__funnel_state, __funnel)
259
260 /* __KERNEL__ */
261 #endif
262
263 /* __XNU_CFS_LOCK_H */
264 #endif