/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Darwin (XNU) synchronization primitives
 *
 *  Copyright (c) 2004 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or modify it under
 *   the terms of version 2 of the GNU General Public License as published by
 *   the Free Software Foundation. Lustre is distributed in the hope that it
 *   will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details. You should have received a
 *   copy of the GNU General Public License along with Lustre; if not, write
 *   to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
 *   USA.
 */

/*
 * darwin-sync.c
 *
 * Created by nikita on Sun Jul 18 2004.
 *
 * XNU synchronization primitives.
 */

/*
 * This file contains very simplistic implementations of a (saner) API for
 * basic synchronization primitives:
 *
 *     - spin-lock          (kspin)
 *
 *     - semaphore          (ksem)
 *
 *     - mutex              (kmut)
 *
 *     - condition variable (kcond)
 *
 *     - wait-queue         (ksleep_chan and ksleep_link)
 *
 *     - timer              (ktimer)
 *
 * A lot can be optimized here.
 */

#define DEBUG_SUBSYSTEM S_LNET

#ifdef __DARWIN8__
# include <kern/locks.h>
#else
# include <mach/mach_types.h>
# include <sys/types.h>
# include <kern/simple_lock.h>
#endif

#include <libcfs/libcfs.h>
#include <libcfs/kp30.h>

#define SLASSERT(e) ON_SYNC_DEBUG(LASSERT(e))

#ifdef HAVE_GET_PREEMPTION_LEVEL
extern int get_preemption_level(void);
#else
#define get_preemption_level() (0)
#endif

#if SMP
#ifdef __DARWIN8__

static lck_grp_t       *cfs_lock_grp = NULL;
#warning "Verify definition of lck_spin_t hasn't been changed while building!"

/* hw_lock_* are not exported by Darwin8 */
static inline void xnu_spin_init(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //*s = lck_spin_alloc_init(cfs_lock_grp, LCK_ATTR_NULL);
        lck_spin_init((lck_spin_t *)s, cfs_lock_grp, LCK_ATTR_NULL);
}

static inline void xnu_spin_done(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //lck_spin_free(*s, cfs_lock_grp);
        //*s = NULL;
        lck_spin_destroy((lck_spin_t *)s, cfs_lock_grp);
}

#define xnu_spin_lock(s)        lck_spin_lock((lck_spin_t *)(s))
#define xnu_spin_unlock(s)      lck_spin_unlock((lck_spin_t *)(s))

#warning "Darwin8 does not export lck_spin_try_lock"
#define xnu_spin_try(s)         (1)

#else /* DARWIN8 */
extern void                     hw_lock_init(hw_lock_t);
extern void                     hw_lock_lock(hw_lock_t);
extern void                     hw_lock_unlock(hw_lock_t);
extern unsigned int             hw_lock_to(hw_lock_t, unsigned int);
extern unsigned int             hw_lock_try(hw_lock_t);
extern unsigned int             hw_lock_held(hw_lock_t);

#define xnu_spin_init(s)        hw_lock_init(s)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        hw_lock_lock(s)
#define xnu_spin_unlock(s)      hw_lock_unlock(s)
#define xnu_spin_try(s)         hw_lock_try(s)
#endif /* DARWIN8 */

#else /* SMP */
#define xnu_spin_init(s)        do {} while (0)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        do {} while (0)
#define xnu_spin_unlock(s)      do {} while (0)
#define xnu_spin_try(s)         (1)
#endif /* SMP */

/*
 * Warning: low level libcfs debugging code (libcfs_debug_msg(), for
 * example) uses spin-locks, so debugging output here may lead to nasty
 * surprises.
 *
 * In the uniprocessor build spin-locks compile away to nothing; only the
 * debugging checks remain.
 */

void kspin_init(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        xnu_spin_init(&spin->lock);
        ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
        ON_SYNC_DEBUG(spin->owner = NULL);
}

void kspin_done(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == NULL);
        xnu_spin_done(&spin->lock);
}

void kspin_lock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner != current_thread());

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        xnu_spin_lock(&spin->lock);
        SLASSERT(spin->owner == NULL);
        ON_SYNC_DEBUG(spin->owner = current_thread());
}

void kspin_unlock(struct kspin *spin)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == current_thread());
        ON_SYNC_DEBUG(spin->owner = NULL);
        xnu_spin_unlock(&spin->lock);
}

int kspin_trylock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);

        if (xnu_spin_try(&spin->lock)) {
                SLASSERT(spin->owner == NULL);
                ON_SYNC_DEBUG(spin->owner = current_thread());
                return 1;
        } else
                return 0;
}

#if XNU_SYNC_DEBUG
int kspin_islocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner == current_thread();
}

int kspin_isnotlocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner != current_thread();
}
#endif
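
/*
 * Illustrative sketch (editorial addition, not part of the original code):
 * typical kspin lifecycle. kspin_trylock() returns 1 on success, 0 if the
 * lock is busy.
 */
#if 0
static struct kspin example_lock;
static int example_counter;

static void kspin_usage_example(void)
{
        kspin_init(&example_lock);

        kspin_lock(&example_lock);
        example_counter++;                      /* critical section */
        kspin_unlock(&example_lock);

        if (kspin_trylock(&example_lock)) {     /* non-blocking attempt */
                example_counter++;
                kspin_unlock(&example_lock);
        }

        kspin_done(&example_lock);              /* must be unlocked here */
}
#endif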

/*
 * read/write spin-lock
 */
void krw_spin_init(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);

        kspin_init(&rwspin->guard);
        rwspin->count = 0;
        ON_SYNC_DEBUG(rwspin->magic = KRW_SPIN_MAGIC);
}

void krw_spin_done(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == 0);
        kspin_done(&rwspin->guard);
}

void krw_spin_down_r(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count < 0) {
                /*
                 * A writer holds the lock: drop the guard and busy-wait
                 * for a bounded number of iterations before re-checking
                 * under the guard.
                 */
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count < 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        ++ rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_down_w(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count != 0) {
                /*
                 * Readers or a writer hold the lock: busy-wait outside
                 * the guard, as above.
                 */
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count != 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        rwspin->count = -1;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_r(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count > 0);

        kspin_lock(&rwspin->guard);
        -- rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_w(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == -1);

        kspin_lock(&rwspin->guard);
        rwspin->count = 0;
        kspin_unlock(&rwspin->guard);
}
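
/*
 * Illustrative sketch (editorial addition): reader/writer spin-lock usage.
 * Any number of readers may hold the lock at once; a writer excludes both
 * readers and other writers.
 */
#if 0
static struct krw_spin example_rwspin;

static void krw_spin_usage_example(void)
{
        krw_spin_init(&example_rwspin);

        krw_spin_down_r(&example_rwspin);       /* shared (read) access */
        /* ... read shared state ... */
        krw_spin_up_r(&example_rwspin);

        krw_spin_down_w(&example_rwspin);       /* exclusive (write) access */
        /* ... modify shared state ... */
        krw_spin_up_w(&example_rwspin);

        krw_spin_done(&example_rwspin);
}
#endif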

/*
 * semaphore
 */
#ifdef __DARWIN8__

#define xnu_waitq_init(q, a)            do {} while (0)
#define xnu_waitq_done(q)               do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)   ({wakeup_one((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_wakeup_all(q, e, s)   ({wakeup((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_assert_wait(q, e, s)  assert_wait((e), s)

#else /* DARWIN8 */

#define xnu_waitq_init(q, a)            wait_queue_init((q), a)
#define xnu_waitq_done(q)               do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)   wait_queue_wakeup_one((q), (event_t)(e), s)
#define xnu_waitq_wakeup_all(q, e, s)   wait_queue_wakeup_all((q), (event_t)(e), s)
#define xnu_waitq_assert_wait(q, e, s)  wait_queue_assert_wait((q), (event_t)(e), s)

#endif /* DARWIN8 */

void ksem_init(struct ksem *sem, int value)
{
        SLASSERT(sem != NULL);
        kspin_init(&sem->guard);
        xnu_waitq_init(&sem->q, SYNC_POLICY_FIFO);
        sem->value = value;
        ON_SYNC_DEBUG(sem->magic = KSEM_MAGIC);
}

void ksem_done(struct ksem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        /*
         * XXX nikita: cannot check that &sem->q is empty because
         * wait_queue_empty() is Apple private API.
         */
        kspin_done(&sem->guard);
}

int ksem_up(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        sem->value += value;
        if (sem->value == 0)
                result = xnu_waitq_wakeup_one(&sem->q, sem,
                                              THREAD_AWAKENED);
        else
                result = xnu_waitq_wakeup_all(&sem->q, sem,
                                              THREAD_AWAKENED);
        kspin_unlock(&sem->guard);
        SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
        return (result == KERN_SUCCESS) ? 0 : 1;
}

void ksem_down(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->guard);
        while (sem->value < value) {
                result = xnu_waitq_assert_wait(&sem->q, sem,
                                               THREAD_UNINT);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                kspin_unlock(&sem->guard);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
                kspin_lock(&sem->guard);
        }
        sem->value -= value;
        kspin_unlock(&sem->guard);
}

int ksem_trydown(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        if (sem->value >= value) {
                sem->value -= value;
                result = 0;
        } else
                result = -EBUSY;
        kspin_unlock(&sem->guard);
        return result;
}
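
/*
 * Illustrative sketch (editorial addition): ksem as a counting semaphore.
 * ksem_down() may block, hence the get_preemption_level() == 0 assertion
 * above; ksem_trydown() never blocks.
 */
#if 0
static struct ksem example_sem;

static void ksem_usage_example(void)
{
        ksem_init(&example_sem, 0);             /* no units initially */

        ksem_up(&example_sem, 2);               /* producer: add two units */

        ksem_down(&example_sem, 1);             /* consumer: may sleep */

        if (ksem_trydown(&example_sem, 1) == 0) {
                /* got the second unit without sleeping */
        }

        ksem_done(&example_sem);
}
#endif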

void kmut_init(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        ksem_init(&mut->s, 1);
        ON_SYNC_DEBUG(mut->magic = KMUT_MAGIC);
        ON_SYNC_DEBUG(mut->owner = NULL);
}

void kmut_done(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == NULL);
        ksem_done(&mut->s);
}

void kmut_lock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner != current_thread());
        SLASSERT(get_preemption_level() == 0);

        ksem_down(&mut->s, 1);
        ON_SYNC_DEBUG(mut->owner = current_thread());
}

void kmut_unlock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == current_thread());

        ON_SYNC_DEBUG(mut->owner = NULL);
        ksem_up(&mut->s, 1);
}

int kmut_trylock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return ksem_trydown(&mut->s, 1);
}

#if XNU_SYNC_DEBUG
int kmut_islocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner == current_thread();
}

int kmut_isnotlocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner != current_thread();
}
#endif
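
/*
 * Illustrative sketch (editorial addition): kmut is a sleeping mutex built
 * on a ksem initialized to 1. Locking is not recursive, as the owner
 * assertions above enforce.
 */
#if 0
static struct kmut example_mut;

static void kmut_usage_example(void)
{
        kmut_init(&example_mut);

        kmut_lock(&example_mut);                /* may sleep */
        /* ... critical section ... */
        kmut_unlock(&example_mut);

        if (kmut_trylock(&example_mut) == 0) {  /* 0 on success, -EBUSY else */
                kmut_unlock(&example_mut);
        }

        kmut_done(&example_mut);
}
#endif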

void kcond_init(struct kcond *cond)
{
        SLASSERT(cond != NULL);

        kspin_init(&cond->guard);
        cond->waiters = NULL;
        ON_SYNC_DEBUG(cond->magic = KCOND_MAGIC);
}

void kcond_done(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(cond->waiters == NULL);
        kspin_done(&cond->guard);
}

void kcond_wait(struct kcond *cond, struct kspin *lock)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(lock != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(lock));

        ksem_init(&link.sem, 0);
        kspin_lock(&cond->guard);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);
        kspin_unlock(lock);

        ksem_down(&link.sem, 1);

        /*
         * Take and release ->guard so that a broadcaster, which traverses
         * the waiters list under ->guard, is guaranteed to be done with
         * @link (which lives on our stack) before we return.
         */
        kspin_lock(&cond->guard);
        kspin_unlock(&cond->guard);
        kspin_lock(lock);
}

void kcond_wait_guard(struct kcond *cond)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        ksem_init(&link.sem, 0);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);

        ksem_down(&link.sem, 1);

        kspin_lock(&cond->guard);
}

void kcond_signal_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        link = cond->waiters;
        if (link != NULL) {
                cond->waiters = link->next;
                ksem_up(&link->sem, 1);
        }
}

void kcond_signal(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_signal_guard(cond);
        kspin_unlock(&cond->guard);
}

void kcond_broadcast_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        for (link = cond->waiters; link != NULL; link = link->next)
                ksem_up(&link->sem, 1);
        cond->waiters = NULL;
}

void kcond_broadcast(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_broadcast_guard(cond);
        kspin_unlock(&cond->guard);
}
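
/*
 * Illustrative sketch (editorial addition): the classic condition-variable
 * pattern. The predicate must be re-checked in a loop, because kcond_wait()
 * drops @lock while sleeping.
 */
#if 0
static struct kspin example_guard;
static struct kcond example_cond;
static int example_ready;

static void kcond_waiter_example(void)
{
        kspin_lock(&example_guard);
        while (!example_ready)
                kcond_wait(&example_cond, &example_guard);
        /* example_ready is true and example_guard is held here */
        kspin_unlock(&example_guard);
}

static void kcond_signaller_example(void)
{
        kspin_lock(&example_guard);
        example_ready = 1;
        kspin_unlock(&example_guard);
        kcond_signal(&example_cond);            /* or kcond_broadcast() */
}
#endif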

void krw_sem_init(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);

        kcond_init(&sem->cond);
        sem->count = 0;
        ON_SYNC_DEBUG(sem->magic = KRW_MAGIC);
}

void krw_sem_done(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == 0);
        kcond_done(&sem->cond);
}

void krw_sem_down_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count < 0)
                kcond_wait_guard(&sem->cond);
        ++ sem->count;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_r_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count < 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        ++ sem->count;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_down_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count != 0)
                kcond_wait_guard(&sem->cond);
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_w_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count != 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_up_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count > 0);

        kspin_lock(&sem->cond.guard);
        -- sem->count;
        if (sem->count == 0)
                kcond_broadcast_guard(&sem->cond);
        kspin_unlock(&sem->cond.guard);
}

void krw_sem_up_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == -1);

        kspin_lock(&sem->cond.guard);
        sem->count = 0;
        kspin_unlock(&sem->cond.guard);
        kcond_broadcast(&sem->cond);
}
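
/*
 * Illustrative sketch (editorial addition): the sleeping read/write
 * semaphore. Unlike krw_spin, contended acquisitions sleep on the embedded
 * kcond instead of busy-waiting.
 */
#if 0
static struct krw_sem example_rwsem;

static void krw_sem_usage_example(void)
{
        krw_sem_init(&example_rwsem);

        krw_sem_down_r(&example_rwsem);         /* may sleep */
        /* ... read shared state ... */
        krw_sem_up_r(&example_rwsem);

        if (krw_sem_down_w_try(&example_rwsem) == 0) {
                /* exclusive access obtained without sleeping */
                krw_sem_up_w(&example_rwsem);
        }

        krw_sem_done(&example_rwsem);
}
#endif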

void ksleep_chan_init(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);

        kspin_init(&chan->guard);
        CFS_INIT_LIST_HEAD(&chan->waiters);
        ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
}

void ksleep_chan_done(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(list_empty(&chan->waiters));
        kspin_done(&chan->guard);
}

void ksleep_link_init(struct ksleep_link *link)
{
        SLASSERT(link != NULL);

        CFS_INIT_LIST_HEAD(&link->linkage);
        link->flags = 0;
        link->event = current_thread();
        link->hits  = 0;
        link->forward = NULL;
        ON_SYNC_DEBUG(link->magic = KSLEEP_LINK_MAGIC);
}

void ksleep_link_done(struct ksleep_link *link)
{
        SLASSERT(link != NULL);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));
}

void ksleep_add(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));

        kspin_lock(&chan->guard);
        if (link->flags & KSLEEP_EXCLUSIVE)
                list_add_tail(&link->linkage, &chan->waiters);
        else
                list_add(&link->linkage, &chan->waiters);
        kspin_unlock(&chan->guard);
}

void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);

        kspin_lock(&chan->guard);
        list_del_init(&link->linkage);
        kspin_unlock(&chan->guard);
}

static int has_hits(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event && scan->hits > 0) {
                        /* consume hit */
                        -- scan->hits;
                        return 1;
                }
        }
        return 0;
}

static void add_hit(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event) {
                        ++ scan->hits;
                        break;
                }
        }
}

void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
{
        event_t event;
        int     result;

        ENTRY;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                result = assert_wait(event, state);
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
        } else
                kspin_unlock(&chan->guard);
        EXIT;
}

/*
 * Sleep on @chan for no longer than @timeout nanoseconds. Return the
 * remaining sleep time (non-zero only if the thread was woken by a signal
 * (not currently implemented), or if the waitq was already in the
 * "signalled" state).
 */
int64_t ksleep_timedwait(struct ksleep_chan *chan,
                         cfs_task_state_t state,
                         __u64 timeout)
{
        event_t event;

        ENTRY;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                int   result;
                __u64 expire = 0;

                result = assert_wait(event, state);
                if (timeout > 0) {
                        /*
                         * arm a timer. thread_set_timer()'s first argument is
                         * uint32_t, so we have to cook the deadline ourselves.
                         */
                        nanoseconds_to_absolutetime(timeout, &expire);
                        clock_absolutetime_interval_to_deadline(expire, &expire);
                        thread_set_timer_deadline(expire);
                }
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        result = thread_block(THREAD_CONTINUE_NULL);
                thread_cancel_timer();

                if (result == THREAD_TIMED_OUT)
                        timeout = 0;
                else if (timeout > 0) {
                        /* a timer was armed: report how much of it is left */
                        __u64 now;

                        clock_get_uptime(&now);
                        if (expire > now)
                                absolutetime_to_nanoseconds(expire - now, &timeout);
                        else
                                timeout = 0;
                }
        } else {
                /* just return the timeout: we already have a pending hit and
                 * do not need to wait */
                kspin_unlock(&chan->guard);
        }

        RETURN(timeout);
}

/*
 * wake up a single exclusive waiter (plus some arbitrary number of
 * non-exclusive ones)
 */
void ksleep_wake(struct ksleep_chan *chan)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
        ksleep_wake_nr(chan, 1);
}

/*
 * wake up all waiters on @chan
 */
void ksleep_wake_all(struct ksleep_chan *chan)
{
        ENTRY;
        ksleep_wake_nr(chan, 0);
        EXIT;
}

/*
 * Wake up no more than @nr exclusive waiters on @chan, plus some arbitrary
 * number of non-exclusive ones. If @nr is 0, wake up all waiters.
 */
void ksleep_wake_nr(struct ksleep_chan *chan, int nr)
{
        struct ksleep_link *scan;
        int result;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);

        kspin_lock(&chan->guard);
        list_for_each_entry(scan, &chan->waiters, linkage) {
                struct ksleep_chan *forward;

                forward = scan->forward;
                if (forward != NULL)
                        kspin_lock(&forward->guard);
                result = thread_wakeup(scan->event);
                SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
                if (result == KERN_NOT_WAITING) {
                        /* the target was not blocked: record the hit so that
                         * a subsequent ksleep_wait() does not sleep */
                        ++ scan->hits;
                        if (forward != NULL)
                                add_hit(forward, scan->event);
                }
                if (forward != NULL)
                        kspin_unlock(&forward->guard);
                if ((scan->flags & KSLEEP_EXCLUSIVE) && --nr == 0)
                        break;
        }
        kspin_unlock(&chan->guard);
}
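
/*
 * Illustrative sketch (editorial addition): the wait-queue protocol. The
 * per-link "hits" counter recorded by ksleep_wake_nr() is what keeps a
 * wake-up issued between ksleep_add() and ksleep_wait() from being lost.
 * THREAD_UNINT is assumed to be a valid cfs_task_state_t here, matching
 * its use in ksem_down() above.
 */
#if 0
static struct ksleep_chan example_chan;
static int example_event_happened;

static void ksleep_waiter_example(void)
{
        struct ksleep_link link;

        ksleep_link_init(&link);
        ksleep_add(&example_chan, &link);
        while (!example_event_happened)
                ksleep_wait(&example_chan, THREAD_UNINT);
        ksleep_del(&example_chan, &link);
        ksleep_link_done(&link);
}

static void ksleep_waker_example(void)
{
        example_event_happened = 1;
        ksleep_wake(&example_chan);
}
#endif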

void ktimer_init(struct ktimer *t, void (*func)(void *), void *arg)
{
        SLASSERT(t != NULL);
        SLASSERT(func != NULL);

        kspin_init(&t->guard);
        t->func = func;
        t->arg  = arg;
        ON_SYNC_DEBUG(t->magic = KTIMER_MAGIC);
}

void ktimer_done(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);
        kspin_done(&t->guard);
        ON_SYNC_DEBUG(t->magic = 0);
}

static void ktimer_actor(void *arg0, void *arg1)
{
        struct ktimer *t;
        int            armed;

        t = arg0;
        /*
         * this assumes that ktimers are never freed.
         */
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * call the actual timer function
         */
        kspin_lock(&t->guard);
        armed = t->armed;
        t->armed = 0;
        kspin_unlock(&t->guard);

        if (armed)
                t->func(t->arg);
}

extern boolean_t thread_call_func_cancel(thread_call_func_t, thread_call_param_t, boolean_t);
extern void thread_call_func_delayed(thread_call_func_t, thread_call_param_t, __u64);

static void ktimer_disarm_locked(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        thread_call_func_cancel(ktimer_actor, t, FALSE);
}

/*
 * The received deadline is in nanoseconds, but the time checked by
 * thread_call is absolute time (the abstime unit equals the length of one
 * bus cycle, so its duration depends on the bus speed of the computer), so
 * we have to convert nanoseconds to abstime with
 * nanoseconds_to_absolutetime().
 *
 * Refer to _delayed_call_timer(...)
 *
 * If thread_call_func_delayed is no longer exported in the future, we can
 * use timeout() or bsd_timeout() to replace it.
 */
void ktimer_arm(struct ktimer *t, u_int64_t deadline)
{
        cfs_time_t    abstime;

        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        ktimer_disarm_locked(t);
        t->armed = 1;
        t->deadline = deadline;
        nanoseconds_to_absolutetime(deadline, &abstime);
        /* pass the converted abstime, not the raw nanosecond deadline */
        thread_call_func_delayed(ktimer_actor, t, abstime);
        kspin_unlock(&t->guard);
}

void ktimer_disarm(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        t->armed = 0;
        ktimer_disarm_locked(t);
        kspin_unlock(&t->guard);
}

int ktimer_is_armed(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * no locking---result is only a hint anyway.
         */
        return t->armed;
}

u_int64_t ktimer_deadline(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        return t->deadline;
}
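
/*
 * Illustrative sketch (editorial addition): one-shot timer usage, assuming
 * the caller supplies the deadline in nanoseconds, as the conversion note
 * above ktimer_arm() describes.
 */
#if 0
static struct ktimer example_timer;

static void example_timer_func(void *arg)
{
        /* runs in thread_call context when the timer fires */
}

static void ktimer_usage_example(u_int64_t deadline_ns)
{
        ktimer_init(&example_timer, example_timer_func, NULL);
        ktimer_arm(&example_timer, deadline_ns);
        if (ktimer_is_armed(&example_timer))
                ktimer_disarm(&example_timer);  /* cancel while pending */
        ktimer_done(&example_timer);
}
#endif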

void cfs_sync_init(void)
{
#ifdef __DARWIN8__
        /* Initialize the lock group */
        cfs_lock_grp = lck_grp_alloc_init("libcfs sync", LCK_GRP_ATTR_NULL);
#endif
}

void cfs_sync_fini(void)
{
#ifdef __DARWIN8__
        /*
         * XXX Liang: destroy the lock group. As we haven't called lock_done
         * for all locks, cfs_lock_grp may not be freed by the kernel
         * (reference count > 1).
         */
        lck_grp_free(cfs_lock_grp);
        cfs_lock_grp = NULL;
#endif
}

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 80
 * scroll-step: 1
 * End:
 */