/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Darwin (XNU) synchronization primitives
 *
 *  Copyright (c) 2004 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or modify it under
 *   the terms of version 2 of the GNU General Public License as published by
 *   the Free Software Foundation. Lustre is distributed in the hope that it
 *   will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details. You should have received a
 *   copy of the GNU General Public License along with Lustre; if not, write
 *   to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
 *   USA.
 */

/*
 * darwin-sync.c
 *
 * Created by nikita on Sun Jul 18 2004.
 *
 * XNU synchronization primitives.
 */

/*
 * This file contains deliberately simplistic implementations of a (saner)
 * API for the basic synchronization primitives:
 *
 *     - spin-lock          (kspin)
 *
 *     - semaphore          (ksem)
 *
 *     - mutex              (kmut)
 *
 *     - condition variable (kcond)
 *
 *     - wait-queue         (ksleep_chan and ksleep_link)
 *
 *     - timer              (ktimer)
 *
 * A lot can be optimized here; a usage sketch follows this comment.
 */

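/*
 * Editor's sketch (illustrative, not part of the original source): minimal
 * use of the kspin primitive defined below to protect a hypothetical
 * counter. Only the kspin_* names come from this file; the rest is made up
 * for the example.
 *
 *     static struct kspin example_guard;
 *     static int          example_counter;
 *
 *     void example_init(void)
 *     {
 *             kspin_init(&example_guard);
 *     }
 *
 *     void example_bump(void)
 *     {
 *             kspin_lock(&example_guard);
 *             example_counter++;
 *             kspin_unlock(&example_guard);
 *     }
 */
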
#define DEBUG_SUBSYSTEM S_LNET

#ifdef __DARWIN8__
# include <kern/locks.h>
#else
# include <mach/mach_types.h>
# include <sys/types.h>
# include <kern/simple_lock.h>
#endif

#include <libcfs/libcfs.h>

#define SLASSERT(e) ON_SYNC_DEBUG(LASSERT(e))

#ifdef HAVE_GET_PREEMPTION_LEVEL
extern int get_preemption_level(void);
#else
#define get_preemption_level() (0)
#endif

#if SMP
#ifdef __DARWIN8__

static lck_grp_t       *cfs_lock_grp = NULL;
#warning "Verify definition of lck_spin_t hasn't been changed while building!"

/* hw_lock_* are not exported by Darwin8 */
static inline void xnu_spin_init(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //*s = lck_spin_alloc_init(cfs_lock_grp, LCK_ATTR_NULL);
        lck_spin_init((lck_spin_t *)s, cfs_lock_grp, LCK_ATTR_NULL);
}

static inline void xnu_spin_done(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //lck_spin_free(*s, cfs_lock_grp);
        //*s = NULL;
        lck_spin_destroy((lck_spin_t *)s, cfs_lock_grp);
}

#define xnu_spin_lock(s)        lck_spin_lock((lck_spin_t *)(s))
#define xnu_spin_unlock(s)      lck_spin_unlock((lck_spin_t *)(s))

#warning "Darwin8 does not export lck_spin_try_lock"
#define xnu_spin_try(s)         (1)

#else /* DARWIN8 */
extern void                     hw_lock_init(hw_lock_t);
extern void                     hw_lock_lock(hw_lock_t);
extern void                     hw_lock_unlock(hw_lock_t);
extern unsigned int             hw_lock_to(hw_lock_t, unsigned int);
extern unsigned int             hw_lock_try(hw_lock_t);
extern unsigned int             hw_lock_held(hw_lock_t);

#define xnu_spin_init(s)        hw_lock_init(s)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        hw_lock_lock(s)
#define xnu_spin_unlock(s)      hw_lock_unlock(s)
#define xnu_spin_try(s)         hw_lock_try(s)
#endif /* DARWIN8 */

#else /* SMP */
#define xnu_spin_init(s)        do {} while (0)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        do {} while (0)
#define xnu_spin_unlock(s)      do {} while (0)
#define xnu_spin_try(s)         (1)
#endif /* SMP */

/*
 * Warning: low-level libcfs debugging code (libcfs_debug_msg(), for
 * example) itself uses spin-locks, so debugging output from here may lead
 * to nasty surprises.
 *
 * In the uniprocessor build the spin-lock operations are no-ops; only the
 * sanity checks below remain.
 */

void kspin_init(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        xnu_spin_init(&spin->lock);
        ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
        ON_SYNC_DEBUG(spin->owner = NULL);
}

void kspin_done(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == NULL);
        xnu_spin_done(&spin->lock);
}

void kspin_lock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner != current_thread());

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        xnu_spin_lock(&spin->lock);
        SLASSERT(spin->owner == NULL);
        ON_SYNC_DEBUG(spin->owner = current_thread());
}

void kspin_unlock(struct kspin *spin)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == current_thread());
        ON_SYNC_DEBUG(spin->owner = NULL);
        xnu_spin_unlock(&spin->lock);
}

int kspin_trylock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);

        if (xnu_spin_try(&spin->lock)) {
                SLASSERT(spin->owner == NULL);
                ON_SYNC_DEBUG(spin->owner = current_thread());
                return 1;
        } else
                return 0;
}

#if XNU_SYNC_DEBUG
int kspin_islocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner == current_thread();
}

int kspin_isnotlocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner != current_thread();
}
#endif

/*
 * read/write spin-lock
 */
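/*
 * Editor's note (inferred from the code below, not in the original):
 * rwspin->count > 0 is the number of active readers, count == -1 means a
 * writer holds the lock, and count == 0 means it is free. Contenders spin
 * for a bounded number of iterations outside the guard before re-taking
 * it and re-checking.
 */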
void krw_spin_init(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);

        kspin_init(&rwspin->guard);
        rwspin->count = 0;
        ON_SYNC_DEBUG(rwspin->magic = KRW_SPIN_MAGIC);
}

void krw_spin_done(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == 0);
        kspin_done(&rwspin->guard);
}

void krw_spin_down_r(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count < 0) {
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count < 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        ++rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_down_w(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count != 0) {
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count != 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        rwspin->count = -1;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_r(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count > 0);

        kspin_lock(&rwspin->guard);
        --rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_w(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == -1);

        kspin_lock(&rwspin->guard);
        rwspin->count = 0;
        kspin_unlock(&rwspin->guard);
}

/*
 * semaphore
 */
#ifdef __DARWIN8__

#define xnu_waitq_init(q, a)            do {} while (0)
#define xnu_waitq_done(q)               do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)   ({wakeup_one((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_wakeup_all(q, e, s)   ({wakeup((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_assert_wait(q, e, s)  assert_wait((e), s)

#else /* DARWIN8 */

#define xnu_waitq_init(q, a)            wait_queue_init((q), a)
#define xnu_waitq_done(q)               do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)   wait_queue_wakeup_one((q), (event_t)(e), s)
#define xnu_waitq_wakeup_all(q, e, s)   wait_queue_wakeup_all((q), (event_t)(e), s)
#define xnu_waitq_assert_wait(q, e, s)  wait_queue_assert_wait((q), (event_t)(e), s)

#endif /* DARWIN8 */

void ksem_init(struct ksem *sem, int value)
{
        SLASSERT(sem != NULL);
        kspin_init(&sem->guard);
        xnu_waitq_init(&sem->q, SYNC_POLICY_FIFO);
        sem->value = value;
        ON_SYNC_DEBUG(sem->magic = KSEM_MAGIC);
}

void ksem_done(struct ksem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        /*
         * XXX nikita: cannot check that &sem->q is empty because
         * wait_queue_empty() is Apple private API.
         */
        kspin_done(&sem->guard);
}

int ksem_up(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        sem->value += value;
        if (sem->value == 0)
                result = xnu_waitq_wakeup_one(&sem->q, sem,
                                              THREAD_AWAKENED);
        else
                result = xnu_waitq_wakeup_all(&sem->q, sem,
                                              THREAD_AWAKENED);
        kspin_unlock(&sem->guard);
        SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
        return (result == KERN_SUCCESS) ? 0 : 1;
}

void ksem_down(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->guard);
        while (sem->value < value) {
                result = xnu_waitq_assert_wait(&sem->q, sem,
                                               THREAD_UNINT);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                kspin_unlock(&sem->guard);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
                kspin_lock(&sem->guard);
        }
        sem->value -= value;
        kspin_unlock(&sem->guard);
}

int ksem_trydown(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        if (sem->value >= value) {
                sem->value -= value;
                result = 0;
        } else
                result = -EBUSY;
        kspin_unlock(&sem->guard);
        return result;
}

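/*
 * Editor's sketch (illustrative, not part of the original source): ksem
 * used as a completion. A hypothetical worker signals a waiter when it is
 * done; only the ksem_* names come from this file.
 *
 *     static struct ksem done_sem;
 *
 *     void wait_for_worker(void)
 *     {
 *             ksem_init(&done_sem, 0);
 *             start_hypothetical_worker();
 *             ksem_down(&done_sem, 1);   // blocks until the worker is done
 *     }
 *
 *     void worker_done(void)             // called by the worker
 *     {
 *             ksem_up(&done_sem, 1);
 *     }
 */
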
void kmut_init(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        ksem_init(&mut->s, 1);
        ON_SYNC_DEBUG(mut->magic = KMUT_MAGIC);
        ON_SYNC_DEBUG(mut->owner = NULL);
}

void kmut_done(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == NULL);
        ksem_done(&mut->s);
}

void kmut_lock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner != current_thread());
        SLASSERT(get_preemption_level() == 0);

        ksem_down(&mut->s, 1);
        ON_SYNC_DEBUG(mut->owner = current_thread());
}

void kmut_unlock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == current_thread());

        ON_SYNC_DEBUG(mut->owner = NULL);
        ksem_up(&mut->s, 1);
}

int kmut_trylock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return ksem_trydown(&mut->s, 1);
}

#if XNU_SYNC_DEBUG
int kmut_islocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner == current_thread();
}

int kmut_isnotlocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner != current_thread();
}
#endif

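/*
 * Editor's sketch (illustrative, not part of the original source): kmut is
 * a sleeping mutex built on ksem and is used like any mutex; initialization
 * with kmut_init() is omitted. Only the kmut_* names come from this file.
 *
 *     static struct kmut cfg_mutex;
 *
 *     void update_hypothetical_config(void)
 *     {
 *             kmut_lock(&cfg_mutex);
 *             // ... modify state that may be touched by sleeping code ...
 *             kmut_unlock(&cfg_mutex);
 *     }
 */
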
void kcond_init(struct kcond *cond)
{
        SLASSERT(cond != NULL);

        kspin_init(&cond->guard);
        cond->waiters = NULL;
        ON_SYNC_DEBUG(cond->magic = KCOND_MAGIC);
}

void kcond_done(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(cond->waiters == NULL);
        kspin_done(&cond->guard);
}

void kcond_wait(struct kcond *cond, struct kspin *lock)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(lock != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(lock));

        ksem_init(&link.sem, 0);
        kspin_lock(&cond->guard);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);
        kspin_unlock(lock);

        ksem_down(&link.sem, 1);

        kspin_lock(&cond->guard);
        kspin_unlock(&cond->guard);
        kspin_lock(lock);
}

void kcond_wait_guard(struct kcond *cond)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        ksem_init(&link.sem, 0);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);

        ksem_down(&link.sem, 1);

        kspin_lock(&cond->guard);
}

void kcond_signal_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        link = cond->waiters;
        if (link != NULL) {
                cond->waiters = link->next;
                ksem_up(&link->sem, 1);
        }
}

void kcond_signal(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_signal_guard(cond);
        kspin_unlock(&cond->guard);
}

void kcond_broadcast_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        for (link = cond->waiters; link != NULL; link = link->next)
                ksem_up(&link->sem, 1);
        cond->waiters = NULL;
}

void kcond_broadcast(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_broadcast_guard(cond);
        kspin_unlock(&cond->guard);
}

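/*
 * Editor's sketch (illustrative, not part of the original source): the
 * classic condition-variable pattern with kcond and an external kspin.
 * kcond_wait() drops the spin-lock while sleeping and re-acquires it
 * before returning, so the predicate must be re-checked in a loop. Only
 * the kcond_* and kspin_* names come from this file; initialization with
 * kspin_init()/kcond_init() is omitted.
 *
 *     static struct kspin flag_guard;
 *     static struct kcond flag_cond;
 *     static int          flag;
 *
 *     void wait_for_flag(void)
 *     {
 *             kspin_lock(&flag_guard);
 *             while (!flag)
 *                     kcond_wait(&flag_cond, &flag_guard);
 *             kspin_unlock(&flag_guard);
 *     }
 *
 *     void set_flag(void)
 *     {
 *             kspin_lock(&flag_guard);
 *             flag = 1;
 *             kcond_signal(&flag_cond);
 *             kspin_unlock(&flag_guard);
 *     }
 */
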
void krw_sem_init(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);

        kcond_init(&sem->cond);
        sem->count = 0;
        ON_SYNC_DEBUG(sem->magic = KRW_MAGIC);
}

void krw_sem_done(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == 0);
        kcond_done(&sem->cond);
}

void krw_sem_down_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count < 0)
                kcond_wait_guard(&sem->cond);
        ++sem->count;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_r_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count < 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        ++sem->count;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_down_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count != 0)
                kcond_wait_guard(&sem->cond);
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_w_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count != 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_up_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count > 0);

        kspin_lock(&sem->cond.guard);
        --sem->count;
        if (sem->count == 0)
                kcond_broadcast_guard(&sem->cond);
        kspin_unlock(&sem->cond.guard);
}

void krw_sem_up_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == -1);

        kspin_lock(&sem->cond.guard);
        sem->count = 0;
        kspin_unlock(&sem->cond.guard);
        kcond_broadcast(&sem->cond);
}

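/*
 * Editor's sketch (illustrative, not part of the original source):
 * reader/writer usage of krw_sem around a hypothetical table. The same
 * count convention as krw_spin applies: > 0 readers, -1 a writer, 0 free.
 * Initialization with krw_sem_init() is omitted.
 *
 *     static struct krw_sem table_sem;
 *
 *     void read_table(void)
 *     {
 *             krw_sem_down_r(&table_sem);
 *             // ... read-only traversal, may sleep ...
 *             krw_sem_up_r(&table_sem);
 *     }
 *
 *     void update_table(void)
 *     {
 *             krw_sem_down_w(&table_sem);
 *             // ... exclusive modification ...
 *             krw_sem_up_w(&table_sem);
 *     }
 */
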
void ksleep_chan_init(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);

        kspin_init(&chan->guard);
        CFS_INIT_LIST_HEAD(&chan->waiters);
        ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
}

void ksleep_chan_done(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(list_empty(&chan->waiters));
        kspin_done(&chan->guard);
}

void ksleep_link_init(struct ksleep_link *link)
{
        SLASSERT(link != NULL);

        CFS_INIT_LIST_HEAD(&link->linkage);
        link->flags = 0;
        link->event = current_thread();
        link->hits  = 0;
        link->forward = NULL;
        ON_SYNC_DEBUG(link->magic = KSLEEP_LINK_MAGIC);
}

void ksleep_link_done(struct ksleep_link *link)
{
        SLASSERT(link != NULL);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));
}

void ksleep_add(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));

        kspin_lock(&chan->guard);
        if (link->flags & KSLEEP_EXCLUSIVE)
                list_add_tail(&link->linkage, &chan->waiters);
        else
                list_add(&link->linkage, &chan->waiters);
        kspin_unlock(&chan->guard);
}

void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);

        kspin_lock(&chan->guard);
        list_del_init(&link->linkage);
        kspin_unlock(&chan->guard);
}

static int has_hits(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event && scan->hits > 0) {
                        /* consume hit */
                        --scan->hits;
                        return 1;
                }
        }
        return 0;
}

static void add_hit(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event) {
                        ++scan->hits;
                        break;
                }
        }
}

void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
{
        event_t event;
        int     result;

        ENTRY;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                result = assert_wait(event, state);
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
        } else
                kspin_unlock(&chan->guard);
        EXIT;
}

/*
 * Sleep on @chan for no longer than @timeout nanoseconds. Return the
 * remaining sleep time (non-zero only if the thread was woken by a signal
 * (not currently implemented) or the waitq was already in the "signalled"
 * state).
 */
int64_t ksleep_timedwait(struct ksleep_chan *chan,
                         cfs_task_state_t state,
                         __u64 timeout)
{
        event_t event;

        ENTRY;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                int   result;
                /* zero-initialized so the remaining-time computation below
                 * is well defined even when no timer was armed */
                __u64 expire = 0;

                result = assert_wait(event, state);
                if (timeout > 0) {
                        /*
                         * arm a timer. thread_set_timer()'s first argument is
                         * uint32_t, so we have to cook the deadline ourselves.
                         */
                        nanoseconds_to_absolutetime(timeout, &expire);
                        clock_absolutetime_interval_to_deadline(expire, &expire);
                        thread_set_timer_deadline(expire);
                }
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        result = thread_block(THREAD_CONTINUE_NULL);
                thread_cancel_timer();

                if (result == THREAD_TIMED_OUT)
                        timeout = 0;
                else {
                        __u64 now;

                        clock_get_uptime(&now);
                        if (expire > now)
                                absolutetime_to_nanoseconds(expire - now, &timeout);
                        else
                                timeout = 0;
                }
        } else {
                /* already got the event: no need to wait, just return the
                 * full timeout */
                kspin_unlock(&chan->guard);
        }

        RETURN(timeout);
}

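/*
 * Editor's sketch (illustrative, not part of the original source): the
 * wait-queue protocol as the functions above intend it to be used. A
 * waiter registers a ksleep_link on the channel, re-checks its predicate,
 * and sleeps; the "hits" counter keeps a wakeup that races with
 * registration from being lost. The predicate is hypothetical, and
 * ksleep_chan_init() is omitted.
 *
 *     static struct ksleep_chan chan;
 *
 *     void wait_for_event(void)
 *     {
 *             struct ksleep_link link;
 *
 *             ksleep_link_init(&link);
 *             ksleep_add(&chan, &link);
 *             while (!hypothetical_predicate())
 *                     ksleep_wait(&chan, THREAD_UNINT);
 *             ksleep_del(&chan, &link);
 *             ksleep_link_done(&link);
 *     }
 *
 *     void post_event(void)
 *     {
 *             // ... make hypothetical_predicate() true ...
 *             ksleep_wake(&chan);
 *     }
 */
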
/*
 * wake up a single exclusive waiter (plus some arbitrary number of
 * non-exclusive ones)
 */
void ksleep_wake(struct ksleep_chan *chan)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
        ksleep_wake_nr(chan, 1);
}

/*
 * wake up all waiters on @chan
 */
void ksleep_wake_all(struct ksleep_chan *chan)
{
        ENTRY;
        ksleep_wake_nr(chan, 0);
        EXIT;
}

/*
 * wake up no more than @nr exclusive waiters from @chan, plus some
 * arbitrary number of non-exclusive ones. If @nr is 0, wake up all
 * waiters.
 */
void ksleep_wake_nr(struct ksleep_chan *chan, int nr)
{
        struct ksleep_link *scan;
        int result;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);

        kspin_lock(&chan->guard);
        list_for_each_entry(scan, &chan->waiters, linkage) {
                struct ksleep_chan *forward;

                forward = scan->forward;
                if (forward != NULL)
                        kspin_lock(&forward->guard);
                result = thread_wakeup(scan->event);
                SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
                if (result == KERN_NOT_WAITING) {
                        ++scan->hits;
                        if (forward != NULL)
                                add_hit(forward, scan->event);
                }
                if (forward != NULL)
                        kspin_unlock(&forward->guard);
                if ((scan->flags & KSLEEP_EXCLUSIVE) && --nr == 0)
                        break;
        }
        kspin_unlock(&chan->guard);
}

void ktimer_init(struct ktimer *t, void (*func)(void *), void *arg)
{
        SLASSERT(t != NULL);
        SLASSERT(func != NULL);

        kspin_init(&t->guard);
        t->func = func;
        t->arg  = arg;
        ON_SYNC_DEBUG(t->magic = KTIMER_MAGIC);
}

void ktimer_done(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);
        kspin_done(&t->guard);
        ON_SYNC_DEBUG(t->magic = 0);
}

static void ktimer_actor(void *arg0, void *arg1)
{
        struct ktimer *t;
        int            armed;

        t = arg0;
        /*
         * this assumes that ktimers are never freed.
         */
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * call the actual timer function
         */
        kspin_lock(&t->guard);
        armed = t->armed;
        t->armed = 0;
        kspin_unlock(&t->guard);

        if (armed)
                t->func(t->arg);
}

extern boolean_t thread_call_func_cancel(thread_call_func_t, thread_call_param_t, boolean_t);
extern void thread_call_func_delayed(thread_call_func_t, thread_call_param_t, __u64);

static void ktimer_disarm_locked(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        thread_call_func_cancel(ktimer_actor, t, FALSE);
}

/*
 * The deadline we receive is in nanoseconds, but thread_call checks
 * against absolute time (the abstime unit equals the length of one bus
 * cycle, so its duration depends on the bus speed of the machine), so we
 * must convert nanoseconds to abstime with nanoseconds_to_absolutetime().
 *
 * Refer to _delayed_call_timer(...).
 *
 * If thread_call_func_delayed() stops being exported in the future, we
 * can use timeout() or bsd_timeout() to replace it.
 */
void ktimer_arm(struct ktimer *t, u_int64_t deadline)
{
        cfs_time_t    abstime;

        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        ktimer_disarm_locked(t);
        t->armed = 1;
        /* record the nanosecond deadline for ktimer_deadline() */
        t->deadline = deadline;
        nanoseconds_to_absolutetime(deadline, &abstime);
        /* pass the converted abstime, as the comment above requires */
        thread_call_func_delayed(ktimer_actor, t, abstime);
        kspin_unlock(&t->guard);
}

void ktimer_disarm(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        t->armed = 0;
        ktimer_disarm_locked(t);
        kspin_unlock(&t->guard);
}

int ktimer_is_armed(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * no locking---result is only a hint anyway.
         */
        return t->armed;
}

u_int64_t ktimer_deadline(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        return t->deadline;
}

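/*
 * Editor's sketch (illustrative, not part of the original source): arming
 * a one-shot ktimer. Judging from ktimer_arm() above, @deadline is an
 * absolute time expressed in nanoseconds, converted internally to abstime
 * for thread_call. The callback body and caller are hypothetical.
 *
 *     static struct ktimer example_timer;
 *
 *     static void example_expire(void *arg)
 *     {
 *             // runs from thread_call context when the timer fires
 *     }
 *
 *     void example_arm(u_int64_t deadline_ns)
 *     {
 *             ktimer_init(&example_timer, example_expire, NULL);
 *             ktimer_arm(&example_timer, deadline_ns);
 *     }
 *
 *     // later, if the expiry is no longer wanted:
 *     //     ktimer_disarm(&example_timer);
 */
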
void cfs_sync_init(void)
{
#ifdef __DARWIN8__
        /* Initialize lock group */
        cfs_lock_grp = lck_grp_alloc_init("libcfs sync", LCK_GRP_ATTR_NULL);
#endif
}

void cfs_sync_fini(void)
{
#ifdef __DARWIN8__
        /*
         * XXX Liang: destroy the lock group. Since we have not called
         * lock_done for all locks, cfs_lock_grp may not actually be freed
         * by the kernel (its reference count stays > 1).
         */
        lck_grp_free(cfs_lock_grp);
        cfs_lock_grp = NULL;
#endif
}

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 80
 * scroll-step: 1
 * End:
 */