/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/darwin/darwin-sync.c
 *
 * XNU synchronization primitives.
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 */

/*
 * This file contains very simplistic implementations of a (saner) API for
 * basic synchronization primitives:
 *
 *     - spin-lock          (kspin)
 *
 *     - semaphore          (ksem)
 *
 *     - mutex              (kmut)
 *
 *     - condition variable (kcond)
 *
 *     - wait-queue         (ksleep_chan and ksleep_link)
 *
 *     - timer              (ktimer)
 *
 * A lot can be optimized here.
 */

#define DEBUG_SUBSYSTEM S_LNET

#ifdef __DARWIN8__
# include <kern/locks.h>
#else
# include <mach/mach_types.h>
# include <sys/types.h>
# include <kern/simple_lock.h>
#endif

#include <libcfs/libcfs.h>

#define SLASSERT(e) ON_SYNC_DEBUG(LASSERT(e))

#ifdef HAVE_GET_PREEMPTION_LEVEL
extern int get_preemption_level(void);
#else
#define get_preemption_level() (0)
#endif

#if SMP
#ifdef __DARWIN8__

static lck_grp_t       *cfs_lock_grp = NULL;
#warning "Verify definition of lck_spin_t hasn't been changed while building!"

/* hw_lock_* are not exported by Darwin8 */
static inline void xnu_spin_init(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //*s = lck_spin_alloc_init(cfs_lock_grp, LCK_ATTR_NULL);
        lck_spin_init((lck_spin_t *)s, cfs_lock_grp, LCK_ATTR_NULL);
}

static inline void xnu_spin_done(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //lck_spin_free(*s, cfs_lock_grp);
        //*s = NULL;
        lck_spin_destroy((lck_spin_t *)s, cfs_lock_grp);
}

#define xnu_spin_lock(s)        lck_spin_lock((lck_spin_t *)(s))
#define xnu_spin_unlock(s)      lck_spin_unlock((lck_spin_t *)(s))

#warning "Darwin8 does not export lck_spin_try_lock"
#define xnu_spin_try(s)         (1)

#else /* DARWIN8 */
extern void                     hw_lock_init(hw_lock_t);
extern void                     hw_lock_lock(hw_lock_t);
extern void                     hw_lock_unlock(hw_lock_t);
extern unsigned int             hw_lock_to(hw_lock_t, unsigned int);
extern unsigned int             hw_lock_try(hw_lock_t);
extern unsigned int             hw_lock_held(hw_lock_t);

#define xnu_spin_init(s)        hw_lock_init(s)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        hw_lock_lock(s)
#define xnu_spin_unlock(s)      hw_lock_unlock(s)
#define xnu_spin_try(s)         hw_lock_try(s)
#endif /* DARWIN8 */

#else /* SMP */
#define xnu_spin_init(s)        do {} while (0)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        do {} while (0)
#define xnu_spin_unlock(s)      do {} while (0)
#define xnu_spin_try(s)         (1)
#endif /* SMP */

/*
 * Warning: low-level libcfs debugging code (libcfs_debug_msg(), for
 * example) uses spin-locks, so debugging output here may lead to nasty
 * surprises.
 *
 * In the uniprocessor build the spin-lock compiles away; only the
 * debugging checks remain.
 */

void kspin_init(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        xnu_spin_init(&spin->lock);
        ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
        ON_SYNC_DEBUG(spin->owner = NULL);
}

void kspin_done(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == NULL);
        xnu_spin_done(&spin->lock);
}

void kspin_lock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner != current_thread());

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        xnu_spin_lock(&spin->lock);
        SLASSERT(spin->owner == NULL);
        ON_SYNC_DEBUG(spin->owner = current_thread());
}

void kspin_unlock(struct kspin *spin)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == current_thread());
        ON_SYNC_DEBUG(spin->owner = NULL);
        xnu_spin_unlock(&spin->lock);
}

int kspin_trylock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);

        if (xnu_spin_try(&spin->lock)) {
                SLASSERT(spin->owner == NULL);
                ON_SYNC_DEBUG(spin->owner = current_thread());
                return 1;
        } else
                return 0;
}
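
/*
 * Illustrative sketch (not part of the build): typical use of the kspin
 * API to protect a shared counter. The names below are hypothetical.
 *
 *     static struct kspin counter_guard;
 *     static int          counter;
 *
 *     void counter_setup(void)
 *     {
 *             kspin_init(&counter_guard);
 *     }
 *
 *     void counter_bump(void)
 *     {
 *             kspin_lock(&counter_guard);
 *             ++counter;              // keep the critical section short
 *             kspin_unlock(&counter_guard);
 *     }
 *
 * kspin_trylock() returns 1 on success, so a non-blocking caller writes:
 *
 *     if (kspin_trylock(&counter_guard)) {
 *             ++counter;
 *             kspin_unlock(&counter_guard);
 *     }
 */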

#if XNU_SYNC_DEBUG
int kspin_islocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner == current_thread();
}

int kspin_isnotlocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner != current_thread();
}
#endif

/*
 * read/write spin-lock
 */
void krw_spin_init(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);

        kspin_init(&rwspin->guard);
        rwspin->count = 0;
        ON_SYNC_DEBUG(rwspin->magic = KRW_SPIN_MAGIC);
}

void krw_spin_done(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == 0);
        kspin_done(&rwspin->guard);
}

void krw_spin_down_r(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count < 0) {
                /*
                 * a writer holds the lock: drop the guard and busy-wait;
                 * "i" starts at -1 and is decremented until it wraps
                 * around to 0, giving roughly UINT_MAX spins between
                 * re-checks under the guard
                 */
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count < 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        ++ rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_down_w(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count != 0) {
                /* readers or a writer hold the lock: back off as above */
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count != 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        rwspin->count = -1;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_r(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count > 0);

        kspin_lock(&rwspin->guard);
        -- rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_w(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == -1);

        kspin_lock(&rwspin->guard);
        rwspin->count = 0;
        kspin_unlock(&rwspin->guard);
}
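
/*
 * Illustrative sketch (not part of the build): the counter convention is
 * count > 0 for that many readers and count == -1 for a single writer. A
 * hypothetical routing-table reader/updater pair would look like:
 *
 *     static struct krw_spin table_lock;
 *
 *     void table_lookup(void)
 *     {
 *             krw_spin_down_r(&table_lock);
 *             // read-only access, may run concurrently with other readers
 *             krw_spin_up_r(&table_lock);
 *     }
 *
 *     void table_update(void)
 *     {
 *             krw_spin_down_w(&table_lock);
 *             // exclusive access
 *             krw_spin_up_w(&table_lock);
 *     }
 */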

/*
 * semaphore
 */
#ifdef __DARWIN8__

#define xnu_waitq_init(q, a)            do {} while (0)
#define xnu_waitq_done(q)               do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)   ({wakeup_one((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_wakeup_all(q, e, s)   ({wakeup((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_assert_wait(q, e, s)  assert_wait((e), s)

#else /* DARWIN8 */

#define xnu_waitq_init(q, a)            wait_queue_init((q), a)
#define xnu_waitq_done(q)               do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)   wait_queue_wakeup_one((q), (event_t)(e), s)
#define xnu_waitq_wakeup_all(q, e, s)   wait_queue_wakeup_all((q), (event_t)(e), s)
#define xnu_waitq_assert_wait(q, e, s)  wait_queue_assert_wait((q), (event_t)(e), s)

#endif /* DARWIN8 */

void ksem_init(struct ksem *sem, int value)
{
        SLASSERT(sem != NULL);
        kspin_init(&sem->guard);
        xnu_waitq_init(&sem->q, SYNC_POLICY_FIFO);
        sem->value = value;
        ON_SYNC_DEBUG(sem->magic = KSEM_MAGIC);
}

void ksem_done(struct ksem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        /*
         * XXX nikita: cannot check that &sem->q is empty because
         * wait_queue_empty() is Apple private API.
         */
        kspin_done(&sem->guard);
}

int ksem_up(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        sem->value += value;
        /*
         * waiters re-check sem->value under the guard in ksem_down(), so
         * waking too many threads is harmless, merely wasteful
         */
        if (sem->value == 0)
                result = xnu_waitq_wakeup_one(&sem->q, sem,
                                              THREAD_AWAKENED);
        else
                result = xnu_waitq_wakeup_all(&sem->q, sem,
                                              THREAD_AWAKENED);
        kspin_unlock(&sem->guard);
        SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
        return (result == KERN_SUCCESS) ? 0 : 1;
}

void ksem_down(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->guard);
        while (sem->value < value) {
                result = xnu_waitq_assert_wait(&sem->q, sem,
                                               THREAD_UNINT);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                kspin_unlock(&sem->guard);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
                kspin_lock(&sem->guard);
        }
        sem->value -= value;
        kspin_unlock(&sem->guard);
}

int ksem_trydown(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        if (sem->value >= value) {
                sem->value -= value;
                result = 0;
        } else
                result = -EBUSY;
        kspin_unlock(&sem->guard);
        return result;
}
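
/*
 * Illustrative sketch (not part of the build): a ksem initialized to 0
 * serves as a completion, the same way kmut below uses one initialized
 * to 1 as a mutex. The names below are hypothetical.
 *
 *     static struct ksem done;
 *
 *     void worker(void)
 *     {
 *             // ... perform work ...
 *             ksem_up(&done, 1);      // signal completion
 *     }
 *
 *     void waiter(void)
 *     {
 *             ksem_init(&done, 0);
 *             // ... start worker ...
 *             ksem_down(&done, 1);    // block until the worker signals
 *             ksem_done(&done);
 *     }
 *
 * ksem_trydown() is the non-blocking variant: it returns 0 on success
 * and -EBUSY when the semaphore lacks the requested count.
 */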

void kmut_init(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        ksem_init(&mut->s, 1);
        ON_SYNC_DEBUG(mut->magic = KMUT_MAGIC);
        ON_SYNC_DEBUG(mut->owner = NULL);
}

void kmut_done(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == NULL);
        ksem_done(&mut->s);
}

void kmut_lock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner != current_thread());
        SLASSERT(get_preemption_level() == 0);

        ksem_down(&mut->s, 1);
        ON_SYNC_DEBUG(mut->owner = current_thread());
}

void kmut_unlock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == current_thread());

        ON_SYNC_DEBUG(mut->owner = NULL);
        ksem_up(&mut->s, 1);
}

int kmut_trylock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return ksem_trydown(&mut->s, 1);
}

#if XNU_SYNC_DEBUG
int kmut_islocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner == current_thread();
}

int kmut_isnotlocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner != current_thread();
}
#endif
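
/*
 * Illustrative sketch (not part of the build). Note the convention
 * difference: kspin_trylock() returns non-zero on success, while
 * kmut_trylock() forwards ksem_trydown() and thus returns 0 on success
 * and -EBUSY on contention. The names below are hypothetical.
 *
 *     static struct kmut state_lock;
 *
 *     void touch_state(void)
 *     {
 *             kmut_lock(&state_lock);
 *             // sleeping is allowed here, unlike under a kspin
 *             kmut_unlock(&state_lock);
 *     }
 *
 *     if (kmut_trylock(&state_lock) == 0) {
 *             // got the mutex
 *             kmut_unlock(&state_lock);
 *     }
 */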

void kcond_init(struct kcond *cond)
{
        SLASSERT(cond != NULL);

        kspin_init(&cond->guard);
        cond->waiters = NULL;
        ON_SYNC_DEBUG(cond->magic = KCOND_MAGIC);
}

void kcond_done(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(cond->waiters == NULL);
        kspin_done(&cond->guard);
}

void kcond_wait(struct kcond *cond, struct kspin *lock)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(lock != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(lock));

        ksem_init(&link.sem, 0);
        kspin_lock(&cond->guard);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);
        kspin_unlock(lock);

        ksem_down(&link.sem, 1);

        /*
         * take and drop the guard so that a signaller still holding it
         * is done touching "link" before this stack frame (which owns
         * "link") goes away
         */
        kspin_lock(&cond->guard);
        kspin_unlock(&cond->guard);
        kspin_lock(lock);
}

void kcond_wait_guard(struct kcond *cond)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        ksem_init(&link.sem, 0);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);

        ksem_down(&link.sem, 1);

        kspin_lock(&cond->guard);
}

void kcond_signal_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        link = cond->waiters;
        if (link != NULL) {
                cond->waiters = link->next;
                ksem_up(&link->sem, 1);
        }
}

void kcond_signal(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_signal_guard(cond);
        kspin_unlock(&cond->guard);
}

void kcond_broadcast_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        for (link = cond->waiters; link != NULL; link = link->next)
                ksem_up(&link->sem, 1);
        cond->waiters = NULL;
}

void kcond_broadcast(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_broadcast_guard(cond);
        kspin_unlock(&cond->guard);
}
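
/*
 * Illustrative sketch (not part of the build): as with any condition
 * variable, callers must re-check their predicate in a loop, because
 * kcond_wait() drops and re-takes the lock. The names below are
 * hypothetical.
 *
 *     static struct kspin q_lock;
 *     static struct kcond q_nonempty;
 *     static int          q_len;
 *
 *     void consumer(void)
 *     {
 *             kspin_lock(&q_lock);
 *             while (q_len == 0)
 *                     kcond_wait(&q_nonempty, &q_lock);
 *             --q_len;                // predicate holds, lock is held
 *             kspin_unlock(&q_lock);
 *     }
 *
 *     void producer(void)
 *     {
 *             kspin_lock(&q_lock);
 *             ++q_len;
 *             kspin_unlock(&q_lock);
 *             kcond_signal(&q_nonempty);
 *     }
 */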

void krw_sem_init(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);

        kcond_init(&sem->cond);
        sem->count = 0;
        ON_SYNC_DEBUG(sem->magic = KRW_MAGIC);
}

void krw_sem_done(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == 0);
        kcond_done(&sem->cond);
}

void krw_sem_down_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count < 0)
                kcond_wait_guard(&sem->cond);
        ++ sem->count;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_r_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count < 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        ++ sem->count;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_down_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count != 0)
                kcond_wait_guard(&sem->cond);
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_w_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count != 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_up_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count > 0);

        kspin_lock(&sem->cond.guard);
        -- sem->count;
        if (sem->count == 0)
                kcond_broadcast_guard(&sem->cond);
        kspin_unlock(&sem->cond.guard);
}

void krw_sem_up_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == -1);

        kspin_lock(&sem->cond.guard);
        sem->count = 0;
        kspin_unlock(&sem->cond.guard);
        kcond_broadcast(&sem->cond);
}
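
/*
 * Illustrative sketch (not part of the build): krw_sem is the sleeping
 * counterpart of krw_spin, so it may only be used where blocking is
 * allowed (note the get_preemption_level() == 0 assertions above). The
 * names below are hypothetical.
 *
 *     static struct krw_sem cfg_sem;
 *
 *     void cfg_read(void)
 *     {
 *             krw_sem_down_r(&cfg_sem);
 *             // shared, possibly sleeping, read-side work
 *             krw_sem_up_r(&cfg_sem);
 *     }
 *
 *     void cfg_write(void)
 *     {
 *             if (krw_sem_down_w_try(&cfg_sem) == -EBUSY)
 *                     krw_sem_down_w(&cfg_sem);   // fall back to blocking
 *             // exclusive write-side work
 *             krw_sem_up_w(&cfg_sem);
 *     }
 */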

void ksleep_chan_init(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);

        kspin_init(&chan->guard);
        CFS_INIT_LIST_HEAD(&chan->waiters);
        ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
}

void ksleep_chan_done(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(list_empty(&chan->waiters));
        kspin_done(&chan->guard);
}

void ksleep_link_init(struct ksleep_link *link)
{
        SLASSERT(link != NULL);

        CFS_INIT_LIST_HEAD(&link->linkage);
        link->flags = 0;
        link->event = current_thread();
        link->hits  = 0;
        link->forward = NULL;
        ON_SYNC_DEBUG(link->magic = KSLEEP_LINK_MAGIC);
}

void ksleep_link_done(struct ksleep_link *link)
{
        SLASSERT(link != NULL);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));
}

void ksleep_add(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));

        kspin_lock(&chan->guard);
        if (link->flags & KSLEEP_EXCLUSIVE)
                list_add_tail(&link->linkage, &chan->waiters);
        else
                list_add(&link->linkage, &chan->waiters);
        kspin_unlock(&chan->guard);
}

void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);

        kspin_lock(&chan->guard);
        list_del_init(&link->linkage);
        kspin_unlock(&chan->guard);
}

/*
 * A "hit" records a wakeup that arrived while the target thread was not
 * yet blocked; ksleep_wait() consumes pending hits instead of blocking,
 * so such wakeups are not lost.
 */
static int has_hits(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event && scan->hits > 0) {
                        /* consume hit */
                        -- scan->hits;
                        return 1;
                }
        }
        return 0;
}

static void add_hit(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event) {
                        ++ scan->hits;
                        break;
                }
        }
}

void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
{
        event_t event;
        int     result;

        ENTRY;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                result = assert_wait(event, state);
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
        } else
                kspin_unlock(&chan->guard);
        EXIT;
}
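
/*
 * Illustrative sketch (not part of the build): the wait-queue protocol
 * is "init link, add to chan, re-check condition, wait, remove". The
 * names data_ready and data_is_ready() are hypothetical; the state
 * constant is THREAD_UNINT, as used by ksem_down() above.
 *
 *     static struct ksleep_chan data_ready;
 *
 *     void wait_for_data(void)
 *     {
 *             struct ksleep_link me;
 *
 *             ksleep_link_init(&me);
 *             ksleep_add(&data_ready, &me);
 *             while (!data_is_ready())
 *                     ksleep_wait(&data_ready, THREAD_UNINT);
 *             ksleep_del(&data_ready, &me);
 *             ksleep_link_done(&me);
 *     }
 *
 * A producer calls ksleep_wake(&data_ready) after publishing data; the
 * "hit" mechanism above keeps a wakeup that arrives between the
 * data_is_ready() check and the block inside ksleep_wait() from being
 * lost.
 */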

/*
 * Sleep on @chan for no longer than @timeout nanoseconds. Return the
 * remaining sleep time (non-zero only if the thread was woken by a signal
 * (not currently implemented), or if the waitq was already in the
 * "signalled" state).
 */
int64_t ksleep_timedwait(struct ksleep_chan *chan,
                         cfs_task_state_t state,
                         __u64 timeout)
{
        event_t event;

        ENTRY;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                int   result;
                __u64 expire = 0;       /* stays 0 when no timer is armed */

                result = assert_wait(event, state);
                if (timeout > 0) {
                        /*
                         * arm a timer. thread_set_timer()'s first argument is
                         * uint32_t, so we have to cook the deadline ourselves.
                         */
                        nanoseconds_to_absolutetime(timeout, &expire);
                        clock_absolutetime_interval_to_deadline(expire, &expire);
                        thread_set_timer_deadline(expire);
                }
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        result = thread_block(THREAD_CONTINUE_NULL);
                thread_cancel_timer();

                if (result == THREAD_TIMED_OUT)
                        timeout = 0;
                else {
                        __u64 now;

                        clock_get_uptime(&now);
                        if (expire > now)
                                absolutetime_to_nanoseconds(expire - now, &timeout);
                        else
                                timeout = 0;
                }
        } else {
                /* the event already fired: return without waiting */
                kspin_unlock(&chan->guard);
        }

        RETURN(timeout);
}

/*
 * wake up a single exclusive waiter (plus some arbitrary number of
 * non-exclusive ones)
 */
void ksleep_wake(struct ksleep_chan *chan)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
        ksleep_wake_nr(chan, 1);
}

/*
 * wake up all waiters on @chan
 */
void ksleep_wake_all(struct ksleep_chan *chan)
{
        ENTRY;
        ksleep_wake_nr(chan, 0);
        EXIT;
}

/*
 * wake up no more than @nr exclusive waiters from @chan, plus some
 * arbitrary number of non-exclusive ones. If @nr is 0, wake up all
 * waiters.
 */
void ksleep_wake_nr(struct ksleep_chan *chan, int nr)
{
        struct ksleep_link *scan;
        int result;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);

        kspin_lock(&chan->guard);
        list_for_each_entry(scan, &chan->waiters, linkage) {
                struct ksleep_chan *forward;

                forward = scan->forward;
                if (forward != NULL)
                        kspin_lock(&forward->guard);
                result = thread_wakeup(scan->event);
                SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
                if (result == KERN_NOT_WAITING) {
                        /* target is not sleeping yet: record a hit instead */
                        ++ scan->hits;
                        if (forward != NULL)
                                add_hit(forward, scan->event);
                }
                if (forward != NULL)
                        kspin_unlock(&forward->guard);
                /* with @nr == 0, --nr never reaches 0: everybody is woken */
                if ((scan->flags & KSLEEP_EXCLUSIVE) && --nr == 0)
                        break;
        }
        kspin_unlock(&chan->guard);
}

void ktimer_init(struct ktimer *t, void (*func)(void *), void *arg)
{
        SLASSERT(t != NULL);
        SLASSERT(func != NULL);

        kspin_init(&t->guard);
        t->func = func;
        t->arg  = arg;
        ON_SYNC_DEBUG(t->magic = KTIMER_MAGIC);
}

void ktimer_done(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);
        kspin_done(&t->guard);
        ON_SYNC_DEBUG(t->magic = 0);
}

static void ktimer_actor(void *arg0, void *arg1)
{
        struct ktimer *t;
        int            armed;

        t = arg0;
        /*
         * this assumes that ktimers are never freed.
         */
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * call the actual timer function
         */
        kspin_lock(&t->guard);
        armed = t->armed;
        t->armed = 0;
        kspin_unlock(&t->guard);

        if (armed)
                t->func(t->arg);
}

extern boolean_t thread_call_func_cancel(thread_call_func_t, thread_call_param_t, boolean_t);
extern void thread_call_func_delayed(thread_call_func_t, thread_call_param_t, __u64);

static void ktimer_disarm_locked(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        thread_call_func_cancel(ktimer_actor, t, FALSE);
}

/*
 * The deadline we receive is in nanoseconds, but thread_call checks
 * absolute time (the abstime unit equals the length of one bus cycle,
 * so its duration depends on the bus speed of the machine), so the
 * nanosecond value must be converted to abstime with
 * nanoseconds_to_absolutetime().
 *
 * Refer to _delayed_call_timer(...)
 *
 * If thread_call_func_delayed stops being exported in the future,
 * timeout() or bsd_timeout() can be used to replace it.
 */
void ktimer_arm(struct ktimer *t, u_int64_t deadline)
{
        cfs_time_t    abstime;

        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        ktimer_disarm_locked(t);
        t->armed = 1;
        t->deadline = deadline;         /* recorded for ktimer_deadline() */
        nanoseconds_to_absolutetime(deadline, &abstime);
        /* pass the converted abstime, not the raw nanosecond deadline */
        thread_call_func_delayed(ktimer_actor, t, abstime);
        kspin_unlock(&t->guard);
}

void ktimer_disarm(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        t->armed = 0;
        ktimer_disarm_locked(t);
        kspin_unlock(&t->guard);
}

int ktimer_is_armed(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * no locking---result is only a hint anyway.
         */
        return t->armed;
}

u_int64_t ktimer_deadline(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        return t->deadline;
}
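
/*
 * Illustrative sketch (not part of the build): a one-shot timeout built
 * on ktimer. ktimer_arm() takes an absolute deadline expressed in
 * nanoseconds (converted to abstime internally), and ktimer_actor() runs
 * the callback from thread_call context, outside t->guard. The names
 * below are hypothetical.
 *
 *     static struct ktimer io_timer;
 *
 *     static void io_timed_out(void *arg)
 *     {
 *             // abort the request identified by "arg"
 *     }
 *
 *     void io_start(void *req)
 *     {
 *             __u64 now;
 *
 *             ktimer_init(&io_timer, io_timed_out, req);
 *             clock_get_uptime(&now);
 *             absolutetime_to_nanoseconds(now, &now);
 *             ktimer_arm(&io_timer, now + 30ULL * 1000000000); // +30s
 *     }
 *
 *     void io_complete(void)
 *     {
 *             ktimer_disarm(&io_timer);   // cancel any pending callback
 *             ktimer_done(&io_timer);
 *     }
 */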

void cfs_sync_init(void)
{
#ifdef __DARWIN8__
        /* Initialize the lock group */
        cfs_lock_grp = lck_grp_alloc_init("libcfs sync", LCK_GRP_ATTR_NULL);
#endif
}

void cfs_sync_fini(void)
{
#ifdef __DARWIN8__
        /*
         * XXX Liang: destroy the lock group. Since lock_done hasn't been
         * called for all locks, cfs_lock_grp may not be freed by the
         * kernel (reference count > 1).
         */
        lck_grp_free(cfs_lock_grp);
        cfs_lock_grp = NULL;
#endif
}
/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * fill-column: 80
 * scroll-step: 1
 * End:
 */