1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Lustre Light Super operations
6 * Copyright (c) 2004 Cluster File Systems, Inc.
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or modify it under
11 * the terms of version 2 of the GNU General Public License as published by
12 * the Free Software Foundation. Lustre is distributed in the hope that it
13 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
14 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. You should have received a
16 * copy of the GNU General Public License along with Lustre; if not, write
17 * to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
24 * Created by nikita on Sun Jul 18 2004.
26 * XNU synchronization primitives.
30 * This file contains very simplistic implementations of (saner) API for
31 * basic synchronization primitives:
39 * - condition variable (kcond)
41 * - wait-queue (ksleep_chan and ksleep_link)
45 * A lot can be optimized here.
48 #define DEBUG_SUBSYSTEM S_LNET
51 # include <kern/locks.h>
53 # include <mach/mach_types.h>
54 # include <sys/types.h>
55 # include <kern/simple_lock.h>
58 #include <libcfs/libcfs.h>
60 #define SLASSERT(e) ON_SYNC_DEBUG(LASSERT(e))
62 #ifdef HAVE_GET_PREEMPTION_LEVEL
63 extern int get_preemption_level(void);
65 #define get_preemption_level() (0)
/*
 * xnu_spin_*: thin portability layer over whichever raw spin-lock primitive
 * the target Darwin kernel provides.  Three variants are visible here:
 * lck_spin_* (Darwin 8 lock-group API), hw_lock_* (declared extern because
 * the symbols are not in public headers), and empty no-ops (presumably the
 * uniprocessor build, where only the kspin debug checks remain meaningful).
 *
 * NOTE(review): this listing appears truncated by extraction -- the embedded
 * original line numbers are non-contiguous; #if/#else/#endif lines, braces
 * and blank lines seem to be missing.  Verify against the complete source
 * before editing; the three variants below are alternatives, not sequential.
 */
71 static lck_grp_t *cfs_lock_grp = NULL;
72 #warning "Verify definition of lck_spin_t hasn't been changed while building!"
74 /* hw_lock_* are not exported by Darwin8 */
/* Initialize a spin-lock inside the shared libcfs lock group. */
75 static inline void xnu_spin_init(xnu_spin_t *s)
77 SLASSERT(cfs_lock_grp != NULL);
78 //*s = lck_spin_alloc_init(cfs_lock_grp, LCK_ATTR_NULL);
79 lck_spin_init((lck_spin_t *)s, cfs_lock_grp, LCK_ATTR_NULL);
/* Tear down a spin-lock created by xnu_spin_init(). */
82 static inline void xnu_spin_done(xnu_spin_t *s)
84 SLASSERT(cfs_lock_grp != NULL);
85 //lck_spin_free(*s, cfs_lock_grp);
87 lck_spin_destroy((lck_spin_t *)s, cfs_lock_grp);
90 #define xnu_spin_lock(s) lck_spin_lock((lck_spin_t *)(s))
91 #define xnu_spin_unlock(s) lck_spin_unlock((lck_spin_t *)(s))
/* Darwin 8 exports no try-lock, so "try" unconditionally claims success. */
93 #warning "Darwin8 does not export lck_spin_try_lock"
94 #define xnu_spin_try(s) (1)
/* Pre-Darwin-8 branch: raw hw_lock_* primitives (not in public headers). */
97 extern void hw_lock_init(hw_lock_t);
98 extern void hw_lock_lock(hw_lock_t);
99 extern void hw_lock_unlock(hw_lock_t);
100 extern unsigned int hw_lock_to(hw_lock_t, unsigned int);
101 extern unsigned int hw_lock_try(hw_lock_t);
102 extern unsigned int hw_lock_held(hw_lock_t);
104 #define xnu_spin_init(s) hw_lock_init(s)
105 #define xnu_spin_done(s) do {} while (0)
106 #define xnu_spin_lock(s) hw_lock_lock(s)
107 #define xnu_spin_unlock(s) hw_lock_unlock(s)
108 #define xnu_spin_try(s) hw_lock_try(s)
/* Uniprocessor build: locking compiles away entirely. */
112 #define xnu_spin_init(s) do {} while (0)
113 #define xnu_spin_done(s) do {} while (0)
114 #define xnu_spin_lock(s) do {} while (0)
115 #define xnu_spin_unlock(s) do {} while (0)
116 #define xnu_spin_try(s) (1)
120 * Warning: low level libcfs debugging code (libcfs_debug_msg(), for
121 * example), uses spin-locks, so debugging output here may lead to nasty
124 * In uniprocessor version of spin-lock. Only checks.
/*
 * kspin: debug-checked spin-lock.  Wraps xnu_spin_* and, when sync debugging
 * is compiled in (ON_SYNC_DEBUG), tracks a magic value and the owning thread
 * so self-deadlock and unbalanced unlock are caught by SLASSERT.
 *
 * NOTE(review): listing appears truncated (embedded line numbers skip; braces
 * and some statements missing).  Comments below describe only what is visible.
 */
/* Initialize @spin; owner starts NULL, magic stamped for later checks. */
127 void kspin_init(struct kspin *spin)
129 SLASSERT(spin != NULL);
130 xnu_spin_init(&spin->lock);
131 ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
132 ON_SYNC_DEBUG(spin->owner = NULL);
/* Finalize @spin; must not be held (owner == NULL). */
135 void kspin_done(struct kspin *spin)
137 SLASSERT(spin != NULL);
138 SLASSERT(spin->magic == KSPIN_MAGIC);
139 SLASSERT(spin->owner == NULL);
140 xnu_spin_done(&spin->lock);
/* Acquire @spin; asserts the caller does not already hold it. */
143 void kspin_lock(struct kspin *spin)
145 SLASSERT(spin != NULL);
146 SLASSERT(spin->magic == KSPIN_MAGIC);
147 SLASSERT(spin->owner != current_thread());
150 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
151 * from here: this will lead to infinite recursion.
154 xnu_spin_lock(&spin->lock);
155 SLASSERT(spin->owner == NULL);
156 ON_SYNC_DEBUG(spin->owner = current_thread());
/* Release @spin; asserts the caller is the current owner. */
159 void kspin_unlock(struct kspin *spin)
162 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
163 * from here: this will lead to infinite recursion.
166 SLASSERT(spin != NULL);
167 SLASSERT(spin->magic == KSPIN_MAGIC);
168 SLASSERT(spin->owner == current_thread());
169 ON_SYNC_DEBUG(spin->owner = NULL);
170 xnu_spin_unlock(&spin->lock);
/*
 * Try to acquire @spin without blocking.
 * NOTE(review): the success/failure return statements are not visible in
 * this truncated listing -- presumably returns nonzero on success; confirm.
 */
173 int kspin_trylock(struct kspin *spin)
175 SLASSERT(spin != NULL);
176 SLASSERT(spin->magic == KSPIN_MAGIC);
178 if (xnu_spin_try(&spin->lock)) {
179 SLASSERT(spin->owner == NULL);
180 ON_SYNC_DEBUG(spin->owner = current_thread());
/* Hint: true iff the CURRENT thread holds @spin (owner tracking only). */
187 int kspin_islocked(struct kspin *spin)
189 SLASSERT(spin != NULL);
190 SLASSERT(spin->magic == KSPIN_MAGIC);
191 return spin->owner == current_thread();
/* Hint: true iff the current thread does NOT hold @spin. */
194 int kspin_isnotlocked(struct kspin *spin)
196 SLASSERT(spin != NULL);
197 SLASSERT(spin->magic == KSPIN_MAGIC);
198 return spin->owner != current_thread();
203 * read/write spin-lock
/*
 * krw_spin: read/write spin-lock built from a kspin guard plus a count
 * (count > 0: readers; count == -1: one writer; 0: free -- inferred from the
 * visible assertions; confirm against full source).
 *
 * NOTE(review): truncated listing -- the count increments/decrements and the
 * declaration/reset of the busy-wait counter `i` are missing from view.
 */
/* Initialize @rwspin: guard spin-lock plus (not visible here) count reset. */
205 void krw_spin_init(struct krw_spin *rwspin)
207 SLASSERT(rwspin != NULL);
209 kspin_init(&rwspin->guard);
211 ON_SYNC_DEBUG(rwspin->magic = KRW_SPIN_MAGIC);
/* Finalize @rwspin; must be free (count == 0). */
214 void krw_spin_done(struct krw_spin *rwspin)
216 SLASSERT(rwspin != NULL);
217 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
218 SLASSERT(rwspin->count == 0);
219 kspin_done(&rwspin->guard);
/* Take @rwspin for reading: spin (guard dropped) while a writer holds it. */
222 void krw_spin_down_r(struct krw_spin *rwspin)
225 SLASSERT(rwspin != NULL);
226 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
228 kspin_lock(&rwspin->guard);
229 while(rwspin->count < 0) {
231 kspin_unlock(&rwspin->guard);
/* bounded busy-poll outside the guard before re-checking under it */
232 while (--i != 0 && rwspin->count < 0)
234 kspin_lock(&rwspin->guard);
237 kspin_unlock(&rwspin->guard);
/* Take @rwspin for writing: spin until no readers and no writer remain. */
240 void krw_spin_down_w(struct krw_spin *rwspin)
243 SLASSERT(rwspin != NULL);
244 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
246 kspin_lock(&rwspin->guard);
247 while (rwspin->count != 0) {
249 kspin_unlock(&rwspin->guard);
250 while (--i != 0 && rwspin->count != 0)
252 kspin_lock(&rwspin->guard);
255 kspin_unlock(&rwspin->guard);
/* Drop a read hold (count must be positive; decrement not visible here). */
258 void krw_spin_up_r(struct krw_spin *rwspin)
260 SLASSERT(rwspin != NULL);
261 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
262 SLASSERT(rwspin->count > 0);
264 kspin_lock(&rwspin->guard);
266 kspin_unlock(&rwspin->guard);
/* Drop the write hold (count must be -1; reset not visible here). */
269 void krw_spin_up_w(struct krw_spin *rwspin)
271 SLASSERT(rwspin != NULL);
272 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
273 SLASSERT(rwspin->count == -1);
275 kspin_lock(&rwspin->guard);
277 kspin_unlock(&rwspin->guard);
/*
 * xnu_waitq_*: wait-queue shim.  First variant maps onto the BSD-style
 * wakeup()/wakeup_one()/assert_wait() event API; second maps onto the Mach
 * wait_queue_* API.  NOTE(review): the #if/#else/#endif selecting between
 * them is missing from this truncated listing.
 */
285 #define xnu_waitq_init(q, a) do {} while (0)
286 #define xnu_waitq_done(q) do {} while (0)
287 #define xnu_waitq_wakeup_one(q, e, s) ({wakeup_one((void *)(e)); KERN_SUCCESS;})
288 #define xnu_waitq_wakeup_all(q, e, s) ({wakeup((void *)(e)); KERN_SUCCESS;})
289 #define xnu_waitq_assert_wait(q, e, s) assert_wait((e), s)
/* Mach wait_queue variant. */
293 #define xnu_waitq_init(q, a) wait_queue_init((q), a)
294 #define xnu_waitq_done(q) do {} while (0)
295 #define xnu_waitq_wakeup_one(q, e, s) wait_queue_wakeup_one((q), (event_t)(e), s)
296 #define xnu_waitq_wakeup_all(q, e, s) wait_queue_wakeup_all((q), (event_t)(e), s)
297 #define xnu_waitq_assert_wait(q, e, s) wait_queue_assert_wait((q), (event_t)(e), s)
/*
 * ksem: counting semaphore built from a kspin guard, a value, and an
 * xnu wait-queue.  NOTE(review): truncated listing -- value assignments,
 * the wake-one vs wake-all condition in ksem_up(), and return statements
 * in ksem_trydown() are missing from view.
 */
/* Initialize @sem with initial count @value (assignment not visible here). */
300 void ksem_init(struct ksem *sem, int value)
302 SLASSERT(sem != NULL);
303 kspin_init(&sem->guard);
304 xnu_waitq_init(&sem->q, SYNC_POLICY_FIFO);
306 ON_SYNC_DEBUG(sem->magic = KSEM_MAGIC);
/* Finalize @sem. */
309 void ksem_done(struct ksem *sem)
311 SLASSERT(sem != NULL);
312 SLASSERT(sem->magic == KSEM_MAGIC);
314 * XXX nikita: cannot check that &sem->q is empty because
315 * wait_queue_empty() is Apple private API.
317 kspin_done(&sem->guard);
/*
 * Add @value to the semaphore and wake sleepers.
 * Returns 0 when a thread was actually awakened, 1 otherwise.
 * NOTE(review): the condition choosing wakeup_one vs wakeup_all is missing
 * from this listing -- presumably value == 1 wakes one, larger wakes all;
 * confirm against full source.
 */
320 int ksem_up(struct ksem *sem, int value)
324 SLASSERT(sem != NULL);
325 SLASSERT(sem->magic == KSEM_MAGIC);
326 SLASSERT(value >= 0);
328 kspin_lock(&sem->guard);
331 result = xnu_waitq_wakeup_one(&sem->q, sem,
334 result = xnu_waitq_wakeup_all(&sem->q, sem,
336 kspin_unlock(&sem->guard);
337 SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
338 return (result == KERN_SUCCESS) ? 0 : 1;
/*
 * Block until the semaphore count reaches @value, then consume it
 * (the decrement is not visible in this listing).  Must be called from a
 * context that may block (preemption level 0).
 */
341 void ksem_down(struct ksem *sem, int value)
345 SLASSERT(sem != NULL);
346 SLASSERT(sem->magic == KSEM_MAGIC);
347 SLASSERT(value >= 0);
348 SLASSERT(get_preemption_level() == 0);
350 kspin_lock(&sem->guard);
351 while (sem->value < value) {
/* queue ourselves on sem->q before dropping the guard to avoid lost wakeup */
352 result = xnu_waitq_assert_wait(&sem->q, sem,
354 SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
355 kspin_unlock(&sem->guard);
356 if (result == THREAD_WAITING)
357 thread_block(THREAD_CONTINUE_NULL);
358 kspin_lock(&sem->guard);
361 kspin_unlock(&sem->guard);
/* Non-blocking down: consume @value if available (returns not visible). */
364 int ksem_trydown(struct ksem *sem, int value)
368 SLASSERT(sem != NULL);
369 SLASSERT(sem->magic == KSEM_MAGIC);
370 SLASSERT(value >= 0);
372 kspin_lock(&sem->guard);
373 if (sem->value >= value) {
378 kspin_unlock(&sem->guard);
/*
 * kmut: mutex implemented as a binary ksem (initial count 1) with
 * debug-only owner tracking.  NOTE(review): truncated listing -- e.g. the
 * ksem_up() call in kmut_unlock() is not visible.
 */
/* Initialize @mut: underlying semaphore starts at 1 (unlocked). */
382 void kmut_init(struct kmut *mut)
384 SLASSERT(mut != NULL);
385 ksem_init(&mut->s, 1);
386 ON_SYNC_DEBUG(mut->magic = KMUT_MAGIC);
387 ON_SYNC_DEBUG(mut->owner = NULL);
/* Finalize @mut; must be unlocked. */
390 void kmut_done(struct kmut *mut)
392 SLASSERT(mut != NULL);
393 SLASSERT(mut->magic == KMUT_MAGIC);
394 SLASSERT(mut->owner == NULL);
/* Acquire @mut (blocking); asserts against recursive locking. */
398 void kmut_lock(struct kmut *mut)
400 SLASSERT(mut != NULL);
401 SLASSERT(mut->magic == KMUT_MAGIC);
402 SLASSERT(mut->owner != current_thread());
403 SLASSERT(get_preemption_level() == 0);
405 ksem_down(&mut->s, 1);
406 ON_SYNC_DEBUG(mut->owner = current_thread());
/* Release @mut; caller must be the owner (ksem_up call not visible here). */
409 void kmut_unlock(struct kmut *mut)
411 SLASSERT(mut != NULL);
412 SLASSERT(mut->magic == KMUT_MAGIC);
413 SLASSERT(mut->owner == current_thread());
415 ON_SYNC_DEBUG(mut->owner = NULL);
/* Non-blocking acquire; forwards ksem_trydown()'s result. */
419 int kmut_trylock(struct kmut *mut)
421 SLASSERT(mut != NULL);
422 SLASSERT(mut->magic == KMUT_MAGIC);
423 return ksem_trydown(&mut->s, 1);
/* Hint: true iff the current thread holds @mut (debug owner field only). */
427 int kmut_islocked(struct kmut *mut)
429 SLASSERT(mut != NULL);
430 SLASSERT(mut->magic == KMUT_MAGIC);
431 return mut->owner == current_thread();
/* Hint: true iff the current thread does NOT hold @mut. */
434 int kmut_isnotlocked(struct kmut *mut)
436 SLASSERT(mut != NULL);
437 SLASSERT(mut->magic == KMUT_MAGIC);
438 return mut->owner != current_thread();
/*
 * kcond: condition variable.  Waiters form a singly-linked stack of
 * stack-allocated kcond_link entries, each carrying a private ksem the
 * signaller ups.  The guard kspin protects the waiter list.
 *
 * NOTE(review): truncated listing; braces and an apparent NULL check in
 * kcond_signal_guard() are missing from view.
 */
/* Initialize @cond with an empty waiter list. */
443 void kcond_init(struct kcond *cond)
445 SLASSERT(cond != NULL);
447 kspin_init(&cond->guard);
448 cond->waiters = NULL;
449 ON_SYNC_DEBUG(cond->magic = KCOND_MAGIC);
/* Finalize @cond; no waiters may remain. */
452 void kcond_done(struct kcond *cond)
454 SLASSERT(cond != NULL);
455 SLASSERT(cond->magic == KCOND_MAGIC);
456 SLASSERT(cond->waiters == NULL);
457 kspin_done(&cond->guard);
/*
 * Wait on @cond, releasing and re-acquiring caller-held @lock around the
 * sleep (the unlock/relock of @lock itself is not visible in this listing).
 * The link lives on the caller's stack; its ksem starts at 0 and is upped
 * by a signaller.
 */
460 void kcond_wait(struct kcond *cond, struct kspin *lock)
462 struct kcond_link link;
464 SLASSERT(cond != NULL);
465 SLASSERT(lock != NULL);
466 SLASSERT(cond->magic == KCOND_MAGIC);
467 SLASSERT(kspin_islocked(lock));
469 ksem_init(&link.sem, 0);
470 kspin_lock(&cond->guard);
471 link.next = cond->waiters;
472 cond->waiters = &link;
473 kspin_unlock(&cond->guard);
476 ksem_down(&link.sem, 1);
/* lock/unlock of guard here presumably synchronizes with the signaller
 * before `link` goes out of scope -- confirm against full source. */
478 kspin_lock(&cond->guard);
479 kspin_unlock(&cond->guard);
/* Like kcond_wait(), but the caller already holds cond->guard itself. */
483 void kcond_wait_guard(struct kcond *cond)
485 struct kcond_link link;
487 SLASSERT(cond != NULL);
488 SLASSERT(cond->magic == KCOND_MAGIC);
489 SLASSERT(kspin_islocked(&cond->guard));
491 ksem_init(&link.sem, 0);
492 link.next = cond->waiters;
493 cond->waiters = &link;
494 kspin_unlock(&cond->guard);
496 ksem_down(&link.sem, 1);
/* return with the guard re-held, as the caller expects */
498 kspin_lock(&cond->guard);
/* Wake one waiter; caller holds cond->guard (pop + sem up). */
501 void kcond_signal_guard(struct kcond *cond)
503 struct kcond_link *link;
505 SLASSERT(cond != NULL);
506 SLASSERT(cond->magic == KCOND_MAGIC);
507 SLASSERT(kspin_islocked(&cond->guard));
509 link = cond->waiters;
511 cond->waiters = link->next;
512 ksem_up(&link->sem, 1);
/* Wake one waiter, taking the guard internally. */
516 void kcond_signal(struct kcond *cond)
518 SLASSERT(cond != NULL);
519 SLASSERT(cond->magic == KCOND_MAGIC);
521 kspin_lock(&cond->guard);
522 kcond_signal_guard(cond);
523 kspin_unlock(&cond->guard);
/* Wake every waiter and empty the list; caller holds cond->guard. */
526 void kcond_broadcast_guard(struct kcond *cond)
528 struct kcond_link *link;
530 SLASSERT(cond != NULL);
531 SLASSERT(cond->magic == KCOND_MAGIC);
532 SLASSERT(kspin_islocked(&cond->guard));
534 for (link = cond->waiters; link != NULL; link = link->next)
535 ksem_up(&link->sem, 1);
536 cond->waiters = NULL;
/* Wake every waiter, taking the guard internally. */
539 void kcond_broadcast(struct kcond *cond)
541 SLASSERT(cond != NULL);
542 SLASSERT(cond->magic == KCOND_MAGIC);
544 kspin_lock(&cond->guard);
545 kcond_broadcast_guard(cond);
546 kspin_unlock(&cond->guard);
/*
 * krw_sem: blocking read/write semaphore built on kcond.  Count convention
 * (from the visible assertions): > 0 readers, -1 one writer, 0 free.
 * Unlike krw_spin, contended paths sleep via kcond_wait_guard().
 *
 * NOTE(review): truncated listing -- count increments/decrements and
 * try-path return statements are not visible.
 */
/* Initialize @sem (count reset not visible here). */
549 void krw_sem_init(struct krw_sem *sem)
551 SLASSERT(sem != NULL);
553 kcond_init(&sem->cond);
555 ON_SYNC_DEBUG(sem->magic = KRW_MAGIC);
/* Finalize @sem; must be free. */
558 void krw_sem_done(struct krw_sem *sem)
560 SLASSERT(sem != NULL);
561 SLASSERT(sem->magic == KRW_MAGIC);
562 SLASSERT(sem->count == 0);
563 kcond_done(&sem->cond);
/* Take for reading; sleeps while a writer holds the semaphore. */
566 void krw_sem_down_r(struct krw_sem *sem)
568 SLASSERT(sem != NULL);
569 SLASSERT(sem->magic == KRW_MAGIC);
570 SLASSERT(get_preemption_level() == 0);
572 kspin_lock(&sem->cond.guard);
573 while (sem->count < 0)
574 kcond_wait_guard(&sem->cond);
576 kspin_unlock(&sem->cond.guard);
/* Try to take for reading; fails (without blocking) if a writer holds it. */
579 int krw_sem_down_r_try(struct krw_sem *sem)
581 SLASSERT(sem != NULL);
582 SLASSERT(sem->magic == KRW_MAGIC);
584 kspin_lock(&sem->cond.guard);
585 if (sem->count < 0) {
586 kspin_unlock(&sem->cond.guard);
590 kspin_unlock(&sem->cond.guard);
/* Take for writing; sleeps until no readers and no writer remain. */
594 void krw_sem_down_w(struct krw_sem *sem)
596 SLASSERT(sem != NULL);
597 SLASSERT(sem->magic == KRW_MAGIC);
598 SLASSERT(get_preemption_level() == 0);
600 kspin_lock(&sem->cond.guard);
601 while (sem->count != 0)
602 kcond_wait_guard(&sem->cond);
604 kspin_unlock(&sem->cond.guard);
/* Try to take for writing; fails unless completely free. */
607 int krw_sem_down_w_try(struct krw_sem *sem)
609 SLASSERT(sem != NULL);
610 SLASSERT(sem->magic == KRW_MAGIC);
612 kspin_lock(&sem->cond.guard);
613 if (sem->count != 0) {
614 kspin_unlock(&sem->cond.guard);
618 kspin_unlock(&sem->cond.guard);
/* Drop a read hold; broadcast (under guard) so a waiting writer rechecks. */
622 void krw_sem_up_r(struct krw_sem *sem)
624 SLASSERT(sem != NULL);
625 SLASSERT(sem->magic == KRW_MAGIC);
626 SLASSERT(sem->count > 0);
628 kspin_lock(&sem->cond.guard);
631 kcond_broadcast_guard(&sem->cond);
632 kspin_unlock(&sem->cond.guard);
/* Drop the write hold; note the broadcast happens AFTER releasing guard. */
635 void krw_sem_up_w(struct krw_sem *sem)
637 SLASSERT(sem != NULL);
638 SLASSERT(sem->magic == KRW_MAGIC);
639 SLASSERT(sem->count == -1);
641 kspin_lock(&sem->cond.guard);
643 kspin_unlock(&sem->cond.guard);
644 kcond_broadcast(&sem->cond);
/*
 * ksleep: wait-channel machinery (Linux wait-queue analogue).  A channel
 * holds a guarded list of ksleep_link waiters; each link's event is the
 * waiting thread itself, so wakeups go through thread_wakeup().
 *
 * NOTE(review): truncated listing -- braces, blank lines and some field
 * initializations (e.g. link->flags, link->hits) are missing from view.
 */
/* Initialize @chan with an empty waiter list. */
647 void ksleep_chan_init(struct ksleep_chan *chan)
649 SLASSERT(chan != NULL);
651 kspin_init(&chan->guard);
652 CFS_INIT_LIST_HEAD(&chan->waiters);
653 ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
/* Finalize @chan; all waiters must have been removed. */
656 void ksleep_chan_done(struct ksleep_chan *chan)
658 SLASSERT(chan != NULL);
659 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
660 SLASSERT(list_empty(&chan->waiters));
661 kspin_done(&chan->guard);
/* Initialize @link for the calling thread (event = current_thread()). */
664 void ksleep_link_init(struct ksleep_link *link)
666 SLASSERT(link != NULL);
668 CFS_INIT_LIST_HEAD(&link->linkage);
670 link->event = current_thread();
672 link->forward = NULL;
673 ON_SYNC_DEBUG(link->magic = KSLEEP_LINK_MAGIC);
/* Finalize @link; it must already be off any channel. */
676 void ksleep_link_done(struct ksleep_link *link)
678 SLASSERT(link != NULL);
679 SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
680 SLASSERT(list_empty(&link->linkage));
/*
 * Queue @link on @chan.  Exclusive waiters go to the tail, non-exclusive
 * to the head, so ksleep_wake_nr() reaches non-exclusive ones first.
 */
683 void ksleep_add(struct ksleep_chan *chan, struct ksleep_link *link)
685 SLASSERT(chan != NULL);
686 SLASSERT(link != NULL);
687 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
688 SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
689 SLASSERT(list_empty(&link->linkage));
691 kspin_lock(&chan->guard);
692 if (link->flags & KSLEEP_EXCLUSIVE)
693 list_add_tail(&link->linkage, &chan->waiters);
695 list_add(&link->linkage, &chan->waiters);
696 kspin_unlock(&chan->guard);
/* Remove @link from @chan (safe if already removed: list_del_init). */
699 void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link)
701 SLASSERT(chan != NULL);
702 SLASSERT(link != NULL);
703 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
704 SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
706 kspin_lock(&chan->guard);
707 list_del_init(&link->linkage);
708 kspin_unlock(&chan->guard);
/*
 * "hit" bookkeeping: records wakeups that arrived while the target thread
 * was not yet blocked, so ksleep_wait() can consume them instead of
 * sleeping (avoids the lost-wakeup race).
 *
 * NOTE(review): truncated listing -- the hit-consume/increment statements
 * and return paths are missing from view.
 */
/* Under chan->guard: true iff @event has a pending (unconsumed) hit. */
711 static int has_hits(struct ksleep_chan *chan, event_t event)
713 struct ksleep_link *scan;
715 SLASSERT(kspin_islocked(&chan->guard));
716 list_for_each_entry(scan, &chan->waiters, linkage) {
717 if (scan->event == event && scan->hits > 0) {
/* Under chan->guard: record a pending hit for @event's waiter, if queued. */
726 static void add_hit(struct ksleep_chan *chan, event_t event)
728 struct ksleep_link *scan;
731 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
732 * from here: this will lead to infinite recursion.
735 SLASSERT(kspin_islocked(&chan->guard));
736 list_for_each_entry(scan, &chan->waiters, linkage) {
737 if (scan->event == event) {
/*
 * Sleeping side of the ksleep channel.  Both functions first consume a
 * pending "hit" (wakeup that raced ahead of the sleep) and only block via
 * assert_wait()/thread_block() when none is pending.
 *
 * NOTE(review): truncated listing -- local declarations (event, result,
 * expire, now), braces and some statements are missing from view.
 */
/* Block on @chan in scheduler state @state until woken (or hit pending). */
744 void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
751 SLASSERT(chan != NULL);
752 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
753 SLASSERT(get_preemption_level() == 0);
/* the wait event is the thread itself -- matches thread_wakeup() in wake */
755 event = current_thread();
756 kspin_lock(&chan->guard);
757 if (!has_hits(chan, event)) {
758 result = assert_wait(event, state);
759 kspin_unlock(&chan->guard);
760 SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
761 if (result == THREAD_WAITING)
762 thread_block(THREAD_CONTINUE_NULL);
764 kspin_unlock(&chan->guard);
769 * Sleep on @chan for no longer than @timeout nano-seconds. Return remaining
770 * sleep time (non-zero only if thread was waken by a signal (not currently
771 * implemented), or waitq was already in the "signalled" state).
773 int64_t ksleep_timedwait(struct ksleep_chan *chan,
774 cfs_task_state_t state,
781 SLASSERT(chan != NULL);
782 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
783 SLASSERT(get_preemption_level() == 0);
785 event = current_thread();
786 kspin_lock(&chan->guard);
787 if (!has_hits(chan, event)) {
790 result = assert_wait(event, state);
793 * arm a timer. thread_set_timer()'s first argument is
794 * uint32_t, so we have to cook deadline ourselves.
796 nanoseconds_to_absolutetime(timeout, &expire);
797 clock_absolutetime_interval_to_deadline(expire, &expire);
798 thread_set_timer_deadline(expire);
800 kspin_unlock(&chan->guard);
801 SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
802 if (result == THREAD_WAITING)
803 result = thread_block(THREAD_CONTINUE_NULL);
/* always cancel the armed timer, whether we timed out or were woken */
804 thread_cancel_timer();
806 if (result == THREAD_TIMED_OUT)
/* woken early: convert remaining absolute time back to nanoseconds */
810 clock_get_uptime(&now);
812 absolutetime_to_nanoseconds(expire - now, &timeout);
817 /* just return timeout, because I've got event and don't need to wait */
818 kspin_unlock(&chan->guard);
825 * wake up single exclusive waiter (plus some arbitrary number of *
/* Convenience wrapper: wake at most one exclusive waiter. */
828 void ksleep_wake(struct ksleep_chan *chan)
831 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
832 * from here: this will lead to infinite recursion.
834 ksleep_wake_nr(chan, 1);
838 * wake up all waiters on @chan
/* Convenience wrapper: nr == 0 means "wake everyone". */
840 void ksleep_wake_all(struct ksleep_chan *chan)
843 ksleep_wake_nr(chan, 0);
848 * wakeup no more than @nr exclusive waiters from @chan, plus some arbitrary
849 * number of non-exclusive. If @nr is 0, wake up all waiters.
851 void ksleep_wake_nr(struct ksleep_chan *chan, int nr)
853 struct ksleep_link *scan;
857 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
858 * from here: this will lead to infinite recursion.
861 SLASSERT(chan != NULL);
862 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
864 kspin_lock(&chan->guard);
865 list_for_each_entry(scan, &chan->waiters, linkage) {
866 struct ksleep_chan *forward;
/* a link may forward the wakeup to another channel (scan->forward);
 * NOTE(review): the NULL-forward handling is not visible in this
 * truncated listing -- confirm against full source. */
868 forward = scan->forward;
870 kspin_lock(&forward->guard);
871 result = thread_wakeup(scan->event);
872 SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
873 if (result == KERN_NOT_WAITING) {
/* target was not blocked yet: record a hit so its next wait returns */
876 add_hit(forward, scan->event);
879 kspin_unlock(&forward->guard);
/* stop after waking @nr exclusive waiters (nr == 0 never hits this) */
880 if ((scan->flags & KSLEEP_EXCLUSIVE) && --nr == 0)
883 kspin_unlock(&chan->guard);
/*
 * ktimer: one-shot kernel timer built on the XNU thread_call facility.
 * NOTE(review): truncated listing -- func/arg/armed/deadline field
 * assignments, local declarations and return statements are missing.
 */
/* Initialize @t to invoke @func(@arg) when armed and expired. */
886 void ktimer_init(struct ktimer *t, void (*func)(void *), void *arg)
889 SLASSERT(func != NULL);
891 kspin_init(&t->guard);
894 ON_SYNC_DEBUG(t->magic = KTIMER_MAGIC);
/* Finalize @t and poison its magic. */
897 void ktimer_done(struct ktimer *t)
900 SLASSERT(t->magic == KTIMER_MAGIC);
901 kspin_done(&t->guard);
902 ON_SYNC_DEBUG(t->magic = 0);
/* thread_call callback: runs the user function (arg0 is the ktimer). */
905 static void ktimer_actor(void *arg0, void *arg1)
912 * this assumes that ktimer's are never freed.
915 SLASSERT(t->magic == KTIMER_MAGIC);
918 * call actual timer function
920 kspin_lock(&t->guard);
923 kspin_unlock(&t->guard);
/* Private thread_call entry points (not in public Darwin headers). */
929 extern boolean_t thread_call_func_cancel(thread_call_func_t, thread_call_param_t, boolean_t);
930 extern void thread_call_func_delayed(thread_call_func_t, thread_call_param_t, __u64);
/* Cancel any pending callout; caller holds t->guard. */
932 static void ktimer_disarm_locked(struct ktimer *t)
935 SLASSERT(t->magic == KTIMER_MAGIC);
937 thread_call_func_cancel(ktimer_actor, t, FALSE);
941 * Received deadline is nanoseconds, but time checked by
942 * thread_call is absolute time (The abstime unit is equal to
943 * the length of one bus cycle, so the duration is dependent
944 * on the bus speed of the computer), so we need to convert
945 * nanotime to abstime by nanoseconds_to_absolutetime().
947 * Refer to _delayed_call_timer(...)
949 * if thread_call_func_delayed is not exported in the future,
950 * we can use timeout() or bsd_timeout() to replace it.
/*
 * (Re-)arm @t to fire at @deadline nanoseconds.
 * NOTE(review): `abstime` is computed via nanoseconds_to_absolutetime()
 * but the raw `deadline` is what gets passed to thread_call_func_delayed(),
 * contradicting the conversion comment above -- looks like a bug; verify
 * against the full source before changing.
 */
952 void ktimer_arm(struct ktimer *t, u_int64_t deadline)
956 SLASSERT(t->magic == KTIMER_MAGIC);
958 kspin_lock(&t->guard);
959 ktimer_disarm_locked(t);
961 nanoseconds_to_absolutetime(deadline, &abstime);
962 thread_call_func_delayed(ktimer_actor, t, deadline);
963 kspin_unlock(&t->guard);
/* Cancel @t if pending. */
966 void ktimer_disarm(struct ktimer *t)
969 SLASSERT(t->magic == KTIMER_MAGIC);
971 kspin_lock(&t->guard);
973 ktimer_disarm_locked(t);
974 kspin_unlock(&t->guard);
/* Unsynchronized armed-state query (hint only; return not visible here). */
977 int ktimer_is_armed(struct ktimer *t)
980 SLASSERT(t->magic == KTIMER_MAGIC);
983 * no locking---result is only a hint anyway.
/* Return @t's configured deadline (return statement not visible here). */
988 u_int64_t ktimer_deadline(struct ktimer *t)
991 SLASSERT(t->magic == KTIMER_MAGIC);
/* Module bring-up: create the lock group every xnu_spin_init() stamps its
 * locks into.  Must run before any kspin/ksem/etc. is initialized. */
996 void cfs_sync_init(void)
999 /* Initialize lock group */
1000 cfs_lock_grp = lck_grp_alloc_init("libcfs sync", LCK_GRP_ATTR_NULL);
/* Module tear-down: drop our lock-group reference and clear the pointer. */
1004 void cfs_sync_fini(void)
1008 * XXX Liang: destroy lock group. As we haven't called lock_done
1009 * for all locks, cfs_lock_grp may not be freed by kernel(reference
1012 lck_grp_free(cfs_lock_grp);
1013 cfs_lock_grp = NULL;
1018 * c-indentation-style: "K&R"