1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Lustre Light Super operations
6 * Copyright (c) 2004 Cluster File Systems, Inc.
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or modify it under
11 * the terms of version 2 of the GNU General Public License as published by
12 * the Free Software Foundation. Lustre is distributed in the hope that it
13 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
14 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. You should have received a
16 * copy of the GNU General Public License along with Lustre; if not, write
17 * to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
24 * Created by nikita on Sun Jul 18 2004.
26 * XNU synchronization primitives.
30 * This file contains very simplistic implementations of (saner) API for
31 * basic synchronization primitives:
39 * - condition variable (kcond)
41 * - wait-queue (ksleep_chan and ksleep_link)
45 * A lot can be optimized here.
/*
 * Platform spin-lock glue: xnu_spin_* maps onto whichever locking API this
 * Darwin flavour exports -- lck_spin_* (Darwin 8), raw hw_lock_* (older
 * kernels), or no-ops for the uniprocessor / check-only build.
 *
 * NOTE(review): this file is a numbered, line-sampled extract; the
 * #if/#else/#endif lines selecting between the three variants, plus braces,
 * are elided from view.  Do not re-flow this region without the full file.
 */
48 #define DEBUG_SUBSYSTEM S_LNET
51 # include <kern/locks.h>
53 # include <mach/mach_types.h>
54 # include <sys/types.h>
55 # include <kern/simple_lock.h>
58 #include <libcfs/libcfs.h>
59 #include <libcfs/kp30.h>
/* assertion that is compiled in only when sync debugging is enabled */
61 #define SLASSERT(e) ON_SYNC_DEBUG(LASSERT(e))
63 #ifdef HAVE_GET_PREEMPTION_LEVEL
64 extern int get_preemption_level(void);
/* fallback when the kernel does not export get_preemption_level() */
66 #define get_preemption_level() (0)
/* lock group shared by all lck_spin_t locks; created in cfs_sync_init() */
72 static lck_grp_t *cfs_lock_grp = NULL;
73 #warning "Verify definition of lck_spin_t hasn't been changed while building!"
75 /* hw_lock_* are not exported by Darwin8 */
/* initialize a spin-lock in place; cfs_sync_init() must have run first */
76 static inline void xnu_spin_init(xnu_spin_t *s)
78 SLASSERT(cfs_lock_grp != NULL);
79 //*s = lck_spin_alloc_init(cfs_lock_grp, LCK_ATTR_NULL);
80 lck_spin_init((lck_spin_t *)s, cfs_lock_grp, LCK_ATTR_NULL);
/* tear down a spin-lock initialized by xnu_spin_init() */
83 static inline void xnu_spin_done(xnu_spin_t *s)
85 SLASSERT(cfs_lock_grp != NULL);
86 //lck_spin_free(*s, cfs_lock_grp);
88 lck_spin_destroy((lck_spin_t *)s, cfs_lock_grp);
91 #define xnu_spin_lock(s) lck_spin_lock((lck_spin_t *)(s))
92 #define xnu_spin_unlock(s) lck_spin_unlock((lck_spin_t *)(s))
94 #warning "Darwin8 does not export lck_spin_try_lock"
/* always reports success: Darwin8 exports no try-lock (see #warning) */
95 #define xnu_spin_try(s) (1)
/* pre-Darwin8 variant: use the (un-exported) hw_lock_* primitives */
98 extern void hw_lock_init(hw_lock_t);
99 extern void hw_lock_lock(hw_lock_t);
100 extern void hw_lock_unlock(hw_lock_t);
101 extern unsigned int hw_lock_to(hw_lock_t, unsigned int);
102 extern unsigned int hw_lock_try(hw_lock_t);
103 extern unsigned int hw_lock_held(hw_lock_t);
105 #define xnu_spin_init(s) hw_lock_init(s)
106 #define xnu_spin_done(s) do {} while (0)
107 #define xnu_spin_lock(s) hw_lock_lock(s)
108 #define xnu_spin_unlock(s) hw_lock_unlock(s)
109 #define xnu_spin_try(s) hw_lock_try(s)
/* uniprocessor build: spinning is unnecessary, every op is a no-op */
113 #define xnu_spin_init(s) do {} while (0)
114 #define xnu_spin_done(s) do {} while (0)
115 #define xnu_spin_lock(s) do {} while (0)
116 #define xnu_spin_unlock(s) do {} while (0)
117 #define xnu_spin_try(s) (1)
121 * Warning: low level libcfs debugging code (libcfs_debug_msg(), for
122 * example), uses spin-locks, so debugging output here may lead to nasty
125 * In uniprocessor version of spin-lock. Only checks.
/*
 * kspin: checked spin-lock.  The ON_SYNC_DEBUG fields (magic, owner) let
 * the SLASSERTs catch double-init, unlock-by-non-owner and recursive
 * acquisition.  NOTE(review): braces and some statements are elided in
 * this sampled extract.
 */
/* initialize @spin; no owner initially */
128 void kspin_init(struct kspin *spin)
130 SLASSERT(spin != NULL);
131 xnu_spin_init(&spin->lock);
132 ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
133 ON_SYNC_DEBUG(spin->owner = NULL);
/* destroy @spin; must not be held by anybody */
136 void kspin_done(struct kspin *spin)
138 SLASSERT(spin != NULL);
139 SLASSERT(spin->magic == KSPIN_MAGIC);
140 SLASSERT(spin->owner == NULL);
141 xnu_spin_done(&spin->lock);
/* acquire @spin; asserts the caller does not already hold it */
144 void kspin_lock(struct kspin *spin)
146 SLASSERT(spin != NULL);
147 SLASSERT(spin->magic == KSPIN_MAGIC);
148 SLASSERT(spin->owner != current_thread());
151 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
152 * from here: this will lead to infinite recursion.
155 xnu_spin_lock(&spin->lock);
156 SLASSERT(spin->owner == NULL);
157 ON_SYNC_DEBUG(spin->owner = current_thread());
/* release @spin; caller must be the recorded owner */
160 void kspin_unlock(struct kspin *spin)
163 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
164 * from here: this will lead to infinite recursion.
167 SLASSERT(spin != NULL);
168 SLASSERT(spin->magic == KSPIN_MAGIC);
169 SLASSERT(spin->owner == current_thread());
170 ON_SYNC_DEBUG(spin->owner = NULL);
171 xnu_spin_unlock(&spin->lock);
/* non-blocking acquire; records ownership on success.  NOTE(review): the
 * return statements are elided from this extract -- confirm return values
 * against the full file */
174 int kspin_trylock(struct kspin *spin)
176 SLASSERT(spin != NULL);
177 SLASSERT(spin->magic == KSPIN_MAGIC);
179 if (xnu_spin_try(&spin->lock)) {
180 SLASSERT(spin->owner == NULL);
181 ON_SYNC_DEBUG(spin->owner = current_thread());
/* debug predicates: meaningful only when ON_SYNC_DEBUG tracks ->owner */
188 int kspin_islocked(struct kspin *spin)
190 SLASSERT(spin != NULL);
191 SLASSERT(spin->magic == KSPIN_MAGIC);
192 return spin->owner == current_thread();
195 int kspin_isnotlocked(struct kspin *spin)
197 SLASSERT(spin != NULL);
198 SLASSERT(spin->magic == KSPIN_MAGIC);
199 return spin->owner != current_thread();
/*
 * krw_spin: read/write spin-lock built from a kspin guard plus a count
 * (asserts below show: count > 0 while readers hold it, count == -1 while
 * the writer holds it, 0 when free).  Contenders busy-wait, periodically
 * dropping the guard.  NOTE(review): the count updates and the loop-delay
 * bodies are elided in this sampled extract.
 */
204 * read/write spin-lock
/* initialize @rwspin: guard spin-lock plus zeroed state */
206 void krw_spin_init(struct krw_spin *rwspin)
208 SLASSERT(rwspin != NULL);
210 kspin_init(&rwspin->guard);
212 ON_SYNC_DEBUG(rwspin->magic = KRW_SPIN_MAGIC);
/* destroy @rwspin; must be free (count == 0) */
215 void krw_spin_done(struct krw_spin *rwspin)
217 SLASSERT(rwspin != NULL);
218 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
219 SLASSERT(rwspin->count == 0);
220 kspin_done(&rwspin->guard);
/* acquire for read: spin while a writer (count < 0) holds the lock,
 * releasing the guard between polls so the writer can make progress */
223 void krw_spin_down_r(struct krw_spin *rwspin)
226 SLASSERT(rwspin != NULL);
227 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
229 kspin_lock(&rwspin->guard);
230 while(rwspin->count < 0) {
232 kspin_unlock(&rwspin->guard);
233 while (--i != 0 && rwspin->count < 0)
235 kspin_lock(&rwspin->guard);
238 kspin_unlock(&rwspin->guard);
/* acquire for write: spin until the lock is completely free (count == 0) */
241 void krw_spin_down_w(struct krw_spin *rwspin)
244 SLASSERT(rwspin != NULL);
245 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
247 kspin_lock(&rwspin->guard);
248 while (rwspin->count != 0) {
250 kspin_unlock(&rwspin->guard);
251 while (--i != 0 && rwspin->count != 0)
253 kspin_lock(&rwspin->guard);
256 kspin_unlock(&rwspin->guard);
/* drop a read hold; a reader count must be outstanding.  NOTE(review):
 * the decrement itself is elided from this extract */
259 void krw_spin_up_r(struct krw_spin *rwspin)
261 SLASSERT(rwspin != NULL);
262 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
263 SLASSERT(rwspin->count > 0);
265 kspin_lock(&rwspin->guard);
267 kspin_unlock(&rwspin->guard);
/* drop the write hold; count must be exactly -1.  NOTE(review): the reset
 * of count is elided from this extract */
270 void krw_spin_up_w(struct krw_spin *rwspin)
272 SLASSERT(rwspin != NULL);
273 SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
274 SLASSERT(rwspin->count == -1);
276 kspin_lock(&rwspin->guard);
278 kspin_unlock(&rwspin->guard);
/*
 * xnu_waitq glue: one variant uses the BSD-style wakeup()/wakeup_one()/
 * assert_wait() calls, the other the Mach wait_queue_* API.
 * NOTE(review): the #if/#else/#endif selecting between them is elided from
 * this sampled extract.
 */
286 #define xnu_waitq_init(q, a) do {} while (0)
287 #define xnu_waitq_done(q) do {} while (0)
288 #define xnu_waitq_wakeup_one(q, e, s) ({wakeup_one((void *)(e)); KERN_SUCCESS;})
289 #define xnu_waitq_wakeup_all(q, e, s) ({wakeup((void *)(e)); KERN_SUCCESS;})
290 #define xnu_waitq_assert_wait(q, e, s) assert_wait((e), s)
294 #define xnu_waitq_init(q, a) wait_queue_init((q), a)
295 #define xnu_waitq_done(q) do {} while (0)
296 #define xnu_waitq_wakeup_one(q, e, s) wait_queue_wakeup_one((q), (event_t)(e), s)
297 #define xnu_waitq_wakeup_all(q, e, s) wait_queue_wakeup_all((q), (event_t)(e), s)
298 #define xnu_waitq_assert_wait(q, e, s) wait_queue_assert_wait((q), (event_t)(e), s)
/*
 * ksem: counting semaphore = guard spin-lock + value + wait-queue.
 */
/* initialize @sem.  NOTE(review): the assignment of @value to sem->value
 * is elided from this extract -- confirm against the full file */
301 void ksem_init(struct ksem *sem, int value)
303 SLASSERT(sem != NULL);
304 kspin_init(&sem->guard);
305 xnu_waitq_init(&sem->q, SYNC_POLICY_FIFO);
307 ON_SYNC_DEBUG(sem->magic = KSEM_MAGIC);
/* destroy @sem */
310 void ksem_done(struct ksem *sem)
312 SLASSERT(sem != NULL);
313 SLASSERT(sem->magic == KSEM_MAGIC);
315 * XXX nikita: cannot check that &sem->q is empty because
316 * wait_queue_empty() is Apple private API.
318 kspin_done(&sem->guard);
/* add @value to the semaphore and wake sleepers; returns 0 when a waiter
 * was actually woken (KERN_SUCCESS), 1 when nobody was waiting.
 * NOTE(review): the branch choosing wakeup_one vs. wakeup_all and the
 * value update are elided from this extract */
321 int ksem_up(struct ksem *sem, int value)
325 SLASSERT(sem != NULL);
326 SLASSERT(sem->magic == KSEM_MAGIC);
327 SLASSERT(value >= 0);
329 kspin_lock(&sem->guard);
332 result = xnu_waitq_wakeup_one(&sem->q, sem,
335 result = xnu_waitq_wakeup_all(&sem->q, sem,
337 kspin_unlock(&sem->guard);
338 SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
339 return (result == KERN_SUCCESS) ? 0 : 1;
/* block until sem->value >= @value; must not be called while spinning
 * (preemption level 0).  Classic assert_wait/thread_block sleep loop:
 * the guard is dropped across the block and re-taken before re-checking */
342 void ksem_down(struct ksem *sem, int value)
346 SLASSERT(sem != NULL);
347 SLASSERT(sem->magic == KSEM_MAGIC);
348 SLASSERT(value >= 0);
349 SLASSERT(get_preemption_level() == 0);
351 kspin_lock(&sem->guard);
352 while (sem->value < value) {
353 result = xnu_waitq_assert_wait(&sem->q, sem,
355 SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
356 kspin_unlock(&sem->guard);
357 if (result == THREAD_WAITING)
358 thread_block(THREAD_CONTINUE_NULL);
359 kspin_lock(&sem->guard);
362 kspin_unlock(&sem->guard);
/* non-blocking down: succeeds only when sem->value >= @value already.
 * NOTE(review): the value decrement and return statements are elided */
365 int ksem_trydown(struct ksem *sem, int value)
369 SLASSERT(sem != NULL);
370 SLASSERT(sem->magic == KSEM_MAGIC);
371 SLASSERT(value >= 0);
373 kspin_lock(&sem->guard);
374 if (sem->value >= value) {
379 kspin_unlock(&sem->guard);
/*
 * kmut: mutex implemented as a ksem with initial value 1; ON_SYNC_DEBUG
 * owner tracking backs the islocked/isnotlocked predicates.
 */
/* initialize @mut as an unlocked binary semaphore */
383 void kmut_init(struct kmut *mut)
385 SLASSERT(mut != NULL);
386 ksem_init(&mut->s, 1);
387 ON_SYNC_DEBUG(mut->magic = KMUT_MAGIC);
388 ON_SYNC_DEBUG(mut->owner = NULL);
/* destroy @mut; must not be held */
391 void kmut_done(struct kmut *mut)
393 SLASSERT(mut != NULL);
394 SLASSERT(mut->magic == KMUT_MAGIC);
395 SLASSERT(mut->owner == NULL);
/* acquire @mut (may sleep); recursive locking is asserted against */
399 void kmut_lock(struct kmut *mut)
401 SLASSERT(mut != NULL);
402 SLASSERT(mut->magic == KMUT_MAGIC);
403 SLASSERT(mut->owner != current_thread());
404 SLASSERT(get_preemption_level() == 0);
406 ksem_down(&mut->s, 1);
407 ON_SYNC_DEBUG(mut->owner = current_thread());
/* release @mut; caller must own it.  NOTE(review): the ksem_up() call that
 * actually releases the underlying semaphore is elided from this extract */
410 void kmut_unlock(struct kmut *mut)
412 SLASSERT(mut != NULL);
413 SLASSERT(mut->magic == KMUT_MAGIC);
414 SLASSERT(mut->owner == current_thread());
416 ON_SYNC_DEBUG(mut->owner = NULL);
/* non-blocking acquire; returns ksem_trydown()'s result */
420 int kmut_trylock(struct kmut *mut)
422 SLASSERT(mut != NULL);
423 SLASSERT(mut->magic == KMUT_MAGIC);
424 return ksem_trydown(&mut->s, 1);
/* debug predicates: valid only when ON_SYNC_DEBUG tracks ->owner */
428 int kmut_islocked(struct kmut *mut)
430 SLASSERT(mut != NULL);
431 SLASSERT(mut->magic == KMUT_MAGIC);
432 return mut->owner == current_thread();
435 int kmut_isnotlocked(struct kmut *mut)
437 SLASSERT(mut != NULL);
438 SLASSERT(mut->magic == KMUT_MAGIC);
439 return mut->owner != current_thread();
/*
 * kcond: condition variable.  Waiters enqueue a stack-allocated
 * kcond_link, each carrying a private ksem, onto cond->waiters; signal/
 * broadcast pop links and ksem_up() them.  "_guard" variants assume the
 * caller already holds cond->guard.
 */
/* initialize @cond with an empty waiter list */
444 void kcond_init(struct kcond *cond)
446 SLASSERT(cond != NULL);
448 kspin_init(&cond->guard);
449 cond->waiters = NULL;
450 ON_SYNC_DEBUG(cond->magic = KCOND_MAGIC);
/* destroy @cond; no waiters may remain */
453 void kcond_done(struct kcond *cond)
455 SLASSERT(cond != NULL);
456 SLASSERT(cond->magic == KCOND_MAGIC);
457 SLASSERT(cond->waiters == NULL);
458 kspin_done(&cond->guard);
/* wait on @cond with user lock @lock held (classic condvar protocol):
 * enqueue self, then sleep on the link's private semaphore.
 * NOTE(review): the unlock/relock of @lock around the sleep is elided
 * from this extract -- confirm against the full file */
461 void kcond_wait(struct kcond *cond, struct kspin *lock)
463 struct kcond_link link;
465 SLASSERT(cond != NULL);
466 SLASSERT(lock != NULL);
467 SLASSERT(cond->magic == KCOND_MAGIC);
468 SLASSERT(kspin_islocked(lock));
470 ksem_init(&link.sem, 0);
471 kspin_lock(&cond->guard);
472 link.next = cond->waiters;
473 cond->waiters = &link;
474 kspin_unlock(&cond->guard);
477 ksem_down(&link.sem, 1);
479 kspin_lock(&cond->guard);
480 kspin_unlock(&cond->guard);
/* like kcond_wait(), but cond->guard itself is the user lock: caller
 * enters with guard held and leaves with guard held again */
484 void kcond_wait_guard(struct kcond *cond)
486 struct kcond_link link;
488 SLASSERT(cond != NULL);
489 SLASSERT(cond->magic == KCOND_MAGIC);
490 SLASSERT(kspin_islocked(&cond->guard));
492 ksem_init(&link.sem, 0);
493 link.next = cond->waiters;
494 cond->waiters = &link;
495 kspin_unlock(&cond->guard);
497 ksem_down(&link.sem, 1);
499 kspin_lock(&cond->guard);
/* wake the head waiter, if any; caller holds cond->guard.
 * NOTE(review): the NULL check around link is elided from this extract */
502 void kcond_signal_guard(struct kcond *cond)
504 struct kcond_link *link;
506 SLASSERT(cond != NULL);
507 SLASSERT(cond->magic == KCOND_MAGIC);
508 SLASSERT(kspin_islocked(&cond->guard));
510 link = cond->waiters;
512 cond->waiters = link->next;
513 ksem_up(&link->sem, 1);
/* locking wrapper around kcond_signal_guard() */
517 void kcond_signal(struct kcond *cond)
519 SLASSERT(cond != NULL);
520 SLASSERT(cond->magic == KCOND_MAGIC);
522 kspin_lock(&cond->guard);
523 kcond_signal_guard(cond);
524 kspin_unlock(&cond->guard);
/* wake every waiter and empty the list; caller holds cond->guard */
527 void kcond_broadcast_guard(struct kcond *cond)
529 struct kcond_link *link;
531 SLASSERT(cond != NULL);
532 SLASSERT(cond->magic == KCOND_MAGIC);
533 SLASSERT(kspin_islocked(&cond->guard));
535 for (link = cond->waiters; link != NULL; link = link->next)
536 ksem_up(&link->sem, 1);
537 cond->waiters = NULL;
/* locking wrapper around kcond_broadcast_guard() */
540 void kcond_broadcast(struct kcond *cond)
542 SLASSERT(cond != NULL);
543 SLASSERT(cond->magic == KCOND_MAGIC);
545 kspin_lock(&cond->guard);
546 kcond_broadcast_guard(cond);
547 kspin_unlock(&cond->guard);
/*
 * krw_sem: read/write semaphore built on a kcond; the count field follows
 * the same convention as krw_spin (asserts show: >0 readers, -1 writer,
 * 0 free).  NOTE(review): the count updates themselves are elided from
 * this sampled extract.
 */
/* initialize @sem */
550 void krw_sem_init(struct krw_sem *sem)
552 SLASSERT(sem != NULL);
554 kcond_init(&sem->cond);
556 ON_SYNC_DEBUG(sem->magic = KRW_MAGIC);
/* destroy @sem; must be free */
559 void krw_sem_done(struct krw_sem *sem)
561 SLASSERT(sem != NULL);
562 SLASSERT(sem->magic == KRW_MAGIC);
563 SLASSERT(sem->count == 0);
564 kcond_done(&sem->cond);
/* acquire for read: sleep on the condvar while a writer holds it */
567 void krw_sem_down_r(struct krw_sem *sem)
569 SLASSERT(sem != NULL);
570 SLASSERT(sem->magic == KRW_MAGIC);
571 SLASSERT(get_preemption_level() == 0);
573 kspin_lock(&sem->cond.guard);
574 while (sem->count < 0)
575 kcond_wait_guard(&sem->cond);
577 kspin_unlock(&sem->cond.guard);
/* non-blocking read acquire; fails while a writer holds the semaphore.
 * NOTE(review): return statements are elided from this extract */
580 int krw_sem_down_r_try(struct krw_sem *sem)
582 SLASSERT(sem != NULL);
583 SLASSERT(sem->magic == KRW_MAGIC);
585 kspin_lock(&sem->cond.guard);
586 if (sem->count < 0) {
587 kspin_unlock(&sem->cond.guard);
591 kspin_unlock(&sem->cond.guard);
/* acquire for write: sleep until the semaphore is completely free */
595 void krw_sem_down_w(struct krw_sem *sem)
597 SLASSERT(sem != NULL);
598 SLASSERT(sem->magic == KRW_MAGIC);
599 SLASSERT(get_preemption_level() == 0);
601 kspin_lock(&sem->cond.guard);
602 while (sem->count != 0)
603 kcond_wait_guard(&sem->cond);
605 kspin_unlock(&sem->cond.guard);
/* non-blocking write acquire; fails unless completely free */
608 int krw_sem_down_w_try(struct krw_sem *sem)
610 SLASSERT(sem != NULL);
611 SLASSERT(sem->magic == KRW_MAGIC);
613 kspin_lock(&sem->cond.guard);
614 if (sem->count != 0) {
615 kspin_unlock(&sem->cond.guard);
619 kspin_unlock(&sem->cond.guard);
/* drop a read hold; broadcasts under the guard (presumably only when the
 * last reader leaves -- the conditional is elided; confirm) */
623 void krw_sem_up_r(struct krw_sem *sem)
625 SLASSERT(sem != NULL);
626 SLASSERT(sem->magic == KRW_MAGIC);
627 SLASSERT(sem->count > 0);
629 kspin_lock(&sem->cond.guard);
632 kcond_broadcast_guard(&sem->cond);
633 kspin_unlock(&sem->cond.guard);
/* drop the write hold; note the broadcast here is the locking variant,
 * issued after the guard is released (kcond_broadcast re-takes it) */
636 void krw_sem_up_w(struct krw_sem *sem)
638 SLASSERT(sem != NULL);
639 SLASSERT(sem->magic == KRW_MAGIC);
640 SLASSERT(sem->count == -1);
642 kspin_lock(&sem->cond.guard);
644 kspin_unlock(&sem->cond.guard);
645 kcond_broadcast(&sem->cond);
/*
 * ksleep: wait-channel machinery (Darwin analogue of Linux wait-queues).
 * A ksleep_chan holds a guarded list of ksleep_link waiters; each link's
 * event is the waiting thread itself.  A "hit" records a wakeup that
 * arrived while the target was not yet blocked, so the wakeup is not lost.
 */
/* initialize @chan with an empty waiter list */
648 void ksleep_chan_init(struct ksleep_chan *chan)
650 SLASSERT(chan != NULL);
652 kspin_init(&chan->guard);
653 CFS_INIT_LIST_HEAD(&chan->waiters);
654 ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
/* destroy @chan; all waiters must be gone */
657 void ksleep_chan_done(struct ksleep_chan *chan)
659 SLASSERT(chan != NULL);
660 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
661 SLASSERT(list_empty(&chan->waiters));
662 kspin_done(&chan->guard);
/* initialize @link for the current thread (thread itself is the event) */
665 void ksleep_link_init(struct ksleep_link *link)
667 SLASSERT(link != NULL);
669 CFS_INIT_LIST_HEAD(&link->linkage);
671 link->event = current_thread();
673 link->forward = NULL;
674 ON_SYNC_DEBUG(link->magic = KSLEEP_LINK_MAGIC);
/* finalize @link; must already be off any channel */
677 void ksleep_link_done(struct ksleep_link *link)
679 SLASSERT(link != NULL);
680 SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
681 SLASSERT(list_empty(&link->linkage));
/* enqueue @link on @chan: exclusive waiters at the tail, others at the
 * head (so ksleep_wake_nr() reaches non-exclusive waiters first) */
684 void ksleep_add(struct ksleep_chan *chan, struct ksleep_link *link)
686 SLASSERT(chan != NULL);
687 SLASSERT(link != NULL);
688 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
689 SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
690 SLASSERT(list_empty(&link->linkage));
692 kspin_lock(&chan->guard);
693 if (link->flags & KSLEEP_EXCLUSIVE)
694 list_add_tail(&link->linkage, &chan->waiters);
696 list_add(&link->linkage, &chan->waiters);
697 kspin_unlock(&chan->guard);
/* remove @link from @chan (safe if already removed: list_del_init) */
700 void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link)
702 SLASSERT(chan != NULL);
703 SLASSERT(link != NULL);
704 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
705 SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
707 kspin_lock(&chan->guard);
708 list_del_init(&link->linkage);
709 kspin_unlock(&chan->guard);
/* return non-zero when @event has a pending (unconsumed) wakeup on @chan;
 * caller holds chan->guard.  NOTE(review): hit consumption and return
 * statements are elided from this extract */
712 static int has_hits(struct ksleep_chan *chan, event_t event)
714 struct ksleep_link *scan;
716 SLASSERT(kspin_islocked(&chan->guard));
717 list_for_each_entry(scan, &chan->waiters, linkage) {
718 if (scan->event == event && scan->hits > 0) {
/* record a wakeup for @event that found its target not yet asleep;
 * caller holds chan->guard */
727 static void add_hit(struct ksleep_chan *chan, event_t event)
729 struct ksleep_link *scan;
732 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
733 * from here: this will lead to infinite recursion.
736 SLASSERT(kspin_islocked(&chan->guard));
737 list_for_each_entry(scan, &chan->waiters, linkage) {
738 if (scan->event == event) {
/* sleep on @chan in state @state unless a wakeup already hit us */
745 void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
752 SLASSERT(chan != NULL);
753 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
754 SLASSERT(get_preemption_level() == 0);
756 event = current_thread();
757 kspin_lock(&chan->guard);
758 if (!has_hits(chan, event)) {
759 result = assert_wait(event, state);
760 kspin_unlock(&chan->guard);
761 SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
762 if (result == THREAD_WAITING)
763 thread_block(THREAD_CONTINUE_NULL);
/* pending hit: consume it and return without blocking */
765 kspin_unlock(&chan->guard);
770 * Sleep on @chan for no longer than @timeout nano-seconds. Return remaining
771 * sleep time (non-zero only if thread was waken by a signal (not currently
772 * implemented), or waitq was already in the "signalled" state).
/* timed variant: arms a per-thread timer deadline around the block and,
 * when woken early, converts the remaining abstime back to nanoseconds.
 * NOTE(review): declarations, the timeout==0 path and return statements
 * are elided from this extract */
774 int64_t ksleep_timedwait(struct ksleep_chan *chan,
775 cfs_task_state_t state,
782 SLASSERT(chan != NULL);
783 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
784 SLASSERT(get_preemption_level() == 0);
786 event = current_thread();
787 kspin_lock(&chan->guard);
788 if (!has_hits(chan, event)) {
791 result = assert_wait(event, state);
794 * arm a timer. thread_set_timer()'s first argument is
795 * uint32_t, so we have to cook deadline ourselves.
797 nanoseconds_to_absolutetime(timeout, &expire);
798 clock_absolutetime_interval_to_deadline(expire, &expire);
799 thread_set_timer_deadline(expire);
801 kspin_unlock(&chan->guard);
802 SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
803 if (result == THREAD_WAITING)
804 result = thread_block(THREAD_CONTINUE_NULL);
805 thread_cancel_timer();
807 if (result == THREAD_TIMED_OUT)
/* woken before the deadline: report how much sleep time remained */
811 clock_get_uptime(&now);
813 absolutetime_to_nanoseconds(expire - now, &timeout);
818 /* just return timeout, because I've got event and don't need to wait */
819 kspin_unlock(&chan->guard);
826 * wake up single exclusive waiter (plus some arbitrary number of *
/* wake one exclusive waiter */
829 void ksleep_wake(struct ksleep_chan *chan)
832 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
833 * from here: this will lead to infinite recursion.
835 ksleep_wake_nr(chan, 1);
839 * wake up all waiters on @chan
841 void ksleep_wake_all(struct ksleep_chan *chan)
844 ksleep_wake_nr(chan, 0);
849 * wakeup no more than @nr exclusive waiters from @chan, plus some arbitrary
850 * number of non-exclusive. If @nr is 0, wake up all waiters.
/* walk waiters; for each, wake the thread (or record a hit on its forward
 * channel when it was not yet blocked), stopping after @nr exclusive
 * waiters.  NOTE(review): the forward==NULL handling is elided; confirm */
852 void ksleep_wake_nr(struct ksleep_chan *chan, int nr)
854 struct ksleep_link *scan;
858 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
859 * from here: this will lead to infinite recursion.
862 SLASSERT(chan != NULL);
863 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
865 kspin_lock(&chan->guard);
866 list_for_each_entry(scan, &chan->waiters, linkage) {
867 struct ksleep_chan *forward;
869 forward = scan->forward;
871 kspin_lock(&forward->guard);
872 result = thread_wakeup(scan->event);
873 SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
874 if (result == KERN_NOT_WAITING) {
877 add_hit(forward, scan->event);
880 kspin_unlock(&forward->guard);
881 if ((scan->flags & KSLEEP_EXCLUSIVE) && --nr == 0)
884 kspin_unlock(&chan->guard);
/*
 * ktimer: one-shot timer built on the XNU thread_call facility;
 * ktimer_actor is the thread_call callback that runs the user function.
 */
/* initialize @t with callback @func.  NOTE(review): the assignments of
 * func/arg into @t are elided from this sampled extract */
887 void ktimer_init(struct ktimer *t, void (*func)(void *), void *arg)
890 SLASSERT(func != NULL);
892 kspin_init(&t->guard);
895 ON_SYNC_DEBUG(t->magic = KTIMER_MAGIC);
/* finalize @t; magic is cleared so later use trips the SLASSERTs */
898 void ktimer_done(struct ktimer *t)
901 SLASSERT(t->magic == KTIMER_MAGIC);
902 kspin_done(&t->guard);
903 ON_SYNC_DEBUG(t->magic = 0);
/* thread_call callback: validates the ktimer and invokes its function
 * under the guard.  NOTE(review): argument unpacking and the actual call
 * are elided from this extract */
906 static void ktimer_actor(void *arg0, void *arg1)
913 * this assumes that ktimer's are never freed.
916 SLASSERT(t->magic == KTIMER_MAGIC);
919 * call actual timer function
921 kspin_lock(&t->guard);
924 kspin_unlock(&t->guard);
/* thread_call_func_* are used although not in public headers */
930 extern boolean_t thread_call_func_cancel(thread_call_func_t, thread_call_param_t, boolean_t);
931 extern void thread_call_func_delayed(thread_call_func_t, thread_call_param_t, __u64);
/* cancel any pending call for @t; caller holds t->guard */
933 static void ktimer_disarm_locked(struct ktimer *t)
936 SLASSERT(t->magic == KTIMER_MAGIC);
938 thread_call_func_cancel(ktimer_actor, t, FALSE);
942 * Received deadline is nanoseconds, but time checked by
943 * thread_call is absolute time (The abstime unit is equal to
944 * the length of one bus cycle, so the duration is dependent
945 * on the bus speed of the computer), so we need to convert
946 * nanotime to abstime by nanoseconds_to_absolutetime().
948 * Refer to _delayed_call_timer(...)
950 * if thread_call_func_delayed is not exported in the future,
951 * we can use timeout() or bsd_timeout() to replace it.
953 void ktimer_arm(struct ktimer *t, u_int64_t deadline)
957 SLASSERT(t->magic == KTIMER_MAGIC);
959 kspin_lock(&t->guard);
960 ktimer_disarm_locked(t);
962 nanoseconds_to_absolutetime(deadline, &abstime);
963 thread_call_func_delayed(ktimer_actor, t, deadline);
964 kspin_unlock(&t->guard);
/* cancel any pending expiry of @t */
967 void ktimer_disarm(struct ktimer *t)
970 SLASSERT(t->magic == KTIMER_MAGIC);
972 kspin_lock(&t->guard);
974 ktimer_disarm_locked(t);
975 kspin_unlock(&t->guard);
/* report whether @t is armed; unlocked by design (result is advisory).
 * NOTE(review): the return statement is elided from this extract */
978 int ktimer_is_armed(struct ktimer *t)
981 SLASSERT(t->magic == KTIMER_MAGIC);
984 * no locking---result is only a hint anyway.
/* report the currently armed deadline of @t.  NOTE(review): the return
 * statement is elided from this extract */
989 u_int64_t ktimer_deadline(struct ktimer *t)
992 SLASSERT(t->magic == KTIMER_MAGIC);
/* module init: create the lck_grp that all xnu_spin locks attach to;
 * must run before any kspin_init() (xnu_spin_init asserts it is set) */
997 void cfs_sync_init(void)
1000 /* Initialize lock group */
1001 cfs_lock_grp = lck_grp_alloc_init("libcfs sync", LCK_GRP_ATTR_NULL);
/* module fini: drop our reference on the lock group */
1005 void cfs_sync_fini(void)
1009 * XXX Liang: destroy lock group. As we haven't called lock_done
1010 * for all locks, cfs_lock_grp may not be freed by kernel(reference
1013 lck_grp_free(cfs_lock_grp);
1014 cfs_lock_grp = NULL;
1019 * c-indentation-style: "K&R"