/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/darwin/darwin-sync.c
 *
 * XNU synchronization primitives.
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 */
/*
 * This file contains very simplistic implementations of a (saner) API for
 * basic synchronization primitives:
 *
 *     - spin-lock          (kspin)
 *
 *     - semaphore          (ksem)
 *
 *     - mutex              (kmut)
 *
 *     - condition variable (kcond)
 *
 *     - wait-queue         (ksleep_chan and ksleep_link)
 *
 *     - timer              (ktimer)
 *
 * A lot can be optimized here.
 */
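/*
 * Illustrative usage sketch (not part of the implementation): a kspin
 * protecting a shared counter. Everything here other than the kspin_*()
 * calls (the structure and helper names) is hypothetical.
 *
 *      struct my_counter {
 *              struct kspin lock;
 *              int          value;
 *      };
 *
 *      static void my_counter_inc(struct my_counter *c)
 *      {
 *              kspin_lock(&c->lock);
 *              c->value++;
 *              kspin_unlock(&c->lock);
 *      }
 *
 * my_counter.lock would be set up with kspin_init() before first use and
 * torn down with kspin_done() once no user is left.
 */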
#define DEBUG_SUBSYSTEM S_LNET

#ifdef __DARWIN8__
# include <kern/locks.h>
#else
# include <mach/mach_types.h>
# include <sys/types.h>
# include <kern/simple_lock.h>
#endif

#include <libcfs/libcfs.h>
#define SLASSERT(e) ON_SYNC_DEBUG(LASSERT(e))

#ifdef HAVE_GET_PREEMPTION_LEVEL
extern int get_preemption_level(void);
#else
#define get_preemption_level() (0)
#endif
#if SMP
#ifdef __DARWIN8__

static lck_grp_t *cfs_lock_grp = NULL;
#warning "Verify definition of lck_spin_t hasn't been changed while building!"

/* hw_lock_* are not exported by Darwin8 */
static inline void xnu_spin_init(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //*s = lck_spin_alloc_init(cfs_lock_grp, LCK_ATTR_NULL);
        lck_spin_init((lck_spin_t *)s, cfs_lock_grp, LCK_ATTR_NULL);
}

static inline void xnu_spin_done(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //lck_spin_free(*s, cfs_lock_grp);
        lck_spin_destroy((lck_spin_t *)s, cfs_lock_grp);
}

#define xnu_spin_lock(s)        lck_spin_lock((lck_spin_t *)(s))
#define xnu_spin_unlock(s)      lck_spin_unlock((lck_spin_t *)(s))

#warning "Darwin8 does not export lck_spin_try_lock"
#define xnu_spin_try(s)         (1)
#else /* !__DARWIN8__ */

extern void hw_lock_init(hw_lock_t);
extern void hw_lock_lock(hw_lock_t);
extern void hw_lock_unlock(hw_lock_t);
extern unsigned int hw_lock_to(hw_lock_t, unsigned int);
extern unsigned int hw_lock_try(hw_lock_t);
extern unsigned int hw_lock_held(hw_lock_t);

#define xnu_spin_init(s)        hw_lock_init(s)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        hw_lock_lock(s)
#define xnu_spin_unlock(s)      hw_lock_unlock(s)
#define xnu_spin_try(s)         hw_lock_try(s)

#endif /* !__DARWIN8__ */
#else /* !SMP */

#define xnu_spin_init(s)        do {} while (0)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        do {} while (0)
#define xnu_spin_unlock(s)      do {} while (0)
#define xnu_spin_try(s)         (1)

#endif /* !SMP */
/*
 * Warning: low level libcfs debugging code (libcfs_debug_msg(), for
 * example) uses spin-locks, so debugging output here may lead to nasty
 * surprises.
 *
 * In the uniprocessor version of the spin-lock only the checks are
 * performed; lock and unlock themselves are no-ops.
 */
void kspin_init(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        xnu_spin_init(&spin->lock);
        ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
        ON_SYNC_DEBUG(spin->owner = NULL);
}

void kspin_done(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == NULL);
        xnu_spin_done(&spin->lock);
}

void kspin_lock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner != current_thread());

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        xnu_spin_lock(&spin->lock);
        SLASSERT(spin->owner == NULL);
        ON_SYNC_DEBUG(spin->owner = current_thread());
}

void kspin_unlock(struct kspin *spin)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == current_thread());
        ON_SYNC_DEBUG(spin->owner = NULL);
        xnu_spin_unlock(&spin->lock);
}

int kspin_trylock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);

        if (xnu_spin_try(&spin->lock)) {
                SLASSERT(spin->owner == NULL);
                ON_SYNC_DEBUG(spin->owner = current_thread());
                return 1;
        } else
                return 0;
}

int kspin_islocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner == current_thread();
}

int kspin_isnotlocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner != current_thread();
}

/*
 * read/write spin-lock
 */
void krw_spin_init(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);

        kspin_init(&rwspin->guard);
        rwspin->count = 0;
        ON_SYNC_DEBUG(rwspin->magic = KRW_SPIN_MAGIC);
}

void krw_spin_done(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == 0);
        kspin_done(&rwspin->guard);
}

void krw_spin_down_r(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count < 0) {
                /* bounded busy-wait before re-taking the guard */
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count < 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        ++rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_down_w(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count != 0) {
                /* bounded busy-wait before re-taking the guard */
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count != 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        rwspin->count = -1;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_r(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count > 0);

        kspin_lock(&rwspin->guard);
        --rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_w(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == -1);

        kspin_lock(&rwspin->guard);
        rwspin->count = 0;
        kspin_unlock(&rwspin->guard);
}

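/*
 * Illustrative usage sketch for the read/write spin-lock (the table names
 * below are hypothetical): any number of readers may hold the lock at once,
 * a writer holds it exclusively.
 *
 *      krw_spin_down_r(&tbl->lock);
 *      entry = table_lookup(tbl, key);         // read-only access
 *      krw_spin_up_r(&tbl->lock);
 *
 *      krw_spin_down_w(&tbl->lock);
 *      table_insert(tbl, entry);               // exclusive access
 *      krw_spin_up_w(&tbl->lock);
 */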
#ifdef __DARWIN8__

#define xnu_waitq_init(q, a)            do {} while (0)
#define xnu_waitq_done(q)               do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)   ({wakeup_one((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_wakeup_all(q, e, s)   ({wakeup((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_assert_wait(q, e, s)  assert_wait((e), s)

#else /* !__DARWIN8__ */

#define xnu_waitq_init(q, a)            wait_queue_init((q), a)
#define xnu_waitq_done(q)               do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)   wait_queue_wakeup_one((q), (event_t)(e), s)
#define xnu_waitq_wakeup_all(q, e, s)   wait_queue_wakeup_all((q), (event_t)(e), s)
#define xnu_waitq_assert_wait(q, e, s)  wait_queue_assert_wait((q), (event_t)(e), s)

#endif /* !__DARWIN8__ */

void ksem_init(struct ksem *sem, int value)
{
        SLASSERT(sem != NULL);
        kspin_init(&sem->guard);
        xnu_waitq_init(&sem->q, SYNC_POLICY_FIFO);
        sem->value = value;
        ON_SYNC_DEBUG(sem->magic = KSEM_MAGIC);
}

void ksem_done(struct ksem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        /*
         * XXX nikita: cannot check that &sem->q is empty because
         * wait_queue_empty() is Apple private API.
         */
        kspin_done(&sem->guard);
}

int ksem_up(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        sem->value += value;
        if (sem->value == 0)
                result = xnu_waitq_wakeup_one(&sem->q, sem,
                                              THREAD_AWAKENED);
        else
                result = xnu_waitq_wakeup_all(&sem->q, sem,
                                              THREAD_AWAKENED);
        kspin_unlock(&sem->guard);
        SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
        return (result == KERN_SUCCESS) ? 0 : 1;
}

void ksem_down(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->guard);
        while (sem->value < value) {
                result = xnu_waitq_assert_wait(&sem->q, sem,
                                               THREAD_UNINT);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                kspin_unlock(&sem->guard);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
                kspin_lock(&sem->guard);
        }
        sem->value -= value;
        kspin_unlock(&sem->guard);
}

int ksem_trydown(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        if (sem->value >= value) {
                sem->value -= value;
                result = 0;
        } else
                result = -EBUSY;
        kspin_unlock(&sem->guard);
        return result;
}

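/*
 * Illustrative usage sketch: ksem as a counting semaphore bounding access to
 * a pool of N resources (N and the pool itself are hypothetical).
 * ksem_down(&sem, 1) blocks while the value is below 1; ksem_up(&sem, 1)
 * returns one unit and wakes waiters.
 *
 *      struct ksem pool_sem;
 *
 *      ksem_init(&pool_sem, N);        // N units initially available
 *
 *      ksem_down(&pool_sem, 1);        // acquire one unit, may block
 *      ... use one pool entry ...
 *      ksem_up(&pool_sem, 1);          // give the unit back
 */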
void kmut_init(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        ksem_init(&mut->s, 1);
        ON_SYNC_DEBUG(mut->magic = KMUT_MAGIC);
        ON_SYNC_DEBUG(mut->owner = NULL);
}

void kmut_done(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == NULL);
        ksem_done(&mut->s);
}

void kmut_lock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner != current_thread());
        SLASSERT(get_preemption_level() == 0);

        ksem_down(&mut->s, 1);
        ON_SYNC_DEBUG(mut->owner = current_thread());
}

void kmut_unlock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == current_thread());

        ON_SYNC_DEBUG(mut->owner = NULL);
        ksem_up(&mut->s, 1);
}

int kmut_trylock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return ksem_trydown(&mut->s, 1);
}

int kmut_islocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner == current_thread();
}

int kmut_isnotlocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner != current_thread();
}

void kcond_init(struct kcond *cond)
{
        SLASSERT(cond != NULL);

        kspin_init(&cond->guard);
        cond->waiters = NULL;
        ON_SYNC_DEBUG(cond->magic = KCOND_MAGIC);
}

void kcond_done(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(cond->waiters == NULL);
        kspin_done(&cond->guard);
}

void kcond_wait(struct kcond *cond, struct kspin *lock)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(lock != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(lock));

        ksem_init(&link.sem, 0);
        kspin_lock(&cond->guard);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);
        kspin_unlock(lock);

        ksem_down(&link.sem, 1);

        kspin_lock(&cond->guard);
        kspin_unlock(&cond->guard);
        kspin_lock(lock);
}

void kcond_wait_guard(struct kcond *cond)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        ksem_init(&link.sem, 0);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);

        ksem_down(&link.sem, 1);

        kspin_lock(&cond->guard);
}

void kcond_signal_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        link = cond->waiters;
        if (link != NULL) {
                cond->waiters = link->next;
                ksem_up(&link->sem, 1);
        }
}

void kcond_signal(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_signal_guard(cond);
        kspin_unlock(&cond->guard);
}

void kcond_broadcast_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        for (link = cond->waiters; link != NULL; link = link->next)
                ksem_up(&link->sem, 1);
        cond->waiters = NULL;
}

void kcond_broadcast(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_broadcast_guard(cond);
        kspin_unlock(&cond->guard);
}

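/*
 * Illustrative usage sketch: the usual condition-variable pattern with an
 * external kspin (the queue, its fields and helpers are hypothetical). The
 * predicate is re-checked in a loop because a broadcast may wake waiters
 * whose condition is not yet satisfied.
 *
 *      consumer:
 *              kspin_lock(&q->lock);
 *              while (list_empty(&q->items))
 *                      kcond_wait(&q->nonempty, &q->lock);
 *              item = dequeue(q);
 *              kspin_unlock(&q->lock);
 *
 *      producer:
 *              kspin_lock(&q->lock);
 *              enqueue(q, item);
 *              kcond_signal(&q->nonempty);
 *              kspin_unlock(&q->lock);
 */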
void krw_sem_init(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);

        kcond_init(&sem->cond);
        sem->count = 0;
        ON_SYNC_DEBUG(sem->magic = KRW_MAGIC);
}

void krw_sem_done(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == 0);
        kcond_done(&sem->cond);
}

void krw_sem_down_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count < 0)
                kcond_wait_guard(&sem->cond);
        ++sem->count;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_r_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count < 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        ++sem->count;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_down_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count != 0)
                kcond_wait_guard(&sem->cond);
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_w_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count != 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_up_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count > 0);

        kspin_lock(&sem->cond.guard);
        --sem->count;
        if (sem->count == 0)
                kcond_broadcast_guard(&sem->cond);
        kspin_unlock(&sem->cond.guard);
}

void krw_sem_up_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == -1);

        kspin_lock(&sem->cond.guard);
        sem->count = 0;
        kspin_unlock(&sem->cond.guard);
        kcond_broadcast(&sem->cond);
}

void ksleep_chan_init(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);

        kspin_init(&chan->guard);
        CFS_INIT_LIST_HEAD(&chan->waiters);
        ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
}

void ksleep_chan_done(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(list_empty(&chan->waiters));
        kspin_done(&chan->guard);
}

void ksleep_link_init(struct ksleep_link *link)
{
        SLASSERT(link != NULL);

        CFS_INIT_LIST_HEAD(&link->linkage);
        link->flags = 0;
        link->event = current_thread();
        link->hits  = 0;
        link->forward = NULL;
        ON_SYNC_DEBUG(link->magic = KSLEEP_LINK_MAGIC);
}

void ksleep_link_done(struct ksleep_link *link)
{
        SLASSERT(link != NULL);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));
}

void ksleep_add(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));

        kspin_lock(&chan->guard);
        if (link->flags & KSLEEP_EXCLUSIVE)
                list_add_tail(&link->linkage, &chan->waiters);
        else
                list_add(&link->linkage, &chan->waiters);
        kspin_unlock(&chan->guard);
}

void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);

        kspin_lock(&chan->guard);
        list_del_init(&link->linkage);
        kspin_unlock(&chan->guard);
}

static int has_hits(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event && scan->hits > 0) {
                        /* consume the pending hit */
                        --scan->hits;
                        return 1;
                }
        }
        return 0;
}

static void add_hit(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event) {
                        ++scan->hits;
                        break;
                }
        }
}

void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
{
        event_t event;
        int     result;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                result = assert_wait(event, state);
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
        } else
                kspin_unlock(&chan->guard);
}

/*
 * Sleep on @chan for no longer than @timeout nanoseconds. Return the
 * remaining sleep time (non-zero only if the thread was woken by a signal
 * (not currently implemented), or the waitq was already in the "signalled"
 * state). A usage sketch follows this function.
 */
int64_t ksleep_timedwait(struct ksleep_chan *chan,
                         cfs_task_state_t state,
                         __u64 timeout)
{
        event_t event;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                __u64 expire;
                int   result;

                result = assert_wait(event, state);
                /*
                 * arm a timer. thread_set_timer()'s first argument is
                 * uint32_t, so we have to cook the deadline ourselves.
                 */
                nanoseconds_to_absolutetime(timeout, &expire);
                clock_absolutetime_interval_to_deadline(expire, &expire);
                thread_set_timer_deadline(expire);
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        result = thread_block(THREAD_CONTINUE_NULL);
                thread_cancel_timer();

                if (result == THREAD_TIMED_OUT)
                        timeout = 0;
                else {
                        __u64 now;

                        clock_get_uptime(&now);
                        if (expire > now)
                                absolutetime_to_nanoseconds(expire - now, &timeout);
                        else
                                timeout = 0;
                }
        } else {
                /* just return timeout, because I've got event and don't need to wait */
                kspin_unlock(&chan->guard);
        }

        return timeout;
}

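/*
 * Illustrative usage sketch for the wait-channel API (the condition test is
 * hypothetical; an uninterruptible Mach wait state, THREAD_UNINT, is
 * assumed):
 *
 *      struct ksleep_link link;
 *
 *      ksleep_link_init(&link);
 *      ksleep_add(chan, &link);
 *      while (!condition_is_true())
 *              ksleep_wait(chan, THREAD_UNINT);
 *      ksleep_del(chan, &link);
 *      ksleep_link_done(&link);
 *
 * With ksleep_timedwait() the caller also passes a timeout in nanoseconds;
 * a zero return value means the whole timeout elapsed:
 *
 *      remaining = ksleep_timedwait(chan, THREAD_UNINT, timeout_ns);
 *      if (remaining == 0)
 *              ... handle the timeout ...
 */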
/*
 * Wake up a single exclusive waiter on @chan (plus some arbitrary number of
 * non-exclusive ones).
 */
void ksleep_wake(struct ksleep_chan *chan)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
        ksleep_wake_nr(chan, 1);
}

/*
 * Wake up all waiters on @chan.
 */
void ksleep_wake_all(struct ksleep_chan *chan)
{
        ksleep_wake_nr(chan, 0);
}

/*
 * Wake up no more than @nr exclusive waiters on @chan, plus some arbitrary
 * number of non-exclusive ones. If @nr is 0, wake up all waiters.
 */
void ksleep_wake_nr(struct ksleep_chan *chan, int nr)
{
        struct ksleep_link *scan;
        int result;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);

        kspin_lock(&chan->guard);
        list_for_each_entry(scan, &chan->waiters, linkage) {
                struct ksleep_chan *forward;

                forward = scan->forward;
                if (forward != NULL)
                        kspin_lock(&forward->guard);
                result = thread_wakeup(scan->event);
                SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
                if (result == KERN_NOT_WAITING) {
                        /* record the wake-up so a not-yet-sleeping waiter
                         * does not miss it */
                        ++scan->hits;
                        if (forward != NULL)
                                add_hit(forward, scan->event);
                }
                if (forward != NULL)
                        kspin_unlock(&forward->guard);
                if ((scan->flags & KSLEEP_EXCLUSIVE) && --nr == 0)
                        break;
        }
        kspin_unlock(&chan->guard);
}

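/*
 * Illustrative waker-side sketch matching the wait loop shown earlier (the
 * condition update is hypothetical):
 *
 *      make_condition_true();
 *      ksleep_wake(chan);              // one exclusive waiter
 *      ksleep_wake_all(chan);          // or: everybody
 */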
void ktimer_init(struct ktimer *t, void (*func)(void *), void *arg)
{
        SLASSERT(t != NULL);
        SLASSERT(func != NULL);

        kspin_init(&t->guard);
        t->func  = func;
        t->arg   = arg;
        t->armed = 0;
        ON_SYNC_DEBUG(t->magic = KTIMER_MAGIC);
}

void ktimer_done(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);
        kspin_done(&t->guard);
        ON_SYNC_DEBUG(t->magic = 0);
}

static void ktimer_actor(void *arg0, void *arg1)
{
        struct ktimer *t;
        int            armed;

        t = arg0;
        /*
         * this assumes that ktimer's are never freed.
         */
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * call actual timer function
         */
        kspin_lock(&t->guard);
        armed = t->armed;
        t->armed = 0;
        kspin_unlock(&t->guard);

        if (armed)
                t->func(t->arg);
}

extern boolean_t thread_call_func_cancel(thread_call_func_t, thread_call_param_t, boolean_t);
extern void thread_call_func_delayed(thread_call_func_t, thread_call_param_t, __u64);

static void ktimer_disarm_locked(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        thread_call_func_cancel(ktimer_actor, t, FALSE);
}

/*
 * The received deadline is in nanoseconds, but the time checked by
 * thread_call is absolute time (the abstime unit is equal to the length of
 * one bus cycle, so the duration depends on the bus speed of the computer),
 * so we need to convert nanoseconds to abstime with
 * nanoseconds_to_absolutetime().
 *
 * Refer to _delayed_call_timer(...)
 *
 * If thread_call_func_delayed() is not exported in the future, we can use
 * timeout() or bsd_timeout() to replace it. A usage sketch follows
 * ktimer_arm() below.
 */
void ktimer_arm(struct ktimer *t, u_int64_t deadline)
{
        u_int64_t expire;

        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        ktimer_disarm_locked(t);
        t->armed = 1;
        t->deadline = deadline;
        nanoseconds_to_absolutetime(deadline, &expire);
        thread_call_func_delayed(ktimer_actor, t, expire);
        kspin_unlock(&t->guard);
}

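/*
 * Illustrative usage sketch (callback, argument and deadline names are
 * hypothetical; the deadline is expressed in nanoseconds and converted to
 * abstime by ktimer_arm() above):
 *
 *      static void my_timeout(void *arg)
 *      {
 *              ... runs from thread_call context when the timer fires ...
 *      }
 *
 *      ktimer_init(&t, my_timeout, my_arg);
 *      ktimer_arm(&t, deadline_ns);
 *      ...
 *      ktimer_disarm(&t);
 *      ktimer_done(&t);
 */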
void ktimer_disarm(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        t->armed = 0;
        ktimer_disarm_locked(t);
        kspin_unlock(&t->guard);
}

int ktimer_is_armed(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * no locking---result is only a hint anyway.
         */
        return t->armed;
}

u_int64_t ktimer_deadline(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        return t->deadline;
}

void cfs_sync_init(void)
{
#ifdef __DARWIN8__
        /* Initialize lock group */
        cfs_lock_grp = lck_grp_alloc_init("libcfs sync", LCK_GRP_ATTR_NULL);
#endif
}

void cfs_sync_fini(void)
{
#ifdef __DARWIN8__
        /*
         * XXX Liang: destroy lock group. As we haven't called lock_done
         * for all locks, cfs_lock_grp may not be freed by the kernel
         * (reference count > 1).
         */
        lck_grp_free(cfs_lock_grp);
        cfs_lock_grp = NULL;
#endif
}

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */