/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/darwin/darwin-sync.c
 *
 * XNU synchronization primitives.
 *
 * Author: Nikita Danilov <nikita@clusterfs.com>
 */

/*
 * This file contains very simplistic implementations of a (saner) API for
 * basic synchronization primitives:
 *
 *     - spin-lock          (kspin)
 *
 *     - semaphore          (ksem)
 *
 *     - mutex              (kmut)
 *
 *     - condition variable (kcond)
 *
 *     - wait-queue         (ksleep_chan and ksleep_link)
 *
 *     - timer              (ktimer)
 *
 * A lot can be optimized here.
 */

#define DEBUG_SUBSYSTEM S_LNET

#ifdef __DARWIN8__
# include <kern/locks.h>
#else
# include <mach/mach_types.h>
# include <sys/types.h>
# include <kern/simple_lock.h>
#endif

#include <libcfs/libcfs.h>

#define SLASSERT(e) ON_SYNC_DEBUG(LASSERT(e))

#ifdef HAVE_GET_PREEMPTION_LEVEL
extern int get_preemption_level(void);
#else
#define get_preemption_level() (0)
#endif

#if SMP
#ifdef __DARWIN8__

static lck_grp_t *cfs_lock_grp = NULL;
#warning "Verify definition of lck_spin_t hasn't been changed while building!"

/* hw_lock_* are not exported by Darwin8 */
static inline void xnu_spin_init(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //*s = lck_spin_alloc_init(cfs_lock_grp, LCK_ATTR_NULL);
        lck_spin_init((lck_spin_t *)s, cfs_lock_grp, LCK_ATTR_NULL);
}

static inline void xnu_spin_done(xnu_spin_t *s)
{
        SLASSERT(cfs_lock_grp != NULL);
        //lck_spin_free(*s, cfs_lock_grp);
        lck_spin_destroy((lck_spin_t *)s, cfs_lock_grp);
}

#define xnu_spin_lock(s)        lck_spin_lock((lck_spin_t *)(s))
#define xnu_spin_unlock(s)      lck_spin_unlock((lck_spin_t *)(s))

#warning "Darwin8 does not export lck_spin_try_lock"
#define xnu_spin_try(s)         (1)

#else /* !__DARWIN8__ */
extern void         hw_lock_init(hw_lock_t);
extern void         hw_lock_lock(hw_lock_t);
extern void         hw_lock_unlock(hw_lock_t);
extern unsigned int hw_lock_to(hw_lock_t, unsigned int);
extern unsigned int hw_lock_try(hw_lock_t);
extern unsigned int hw_lock_held(hw_lock_t);

#define xnu_spin_init(s)        hw_lock_init(s)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        hw_lock_lock(s)
#define xnu_spin_unlock(s)      hw_lock_unlock(s)
#define xnu_spin_try(s)         hw_lock_try(s)

#endif /* !__DARWIN8__ */

#else /* !SMP */

#define xnu_spin_init(s)        do {} while (0)
#define xnu_spin_done(s)        do {} while (0)
#define xnu_spin_lock(s)        do {} while (0)
#define xnu_spin_unlock(s)      do {} while (0)
#define xnu_spin_try(s)         (1)

#endif /* !SMP */

/*
 * Warning: low level libcfs debugging code (libcfs_debug_msg(), for
 * example) uses spin-locks itself, so debugging output here may lead to
 * nasty surprises.
 *
 * The uniprocessor version of the spin-lock does nothing except run the
 * debugging checks.
 */

void kspin_init(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        xnu_spin_init(&spin->lock);
        ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
        ON_SYNC_DEBUG(spin->owner = NULL);
}

void kspin_done(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == NULL);
        xnu_spin_done(&spin->lock);
}

void kspin_lock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner != current_thread());

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        xnu_spin_lock(&spin->lock);
        SLASSERT(spin->owner == NULL);
        ON_SYNC_DEBUG(spin->owner = current_thread());
}

void kspin_unlock(struct kspin *spin)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == current_thread());
        ON_SYNC_DEBUG(spin->owner = NULL);
        xnu_spin_unlock(&spin->lock);
}

int kspin_trylock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);

        if (xnu_spin_try(&spin->lock)) {
                SLASSERT(spin->owner == NULL);
                ON_SYNC_DEBUG(spin->owner = current_thread());
                return 1;
        } else
                return 0;
}

#if XNU_SYNC_DEBUG
int kspin_islocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner == current_thread();
}

int kspin_isnotlocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner != current_thread();
}
#endif /* XNU_SYNC_DEBUG */

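/*
 * Illustrative usage sketch, compiled out. The names below are
 * hypothetical, not part of this file: a kspin protecting a counter.
 */
#if 0
static struct kspin example_guard;
static int          example_counter;

static void example_bump(void)
{
        kspin_lock(&example_guard);     /* busy-waits, never sleeps */
        example_counter++;
        kspin_unlock(&example_guard);
}
#endif
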
/*
 * read/write spin-lock
 */
void krw_spin_init(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);

        kspin_init(&rwspin->guard);
        rwspin->count = 0;
        ON_SYNC_DEBUG(rwspin->magic = KRW_SPIN_MAGIC);
}

void krw_spin_done(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == 0);
        kspin_done(&rwspin->guard);
}

void krw_spin_down_r(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count < 0) {
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count < 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        ++ rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_down_w(struct krw_spin *rwspin)
{
        int i;

        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);

        kspin_lock(&rwspin->guard);
        while (rwspin->count != 0) {
                i = -1;
                kspin_unlock(&rwspin->guard);
                while (--i != 0 && rwspin->count != 0)
                        continue;
                kspin_lock(&rwspin->guard);
        }
        rwspin->count = -1;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_r(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count > 0);

        kspin_lock(&rwspin->guard);
        -- rwspin->count;
        kspin_unlock(&rwspin->guard);
}

void krw_spin_up_w(struct krw_spin *rwspin)
{
        SLASSERT(rwspin != NULL);
        SLASSERT(rwspin->magic == KRW_SPIN_MAGIC);
        SLASSERT(rwspin->count == -1);

        kspin_lock(&rwspin->guard);
        rwspin->count = 0;
        kspin_unlock(&rwspin->guard);
}

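/*
 * Illustrative usage sketch, compiled out; hypothetical names. Readers
 * share the lock (count > 0) while a writer holds it exclusively
 * (count == -1).
 */
#if 0
static struct krw_spin example_rw;

static void example_reader(void)
{
        krw_spin_down_r(&example_rw);
        /* ... read shared state ... */
        krw_spin_up_r(&example_rw);
}

static void example_writer(void)
{
        krw_spin_down_w(&example_rw);
        /* ... modify shared state ... */
        krw_spin_up_w(&example_rw);
}
#endif
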
/*
 * semaphore
 */
#ifdef __DARWIN8__

#define xnu_waitq_init(q, a)           do {} while (0)
#define xnu_waitq_done(q)              do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)  ({wakeup_one((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_wakeup_all(q, e, s)  ({wakeup((void *)(e)); KERN_SUCCESS;})
#define xnu_waitq_assert_wait(q, e, s) assert_wait((e), s)

#else /* !__DARWIN8__ */

#define xnu_waitq_init(q, a)           wait_queue_init((q), a)
#define xnu_waitq_done(q)              do {} while (0)
#define xnu_waitq_wakeup_one(q, e, s)  wait_queue_wakeup_one((q), (event_t)(e), s)
#define xnu_waitq_wakeup_all(q, e, s)  wait_queue_wakeup_all((q), (event_t)(e), s)
#define xnu_waitq_assert_wait(q, e, s) wait_queue_assert_wait((q), (event_t)(e), s)

#endif /* !__DARWIN8__ */

void ksem_init(struct ksem *sem, int value)
{
        SLASSERT(sem != NULL);
        kspin_init(&sem->guard);
        xnu_waitq_init(&sem->q, SYNC_POLICY_FIFO);
        sem->value = value;
        ON_SYNC_DEBUG(sem->magic = KSEM_MAGIC);
}

void ksem_done(struct ksem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        /*
         * XXX nikita: cannot check that &sem->q is empty because
         * wait_queue_empty() is Apple private API.
         */
        kspin_done(&sem->guard);
}

int ksem_up(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        sem->value += value;
        if (sem->value == 0)
                result = xnu_waitq_wakeup_one(&sem->q, sem,
                                              THREAD_AWAKENED);
        else
                result = xnu_waitq_wakeup_all(&sem->q, sem,
                                              THREAD_AWAKENED);
        kspin_unlock(&sem->guard);
        SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
        return (result == KERN_SUCCESS) ? 0 : 1;
}

void ksem_down(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->guard);
        while (sem->value < value) {
                result = xnu_waitq_assert_wait(&sem->q, sem,
                                               THREAD_UNINT);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                kspin_unlock(&sem->guard);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
                kspin_lock(&sem->guard);
        }
        sem->value -= value;
        kspin_unlock(&sem->guard);
}

int ksem_trydown(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        if (sem->value >= value) {
                sem->value -= value;
                result = 0;
        } else
                result = -EBUSY;
        kspin_unlock(&sem->guard);
        return result;
}

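/*
 * Illustrative usage sketch, compiled out; hypothetical names. A counting
 * semaphore hands units from a producer to a consumer.
 */
#if 0
static struct ksem example_items;      /* ksem_init(&example_items, 0) */

static void example_producer(void)
{
        /* ... publish an item ... */
        ksem_up(&example_items, 1);
}

static void example_consumer(void)
{
        ksem_down(&example_items, 1);  /* may sleep; preemption enabled */
        /* ... consume the item ... */
}
#endif
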
void kmut_init(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        ksem_init(&mut->s, 1);
        ON_SYNC_DEBUG(mut->magic = KMUT_MAGIC);
        ON_SYNC_DEBUG(mut->owner = NULL);
}

void kmut_done(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == NULL);
        ksem_done(&mut->s);
}

void kmut_lock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner != current_thread());
        SLASSERT(get_preemption_level() == 0);

        ksem_down(&mut->s, 1);
        ON_SYNC_DEBUG(mut->owner = current_thread());
}

void kmut_unlock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == current_thread());

        ON_SYNC_DEBUG(mut->owner = NULL);
        ksem_up(&mut->s, 1);
}

int kmut_trylock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return ksem_trydown(&mut->s, 1);
}

#if XNU_SYNC_DEBUG
int kmut_islocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner == current_thread();
}

int kmut_isnotlocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner != current_thread();
}
#endif /* XNU_SYNC_DEBUG */

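/*
 * Illustrative usage sketch, compiled out; hypothetical names. kmut is a
 * binary semaphore with debug-only ownership tracking.
 */
#if 0
static struct kmut example_mutex;      /* kmut_init(&example_mutex) */

static void example_critical(void)
{
        kmut_lock(&example_mutex);     /* sleeps if contended */
        /* ... exclusive section; kmut_islocked() holds under debug ... */
        kmut_unlock(&example_mutex);
}
#endif
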
void kcond_init(struct kcond *cond)
{
        SLASSERT(cond != NULL);

        kspin_init(&cond->guard);
        cond->waiters = NULL;
        ON_SYNC_DEBUG(cond->magic = KCOND_MAGIC);
}

void kcond_done(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(cond->waiters == NULL);
        kspin_done(&cond->guard);
}

void kcond_wait(struct kcond *cond, struct kspin *lock)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(lock != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(lock));

        ksem_init(&link.sem, 0);
        kspin_lock(&cond->guard);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);
        kspin_unlock(lock);

        ksem_down(&link.sem, 1);

        kspin_lock(&cond->guard);
        kspin_unlock(&cond->guard);
        kspin_lock(lock);
}

void kcond_wait_guard(struct kcond *cond)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        ksem_init(&link.sem, 0);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);

        ksem_down(&link.sem, 1);

        kspin_lock(&cond->guard);
}

void kcond_signal_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        link = cond->waiters;
        if (link != NULL) {
                cond->waiters = link->next;
                ksem_up(&link->sem, 1);
        }
}

void kcond_signal(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_signal_guard(cond);
        kspin_unlock(&cond->guard);
}

void kcond_broadcast_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        for (link = cond->waiters; link != NULL; link = link->next)
                ksem_up(&link->sem, 1);
        cond->waiters = NULL;
}

void kcond_broadcast(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_broadcast_guard(cond);
        kspin_unlock(&cond->guard);
}

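/*
 * Illustrative usage sketch, compiled out; hypothetical names. The
 * canonical condition-variable pattern: re-test the predicate around
 * kcond_wait(), which drops and re-takes the caller's kspin.
 */
#if 0
static struct kspin example_lock;
static struct kcond example_cond;
static int          example_ready;

static void example_waiter(void)
{
        kspin_lock(&example_lock);
        while (!example_ready)
                kcond_wait(&example_cond, &example_lock);
        kspin_unlock(&example_lock);
}

static void example_poster(void)
{
        kspin_lock(&example_lock);
        example_ready = 1;
        kspin_unlock(&example_lock);
        kcond_signal(&example_cond);
}
#endif
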
void krw_sem_init(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);

        kcond_init(&sem->cond);
        sem->count = 0;
        ON_SYNC_DEBUG(sem->magic = KRW_MAGIC);
}

void krw_sem_done(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == 0);
        kcond_done(&sem->cond);
}

void krw_sem_down_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count < 0)
                kcond_wait_guard(&sem->cond);
        ++ sem->count;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_r_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count < 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        ++ sem->count;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_down_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count != 0)
                kcond_wait_guard(&sem->cond);
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
}

int krw_sem_down_w_try(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);

        kspin_lock(&sem->cond.guard);
        if (sem->count != 0) {
                kspin_unlock(&sem->cond.guard);
                return -EBUSY;
        }
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
        return 0;
}

void krw_sem_up_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count > 0);

        kspin_lock(&sem->cond.guard);
        -- sem->count;
        if (sem->count == 0)
                kcond_broadcast_guard(&sem->cond);
        kspin_unlock(&sem->cond.guard);
}

void krw_sem_up_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == -1);

        kspin_lock(&sem->cond.guard);
        sem->count = 0;
        kspin_unlock(&sem->cond.guard);
        kcond_broadcast(&sem->cond);
}

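/*
 * Illustrative usage sketch, compiled out; hypothetical names. krw_sem is
 * the sleeping counterpart of krw_spin, built on kcond.
 */
#if 0
static struct krw_sem example_rwsem;    /* krw_sem_init(&example_rwsem) */

static void example_read_side(void)
{
        krw_sem_down_r(&example_rwsem); /* sleeps while a writer holds it */
        /* ... read shared state ... */
        krw_sem_up_r(&example_rwsem);
}
#endif
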
void ksleep_chan_init(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);

        kspin_init(&chan->guard);
        CFS_INIT_LIST_HEAD(&chan->waiters);
        ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
}

void ksleep_chan_done(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(list_empty(&chan->waiters));
        kspin_done(&chan->guard);
}

void ksleep_link_init(struct ksleep_link *link)
{
        SLASSERT(link != NULL);

        CFS_INIT_LIST_HEAD(&link->linkage);
        link->flags = 0;
        link->event = current_thread();
        link->hits  = 0;
        link->forward = NULL;
        ON_SYNC_DEBUG(link->magic = KSLEEP_LINK_MAGIC);
}

void ksleep_link_done(struct ksleep_link *link)
{
        SLASSERT(link != NULL);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));
}

void ksleep_add(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));

        kspin_lock(&chan->guard);
        if (link->flags & KSLEEP_EXCLUSIVE)
                list_add_tail(&link->linkage, &chan->waiters);
        else
                list_add(&link->linkage, &chan->waiters);
        kspin_unlock(&chan->guard);
}

void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);

        kspin_lock(&chan->guard);
        list_del_init(&link->linkage);
        kspin_unlock(&chan->guard);
}

static int has_hits(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event && scan->hits > 0) {
                        /* consume the pending hit */
                        -- scan->hits;
                        return 1;
                }
        }
        return 0;
}

static void add_hit(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event) {
                        ++ scan->hits;
                        break;
                }
        }
}

void ksleep_wait(struct ksleep_chan *chan, cfs_task_state_t state)
{
        event_t event;
        int     result;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                result = assert_wait(event, state);
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
        } else
                kspin_unlock(&chan->guard);
}

/*
 * Sleep on @chan for no longer than @timeout nano-seconds. Return the
 * remaining sleep time (non-zero only if the thread was woken by a signal
 * (not currently implemented), or if the waitq was already in the
 * "signalled" state).
 */
int64_t ksleep_timedwait(struct ksleep_chan *chan,
                         cfs_task_state_t state,
                         __u64 timeout)
{
        event_t event;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread();
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                int   result;
                __u64 expire;

                result = assert_wait(event, state);
                /*
                 * arm a timer. thread_set_timer()'s first argument is
                 * uint32_t, so we have to cook the deadline ourselves.
                 */
                nanoseconds_to_absolutetime(timeout, &expire);
                clock_absolutetime_interval_to_deadline(expire, &expire);
                thread_set_timer_deadline(expire);
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        result = thread_block(THREAD_CONTINUE_NULL);
                thread_cancel_timer();

                if (result == THREAD_TIMED_OUT)
                        timeout = 0;
                else {
                        __u64 now;

                        clock_get_uptime(&now);
                        if (expire > now)
                                absolutetime_to_nanoseconds(expire - now,
                                                            &timeout);
                        else
                                timeout = 0;
                }
        } else {
                /*
                 * just return timeout: we already have a pending event and
                 * don't need to wait.
                 */
                kspin_unlock(&chan->guard);
        }
        return timeout;
}

/*
 * wake up a single exclusive waiter (plus some arbitrary number of
 * non-exclusive by-standers)
 */
void ksleep_wake(struct ksleep_chan *chan)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
        ksleep_wake_nr(chan, 1);
}

/*
 * wake up all waiters on @chan
 */
void ksleep_wake_all(struct ksleep_chan *chan)
{
        ksleep_wake_nr(chan, 0);
}

/*
 * wake up no more than @nr exclusive waiters from @chan, plus some
 * arbitrary number of non-exclusive ones. If @nr is 0, wake up all waiters.
 */
void ksleep_wake_nr(struct ksleep_chan *chan, int nr)
{
        struct ksleep_link *scan;
        int result;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);

        kspin_lock(&chan->guard);
        list_for_each_entry(scan, &chan->waiters, linkage) {
                struct ksleep_chan *forward;

                forward = scan->forward;
                if (forward != NULL)
                        kspin_lock(&forward->guard);
                result = thread_wakeup(scan->event);
                SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
                if (result == KERN_NOT_WAITING) {
                        ++ scan->hits;
                        if (forward != NULL)
                                add_hit(forward, scan->event);
                }
                if (forward != NULL)
                        kspin_unlock(&forward->guard);
                if ((scan->flags & KSLEEP_EXCLUSIVE) && --nr == 0)
                        break;
        }
        kspin_unlock(&chan->guard);
}

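/*
 * Illustrative usage sketch, compiled out; hypothetical names. The
 * add/wait/del protocol mirrors Linux wait-queue usage; THREAD_UNINT is
 * the usual XNU wait state.
 */
#if 0
static struct ksleep_chan example_chan;
static volatile int       example_flag;

static void example_sleeper(void)
{
        struct ksleep_link link;

        ksleep_link_init(&link);
        ksleep_add(&example_chan, &link);
        while (!example_flag)
                ksleep_wait(&example_chan, THREAD_UNINT);
        ksleep_del(&example_chan, &link);
        ksleep_link_done(&link);
}

static void example_waker(void)
{
        example_flag = 1;
        ksleep_wake(&example_chan);
}
#endif
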
void ktimer_init(struct ktimer *t, void (*func)(void *), void *arg)
{
        SLASSERT(t != NULL);
        SLASSERT(func != NULL);

        kspin_init(&t->guard);
        t->func = func;
        t->arg  = arg;
        ON_SYNC_DEBUG(t->magic = KTIMER_MAGIC);
}

void ktimer_done(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);
        kspin_done(&t->guard);
        ON_SYNC_DEBUG(t->magic = 0);
}

static void ktimer_actor(void *arg0, void *arg1)
{
        struct ktimer *t;
        int            armed;

        t = arg0;
        /*
         * this assumes that ktimers are never freed.
         */
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * call the actual timer function
         */
        kspin_lock(&t->guard);
        armed = t->armed;
        t->armed = 0;
        kspin_unlock(&t->guard);

        if (armed)
                t->func(t->arg);
}

extern boolean_t thread_call_func_cancel(thread_call_func_t, thread_call_param_t, boolean_t);
extern void      thread_call_func_delayed(thread_call_func_t, thread_call_param_t, __u64);

static void ktimer_disarm_locked(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        thread_call_func_cancel(ktimer_actor, t, FALSE);
}

/*
 * The received deadline is in nanoseconds, but the time checked by
 * thread_call is absolute time (the abstime unit is equal to the length
 * of one bus cycle, so the duration is dependent on the bus speed of the
 * computer), so we need to convert nanotime to abstime with
 * nanoseconds_to_absolutetime().
 *
 * Refer to _delayed_call_timer(...)
 *
 * If thread_call_func_delayed is not exported in the future, we can use
 * timeout() or bsd_timeout() to replace it.
 */
void ktimer_arm(struct ktimer *t, u_int64_t deadline)
{
        u_int64_t abstime;

        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        ktimer_disarm_locked(t);
        t->armed = 1;
        t->deadline = deadline;
        nanoseconds_to_absolutetime(deadline, &abstime);
        /* thread_call expects abstime, not nanoseconds; see comment above */
        thread_call_func_delayed(ktimer_actor, t, abstime);
        kspin_unlock(&t->guard);
}

void ktimer_disarm(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        t->armed = 0;
        ktimer_disarm_locked(t);
        kspin_unlock(&t->guard);
}

int ktimer_is_armed(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * no locking: the result is only a hint anyway.
         */
        return t->armed;
}

u_int64_t ktimer_deadline(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        return t->deadline;
}

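/*
 * Illustrative usage sketch, compiled out; hypothetical names. Per the
 * conversion comment above, the deadline handed to ktimer_arm() is an
 * absolute uptime expressed in nanoseconds.
 */
#if 0
static struct ktimer example_timer;

static void example_expiry(void *arg)
{
        /* runs in thread-call context once the deadline passes */
}

static void example_start(void)
{
        u_int64_t now;

        ktimer_init(&example_timer, example_expiry, NULL);
        clock_get_uptime(&now);                 /* abstime units */
        absolutetime_to_nanoseconds(now, &now); /* -> nanoseconds */
        ktimer_arm(&example_timer, now + 1000000000ULL); /* ~1s from now */
}
#endif
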
void cfs_sync_init(void)
{
#ifdef __DARWIN8__
        /* Initialize lock group */
        cfs_lock_grp = lck_grp_alloc_init("libcfs sync", LCK_GRP_ATTR_NULL);
#endif
}

void cfs_sync_fini(void)
{
#ifdef __DARWIN8__
        /*
         * XXX Liang: destroy the lock group. As we haven't called lock_done
         * for all locks, cfs_lock_grp may not be freed by the kernel
         * (reference count > 1).
         */
        lck_grp_free(cfs_lock_grp);
        cfs_lock_grp = NULL;
#endif
}

/*
 * Local variables:
 * c-indentation-style: "K&R"
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */