/*
 * Whamcloud - gitweb
 * Landed portals:b_port_step as follows...
 * [fs/lustre-release.git] / lnet / libcfs / darwin / darwin-sync.c
 */
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Lustre Light Super operations
5  *
6  *  Copyright (c) 2004 Cluster File Systems, Inc.
7  *
8  *   This file is part of Lustre, http://www.lustre.org.
9  *
10  *   Lustre is free software; you can redistribute it and/or modify it under
11  *   the terms of version 2 of the GNU General Public License as published by
12  *   the Free Software Foundation. Lustre is distributed in the hope that it
13  *   will be useful, but WITHOUT ANY WARRANTY; without even the implied
14  *   warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details. You should have received a
16  *   copy of the GNU General Public License along with Lustre; if not, write
17  *   to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
18  *   USA.
19  */
20
21 /*
22  * xnu_sync.c
23  *
24  * Created by nikita on Sun Jul 18 2004.
25  *
26  * Prototypes of XNU synchronization primitives.
27  */
28
29 /*
30  * This file contains very simplistic implementations of (saner) API for
31  * basic synchronization primitives:
32  *
33  *     - spin-lock          (kspin)
34  *
35  *     - semaphore          (ksem)
36  *
37  *     - mutex              (kmut)
38  *
39  *     - condition variable (kcond)
40  *
41  *     - wait-queue         (ksleep_chan and ksleep_link)
42  *
43  *     - timer              (ktimer)
44  *
45  * A lot can be optimized here.
46  */
47
48 #include <mach/mach_types.h>
49 #include <sys/types.h>
50 #include <kern/simple_lock.h>
51
52 #define DEBUG_SUBSYSTEM S_PORTALS
53
54 #include <libcfs/libcfs.h>
55 #include <libcfs/kp30.h>
56
57 #define SLASSERT(e) ON_SYNC_DEBUG(LASSERT(e))
58
59 #ifdef HAVE_GET_PREEMPTION_LEVEL
60 extern int get_preemption_level(void);
61 #else
62 #define get_preemption_level() (0)
63 #endif
64
65 /*
66  * Warning: low level portals debugging code (portals_debug_msg(), for
67  * example), uses spin-locks, so debugging output here may lead to nasty
68  * surprises.
69  */
70
71 #if SMP
72
73 extern void                     hw_lock_init(hw_lock_t);
74 extern void                     hw_lock_lock(hw_lock_t);
75 extern void                     hw_lock_unlock(hw_lock_t);
76 extern unsigned int             hw_lock_to(hw_lock_t, unsigned int);
77 extern unsigned int             hw_lock_try(hw_lock_t);
78 extern unsigned int             hw_lock_held(hw_lock_t);
79
/*
 * Initialize a spin-lock. SMP flavour: wraps an xnu hw_lock. The magic
 * and owner fields are maintained only when sync debugging is compiled in.
 */
void kspin_init(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        hw_lock_init(&spin->lock);
        ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
        ON_SYNC_DEBUG(spin->owner = NULL);
}
87
/*
 * Finalize a spin-lock. Debug-only checks: the lock must be valid and
 * not held by anybody at tear-down time.
 */
void kspin_done(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == NULL);
}
94
/*
 * Acquire a spin-lock (SMP). Asserts non-recursive use: the calling
 * thread must not already hold this lock.
 */
void kspin_lock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner != current_thread);

        hw_lock_lock(&spin->lock);
        /* owner is recorded only after the hardware lock is taken, so the
         * two debug statements below run under the lock. */
        SLASSERT(spin->owner == NULL);
        ON_SYNC_DEBUG(spin->owner = current_thread);
}
105
/*
 * Release a spin-lock (SMP). Must be called by the owning thread.
 * owner is cleared before the hardware lock is dropped, keeping the
 * debug state consistent for the next acquirer.
 */
void kspin_unlock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == current_thread);
        ON_SYNC_DEBUG(spin->owner = NULL);
        hw_lock_unlock(&spin->lock);
}
114
115 int  kspin_trylock(struct kspin *spin)
116 {
117         SLASSERT(spin != NULL);
118         SLASSERT(spin->magic == KSPIN_MAGIC);
119
120         if (hw_lock_try(&spin->lock)) {
121                 SLASSERT(spin->owner == NULL);
122                 ON_SYNC_DEBUG(spin->owner = current_thread);
123                 return 1;
124         } else
125                 return 0;
126 }
127
128 /* SMP */
129 #else
130
131 /*
132  * uniprocessor version of spin-lock. Only checks.
133  */
134
/*
 * Initialize a spin-lock. Uniprocessor flavour: no real locking, only
 * debug book-keeping (magic/owner), since a UP kernel cannot contend.
 */
void kspin_init(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
        ON_SYNC_DEBUG(spin->owner = NULL);
}
141
/*
 * Finalize a spin-lock (UP). Checks only: valid magic, not held.
 */
void kspin_done(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == NULL);
}
148
/*
 * "Acquire" a spin-lock (UP). No actual lock operation; only records
 * ownership so the debug assertions can detect misuse.
 */
void kspin_lock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == NULL);
        ON_SYNC_DEBUG(spin->owner = current_thread);
}
156
/*
 * "Release" a spin-lock (UP). Debug-only: verifies the caller owns the
 * lock, then clears ownership.
 */
void kspin_unlock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == current_thread);
        ON_SYNC_DEBUG(spin->owner = NULL);
}
164
/*
 * Try-lock (UP). Always succeeds (returns 1) since there is no real
 * contention on a uniprocessor; still records ownership for debugging.
 */
int kspin_trylock(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        SLASSERT(spin->owner == NULL);
        ON_SYNC_DEBUG(spin->owner = current_thread);
        return 1;
}
173
174 /* SMP */
175 #endif
176
177 #if XNU_SYNC_DEBUG
/*
 * Debug-only predicate: is @spin held by the current thread? Only
 * compiled when XNU_SYNC_DEBUG is on (owner field exists then).
 */
int kspin_islocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner == current_thread;
}
184
/*
 * Debug-only predicate: is @spin NOT held by the current thread?
 */
int kspin_isnotlocked(struct kspin *spin)
{
        SLASSERT(spin != NULL);
        SLASSERT(spin->magic == KSPIN_MAGIC);
        return spin->owner != current_thread;
}
191 #endif
192
/*
 * Initialize a counting semaphore with initial count @value. Built from
 * a kspin guard plus a Mach FIFO wait queue.
 */
void ksem_init(struct ksem *sem, int value)
{
        SLASSERT(sem != NULL);
        kspin_init(&sem->guard);
        wait_queue_init(&sem->q, SYNC_POLICY_FIFO);
        sem->value = value;
        ON_SYNC_DEBUG(sem->magic = KSEM_MAGIC);
}
201
/*
 * Finalize a semaphore. Tears down the guard spin-lock; the wait queue
 * itself cannot be checked for emptiness (see note below).
 */
void ksem_done(struct ksem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        /*
         * XXX nikita: cannot check that &sem->q is empty because
         * wait_queue_empty() is Apple private API.
         */
        kspin_done(&sem->guard);
}
212
/*
 * Increase semaphore count by @value and wake waiter(s).
 * Returns 0 if a thread was actually woken (KERN_SUCCESS), 1 if nobody
 * was waiting (KERN_NOT_WAITING).
 */
int ksem_up(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);

        kspin_lock(&sem->guard);
        sem->value += value;
        /*
         * NOTE(review): waking exactly one thread when the count lands on 0
         * and all threads otherwise looks inverted relative to the usual
         * semaphore convention -- confirm against ksem_down()'s callers
         * before changing; behavior preserved here.
         */
        if (sem->value == 0)
                result = wait_queue_wakeup_one(&sem->q, (event_t)sem,
                                               THREAD_AWAKENED);
        else
                result = wait_queue_wakeup_all(&sem->q, (event_t)sem,
                                               THREAD_AWAKENED);
        kspin_unlock(&sem->guard);
        SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
        return (result == KERN_SUCCESS) ? 0 : 1;
}
233
/*
 * Decrease semaphore count by @value, sleeping (uninterruptibly) until
 * the count is large enough. May block: caller must not hold spin-locks
 * (checked via get_preemption_level()).
 */
void ksem_down(struct ksem *sem, int value)
{
        int result;

        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KSEM_MAGIC);
        SLASSERT(value >= 0);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->guard);
        while (sem->value < value) {
                /* queue ourselves on the wait queue while still holding the
                 * guard, then drop it and block; re-take the guard before
                 * re-testing the condition (classic sleep/wakeup protocol). */
                result = wait_queue_assert_wait(&sem->q, (event_t)sem,
                                                THREAD_UNINT);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                kspin_unlock(&sem->guard);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
                kspin_lock(&sem->guard);
        }
        sem->value -= value;
        kspin_unlock(&sem->guard);
}
256
257 int ksem_trydown(struct ksem *sem, int value)
258 {
259         int result;
260
261         SLASSERT(sem != NULL);
262         SLASSERT(sem->magic == KSEM_MAGIC);
263         SLASSERT(value >= 0);
264
265         kspin_lock(&sem->guard);
266         if (sem->value >= value) {
267                 sem->value -= value;
268                 result = 0;
269         } else
270                 result = -EBUSY;
271         kspin_unlock(&sem->guard);
272         return result;
273 }
274
/*
 * Initialize a mutex, implemented as a binary semaphore (count 1) plus
 * debug owner tracking.
 */
void kmut_init(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        ksem_init(&mut->s, 1);
        ON_SYNC_DEBUG(mut->magic = KMUT_MAGIC);
        ON_SYNC_DEBUG(mut->owner = NULL);
}
282
/*
 * Finalize a mutex. Must not be held at tear-down time.
 */
void kmut_done(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == NULL);
        ksem_done(&mut->s);
}
290
/*
 * Acquire a mutex. May block; asserts non-recursive use and that no
 * spin-locks are held by the caller.
 */
void kmut_lock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner != current_thread);
        SLASSERT(get_preemption_level() == 0);

        ksem_down(&mut->s, 1);
        ON_SYNC_DEBUG(mut->owner = current_thread);
}
301
/*
 * Release a mutex held by the current thread. Ownership is cleared
 * before the semaphore is upped so a new owner never sees stale state.
 */
void kmut_unlock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        SLASSERT(mut->owner == current_thread);

        ON_SYNC_DEBUG(mut->owner = NULL);
        ksem_up(&mut->s, 1);
}
311
/*
 * Try to acquire a mutex without blocking. Returns ksem_trydown()'s
 * result: 0 on success, -EBUSY if the mutex was held.
 * NOTE(review): unlike kmut_lock(), the debug owner field is NOT set on
 * success here -- verify whether callers rely on kmut_islocked() after a
 * successful trylock.
 */
int kmut_trylock(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return ksem_trydown(&mut->s, 1);
}
318
319 #if XNU_SYNC_DEBUG
/*
 * Debug-only predicate: does the current thread own @mut?
 */
int kmut_islocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner == current_thread;
}
326
/*
 * Debug-only predicate: is @mut not owned by the current thread?
 */
int kmut_isnotlocked(struct kmut *mut)
{
        SLASSERT(mut != NULL);
        SLASSERT(mut->magic == KMUT_MAGIC);
        return mut->owner != current_thread;
}
333 #endif
334
335
/*
 * Initialize a condition variable: a guard spin-lock plus a singly
 * linked list of on-stack waiter links (struct kcond_link).
 */
void kcond_init(struct kcond *cond)
{
        SLASSERT(cond != NULL);

        kspin_init(&cond->guard);
        cond->waiters = NULL;
        ON_SYNC_DEBUG(cond->magic = KCOND_MAGIC);
}
344
/*
 * Finalize a condition variable. No waiters may remain queued.
 */
void kcond_done(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(cond->waiters == NULL);
        kspin_done(&cond->guard);
}
352
/*
 * Wait on @cond, atomically releasing @lock (which the caller must
 * hold). The waiter enqueues an on-stack link holding a private
 * semaphore, drops @lock, and sleeps on the semaphore until signalled.
 * @lock is re-acquired before return.
 */
void kcond_wait(struct kcond *cond, struct kspin *lock)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(lock != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(lock));

        ksem_init(&link.sem, 0);
        kspin_lock(&cond->guard);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);
        kspin_unlock(lock);

        ksem_down(&link.sem, 1);

        /* lock/unlock of the guard is a deliberate handshake: it ensures the
         * signaller has dropped the guard before our on-stack link goes out
         * of scope -- do not remove. */
        kspin_lock(&cond->guard);
        kspin_unlock(&cond->guard);
        kspin_lock(lock);
}
375
/*
 * Like kcond_wait(), but uses the condition variable's own guard as the
 * protecting lock. Caller must hold cond->guard; it is dropped while
 * sleeping and re-acquired before return.
 */
void kcond_wait_guard(struct kcond *cond)
{
        struct kcond_link link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        ksem_init(&link.sem, 0);
        link.next = cond->waiters;
        cond->waiters = &link;
        kspin_unlock(&cond->guard);

        ksem_down(&link.sem, 1);

        kspin_lock(&cond->guard);
}
393
/*
 * Wake one waiter. Caller must already hold cond->guard. Pops the most
 * recently queued link (LIFO) and ups its private semaphore.
 */
void kcond_signal_guard(struct kcond *cond)
{
        struct kcond_link *link;

        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);
        SLASSERT(kspin_islocked(&cond->guard));

        link = cond->waiters;
        if (link != NULL) {
                cond->waiters = link->next;
                ksem_up(&link->sem, 1);
        }
}
408
/*
 * Wake one waiter, taking the guard internally. Use kcond_signal_guard()
 * if the guard is already held.
 */
void kcond_signal(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_signal_guard(cond);
        kspin_unlock(&cond->guard);
}
418
419 void kcond_broadcast_guard(struct kcond *cond)
420 {
421         struct kcond_link *link;
422
423         SLASSERT(cond != NULL);
424         SLASSERT(cond->magic == KCOND_MAGIC);
425         SLASSERT(kspin_islocked(&cond->guard));
426
427         for (link = cond->waiters; link != NULL; link = link->next)
428                 ksem_up(&link->sem, 1);
429         cond->waiters = NULL;
430 }
431
/*
 * Wake all waiters, taking the guard internally.
 */
void kcond_broadcast(struct kcond *cond)
{
        SLASSERT(cond != NULL);
        SLASSERT(cond->magic == KCOND_MAGIC);

        kspin_lock(&cond->guard);
        kcond_broadcast_guard(cond);
        kspin_unlock(&cond->guard);
}
441
/*
 * Initialize a reader-writer semaphore built on a kcond.
 * count semantics: 0 free, >0 number of readers, -1 write-locked.
 */
void krw_sem_init(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);

        kcond_init(&sem->cond);
        sem->count = 0;
        ON_SYNC_DEBUG(sem->magic = KRW_MAGIC);
}
450
/*
 * Finalize a reader-writer semaphore. Must be unlocked (count == 0).
 */
void krw_sem_done(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == 0);
        kcond_done(&sem->cond);
}
458
/*
 * Take the semaphore for reading. Blocks while a writer holds it
 * (count < 0); multiple readers may hold it concurrently.
 */
void krw_sem_down_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count < 0)
                kcond_wait_guard(&sem->cond);
        ++ sem->count;
        kspin_unlock(&sem->cond.guard);
}
471
472 int krw_sem_down_r_try(struct krw_sem *sem)
473 {
474         SLASSERT(sem != NULL);
475         SLASSERT(sem->magic == KRW_MAGIC);
476
477         kspin_lock(&sem->cond.guard);
478         if (sem->count < 0) {
479                 kspin_unlock(&sem->cond.guard);
480                 return -EBUSY;
481         }
482         ++ sem->count;
483         kspin_unlock(&sem->cond.guard);
484         return 0;
485 }
486
/*
 * Take the semaphore for writing. Blocks until no readers and no writer
 * hold it (count == 0), then marks it write-locked (count = -1).
 */
void krw_sem_down_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        kspin_lock(&sem->cond.guard);
        while (sem->count != 0)
                kcond_wait_guard(&sem->cond);
        sem->count = -1;
        kspin_unlock(&sem->cond.guard);
}
499
500 int krw_sem_down_w_try(struct krw_sem *sem)
501 {
502         SLASSERT(sem != NULL);
503         SLASSERT(sem->magic == KRW_MAGIC);
504
505         kspin_lock(&sem->cond.guard);
506         if (sem->count != 0) {
507                 kspin_unlock(&sem->cond.guard);
508                 return -EBUSY;
509         }
510         sem->count = -1;
511         kspin_unlock(&sem->cond.guard);
512         return 0;
513 }
514
/*
 * Drop a read hold. When the last reader leaves, wake everybody so a
 * pending writer can re-test the condition.
 * NOTE(review): the count > 0 assertion runs before the guard is taken,
 * so under concurrency it is only a best-effort debug check.
 */
void krw_sem_up_r(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count > 0);

        kspin_lock(&sem->cond.guard);
        -- sem->count;
        if (sem->count == 0)
                kcond_broadcast_guard(&sem->cond);
        kspin_unlock(&sem->cond.guard);
}
527
/*
 * Drop the write hold (count must be -1) and wake all waiters. The
 * broadcast is issued after the guard is dropped, via the locking
 * variant kcond_broadcast().
 */
void krw_sem_up_w(struct krw_sem *sem)
{
        SLASSERT(sem != NULL);
        SLASSERT(sem->magic == KRW_MAGIC);
        SLASSERT(sem->count == -1);

        kspin_lock(&sem->cond.guard);
        sem->count = 0;
        kspin_unlock(&sem->cond.guard);
        kcond_broadcast(&sem->cond);
}
539
/*
 * Initialize a sleep channel (wait-queue head): guard spin-lock plus a
 * doubly linked list of ksleep_link waiters.
 */
void ksleep_chan_init(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);

        kspin_init(&chan->guard);
        CFS_INIT_LIST_HEAD(&chan->waiters);
        ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
}
548
/*
 * Finalize a sleep channel. All waiters must have been removed.
 */
void ksleep_chan_done(struct ksleep_chan *chan)
{
        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(list_empty(&chan->waiters));
        kspin_done(&chan->guard);
}
556
/*
 * Initialize a waiter link for the current thread. The thread itself is
 * used as the Mach wakeup event; hits counts wakeups that arrived while
 * the thread was not yet blocked.
 */
void ksleep_link_init(struct ksleep_link *link)
{
        SLASSERT(link != NULL);

        CFS_INIT_LIST_HEAD(&link->linkage);
        link->flags = 0;
        link->event = current_thread;
        link->hits  = 0;
        link->forward = NULL;
        ON_SYNC_DEBUG(link->magic = KSLEEP_LINK_MAGIC);
}
568
/*
 * Finalize a waiter link. It must no longer be queued on any channel.
 */
void ksleep_link_done(struct ksleep_link *link)
{
        SLASSERT(link != NULL);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));
}
575
/*
 * Queue @link on @chan. Exclusive waiters go to the tail, non-exclusive
 * to the head, so ksleep_wake_nr() wakes non-exclusive waiters first.
 */
void ksleep_add(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
        SLASSERT(list_empty(&link->linkage));

        kspin_lock(&chan->guard);
        if (link->flags & KSLEEP_EXCLUSIVE)
                list_add_tail(&link->linkage, &chan->waiters);
        else
                list_add(&link->linkage, &chan->waiters);
        kspin_unlock(&chan->guard);
}
591
/*
 * Remove @link from @chan under the guard. list_del_init() leaves the
 * link re-usable (and makes ksleep_link_done()'s emptiness check pass).
 */
void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link)
{
        SLASSERT(chan != NULL);
        SLASSERT(link != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(link->magic == KSLEEP_LINK_MAGIC);

        kspin_lock(&chan->guard);
        list_del_init(&link->linkage);
        kspin_unlock(&chan->guard);
}
603
/*
 * Check whether a wakeup for @event was delivered while its thread was
 * not blocked; if so, consume one pending hit and return 1. Caller must
 * hold chan->guard.
 */
static int has_hits(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event && scan->hits > 0) {
                        /* consume hit */
                        -- scan->hits;
                        return 1;
                }
        }
        return 0;
}
618
/*
 * Record a pending wakeup ("hit") for the first link on @chan matching
 * @event. Caller must hold chan->guard.
 */
static void add_hit(struct ksleep_chan *chan, event_t event)
{
        struct ksleep_link *scan;

        SLASSERT(kspin_islocked(&chan->guard));
        list_for_each_entry(scan, &chan->waiters, linkage) {
                if (scan->event == event) {
                        ++ scan->hits;
                        break;
                }
        }
}
631
/*
 * Sleep (uninterruptibly) on @chan until woken. A previously recorded
 * hit (wakeup that raced with us before we blocked) satisfies the wait
 * immediately without blocking.
 */
void ksleep_wait(struct ksleep_chan *chan)
{
        event_t event;
        int     result;

        ENTRY;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        event = current_thread;
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                /* register for wakeup while still holding the guard, then
                 * drop it before actually blocking */
                result = assert_wait(event, THREAD_UNINT);
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        thread_block(THREAD_CONTINUE_NULL);
        } else
                kspin_unlock(&chan->guard);
        EXIT;
}
655
/*
 * Sleep on @chan for at most @timeout nanoseconds (0 == no timeout).
 * Returns 0 if the wait timed out (or a hit was pending), otherwise the
 * time actually spent blocked, in nanoseconds.
 */
int64_t ksleep_timedwait(struct ksleep_chan *chan, uint64_t timeout)
{
        event_t event;
        int64_t     result; 
        AbsoluteTime clock_current; 
        AbsoluteTime clock_delay;

        ENTRY;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
        SLASSERT(get_preemption_level() == 0);

        CDEBUG(D_TRACE, "timeout: %llu\n", (long long unsigned)timeout);

        event = current_thread;
        result = 0;
        kspin_lock(&chan->guard);
        if (!has_hits(chan, event)) {
                result = assert_wait(event, THREAD_UNINT);
                if (timeout > 0) {
                        /*
                         * arm a timer. thread_set_timer()'s first argument is
                         * uint32_t, so we have to cook deadline ourselves.
                         */
                        clock_get_uptime(&clock_current);
                        nanoseconds_to_absolutetime(timeout, &clock_delay);
                        ADD_ABSOLUTETIME(&clock_current, &clock_delay);
                        thread_set_timer_deadline(clock_current);
                }
                kspin_unlock(&chan->guard);
                SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
                if (result == THREAD_WAITING)
                        result = thread_block(THREAD_CONTINUE_NULL);
                /* always cancel: we may have been woken before the timer fired */
                thread_cancel_timer();

                /* measure elapsed blocked time: now minus the pre-block stamp
                 * (clock_current was advanced to the deadline above, so this
                 * is deadline-relative when a timeout was armed) */
                clock_get_uptime(&clock_delay);
                SUB_ABSOLUTETIME(&clock_delay, &clock_current);
                if (result == THREAD_TIMED_OUT)
                        result = 0;
                else {
                        /*
                         * NOTE(review): absolutetime_to_nanoseconds() takes a
                         * uint64_t destination while result is int64_t --
                         * confirm the cast-free call is intentional.
                         */
                        absolutetime_to_nanoseconds(clock_delay, &result);
                        if (result < 0)
                                result = 0;
                }
        } else
                kspin_unlock(&chan->guard);

        RETURN(result);
}
706
707 /*
708  * wake up single exclusive waiter (plus some arbitrary number of *
709  * non-exclusive)
710  */
/*
 * Wake up a single exclusive waiter on @chan (plus any number of
 * non-exclusive waiters queued ahead of it).
 */
void ksleep_wake(struct ksleep_chan *chan)
{
        ENTRY;
        ksleep_wake_nr(chan, 1);
        EXIT;
}
717
718 /*
719  * wake up all waiters on @chan
720  */
/*
 * Wake up all waiters on @chan (nr == 0 means "no exclusive limit" in
 * ksleep_wake_nr()).
 */
void ksleep_wake_all(struct ksleep_chan *chan)
{
        ENTRY;
        ksleep_wake_nr(chan, 0);
        EXIT;
}
727
728 /*
729  * wakeup no more than @nr exclusive waiters from @chan, plus some arbitrary
730  * number of non-exclusive. If @nr is 0, wake up all waiters.
731  */
/*
 * Wake up to @nr exclusive waiters on @chan plus any non-exclusive ones
 * encountered first; @nr == 0 wakes everybody (the `--nr == 0` test can
 * then never fire, since nr goes negative).
 */
void ksleep_wake_nr(struct ksleep_chan *chan, int nr)
{
        struct ksleep_link *scan;
        int result;

        ENTRY;

        SLASSERT(chan != NULL);
        SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);

        kspin_lock(&chan->guard);
        list_for_each_entry(scan, &chan->waiters, linkage) {
                struct ksleep_chan *forward;

                /* a link may forward the wakeup to a second channel; lock it
                 * while chan->guard is held (fixed lock ordering) */
                forward = scan->forward;
                if (forward != NULL)
                        kspin_lock(&forward->guard);
                result = thread_wakeup(scan->event);
                CDEBUG(D_INFO, "waking 0x%x: %d\n",
                       (unsigned int)scan->event, result);
                SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
                if (result == KERN_NOT_WAITING) {
                        /* target was not blocked yet: record the wakeup as a
                         * pending hit so it is not lost */
                        ++ scan->hits;
                        if (forward != NULL)
                                add_hit(forward, scan->event);
                }
                if (forward != NULL)
                        kspin_unlock(&forward->guard);
                if ((scan->flags & KSLEEP_EXCLUSIVE) && --nr == 0)
                        break;
        }
        kspin_unlock(&chan->guard);
        EXIT;
}
766
/*
 * Initialize a one-shot timer that calls @func(@arg) when it fires.
 */
void ktimer_init(struct ktimer *t, void (*func)(void *), void *arg)
{
        SLASSERT(t != NULL);
        SLASSERT(func != NULL);

        kspin_init(&t->guard);
        t->func = func;
        t->arg  = arg;
        ON_SYNC_DEBUG(t->magic = KTIMER_MAGIC);
}
777
/*
 * Finalize a timer. Magic is cleared so later use is caught by asserts.
 */
void ktimer_done(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);
        kspin_done(&t->guard);
        ON_SYNC_DEBUG(t->magic = 0);
}
785
/*
 * thread_call callback. Atomically reads-and-clears the armed flag under
 * the guard, then invokes the user function only if the timer was still
 * armed (i.e. not disarmed after this call was scheduled).
 */
static void ktimer_actor(void *arg0, void *arg1)
{
        struct ktimer *t;
        int            armed;

        t = arg0;
        /*
         * this assumes that ktimer's are never freed.
         */
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * call actual timer function
         */
        kspin_lock(&t->guard);
        armed = t->armed;
        t->armed = 0;
        kspin_unlock(&t->guard);

        /* func is called outside the guard to avoid self-deadlock if it
         * re-arms the timer */
        if (armed)
                t->func(t->arg);
}
809
/*
 * Cancel any pending thread_call for this timer. Caller holds t->guard
 * (hence "_locked"). FALSE: do not wait for a running callback.
 */
static void ktimer_disarm_locked(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        thread_call_func_cancel(ktimer_actor, t, FALSE);
}
817
818 void ktimer_arm(struct ktimer *t, u_int64_t deadline)
819 {
820         SLASSERT(t != NULL);
821         SLASSERT(t->magic == KTIMER_MAGIC);
822
823         kspin_lock(&t->guard);
824         ktimer_disarm_locked(t);
825         t->armed = 1;
826         thread_call_func_delayed(ktimer_actor, t, *(AbsoluteTime *)&deadline);
827         kspin_unlock(&t->guard);
828 }
829
/*
 * Disarm the timer: clear the armed flag (so a callback already in
 * flight becomes a no-op in ktimer_actor()) and cancel the pending call.
 */
void ktimer_disarm(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        kspin_lock(&t->guard);
        t->armed = 0;
        ktimer_disarm_locked(t);
        kspin_unlock(&t->guard);
}
840
/*
 * Report whether the timer is currently armed. Deliberately lock-free;
 * the answer may be stale by the time the caller sees it.
 */
int ktimer_is_armed(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        /*
         * no locking---result is only a hint anyway.
         */
        return t->armed;
}
851
/*
 * Return the timer's recorded deadline (unlocked read, hint only).
 * NOTE(review): verify that the arming path actually stores into
 * t->deadline; otherwise this returns a stale/uninitialized value.
 */
u_int64_t ktimer_deadline(struct ktimer *t)
{
        SLASSERT(t != NULL);
        SLASSERT(t->magic == KTIMER_MAGIC);

        return t->deadline;
}
859
860 /*
861  * Local variables:
862  * c-indentation-style: "K&R"
863  * c-basic-offset: 8
864  * tab-width: 8
865  * fill-column: 80
866  * scroll-step: 1
867  * End:
868  */