1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * XNU (Darwin) synchronization primitives for Lustre
6 * Copyright (c) 2004 Cluster File Systems, Inc.
8 * This file is part of Lustre, http://www.lustre.org.
10 * Lustre is free software; you can redistribute it and/or modify it under
11 * the terms of version 2 of the GNU General Public License as published by
12 * the Free Software Foundation. Lustre is distributed in the hope that it
13 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
14 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. You should have received a
16 * copy of the GNU General Public License along with Lustre; if not, write
17 * to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
24 * Created by nikita on Sun Jul 18 2004.
26 * Implementations of XNU synchronization primitives.
30 * This file contains very simplistic implementations of (saner) API for
31 * basic synchronization primitives:
39 * - condition variable (kcond)
41 * - wait-queue (ksleep_chan and ksleep_link)
45 * A lot can be optimized here.
48 #include <mach/mach_types.h>
49 #include <sys/types.h>
50 #include <kern/simple_lock.h>
52 #define DEBUG_SUBSYSTEM S_PORTALS
54 #include <libcfs/libcfs.h>
55 #include <libcfs/kp30.h>
57 #define SLASSERT(e) ON_SYNC_DEBUG(LASSERT(e))
59 #ifdef HAVE_GET_PREEMPTION_LEVEL
60 extern int get_preemption_level(void);
62 #define get_preemption_level() (0)
66 * Warning: low level portals debugging code (portals_debug_msg(), for
67 * example), uses spin-locks, so debugging output here may lead to nasty
73 extern void hw_lock_init(hw_lock_t);
74 extern void hw_lock_lock(hw_lock_t);
75 extern void hw_lock_unlock(hw_lock_t);
76 extern unsigned int hw_lock_to(hw_lock_t, unsigned int);
77 extern unsigned int hw_lock_try(hw_lock_t);
78 extern unsigned int hw_lock_held(hw_lock_t);
80 void kspin_init(struct kspin *spin)
82 SLASSERT(spin != NULL);
83 hw_lock_init(&spin->lock);
84 ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
85 ON_SYNC_DEBUG(spin->owner = NULL);
88 void kspin_done(struct kspin *spin)
90 SLASSERT(spin != NULL);
91 SLASSERT(spin->magic == KSPIN_MAGIC);
92 SLASSERT(spin->owner == NULL);
95 void kspin_lock(struct kspin *spin)
97 SLASSERT(spin != NULL);
98 SLASSERT(spin->magic == KSPIN_MAGIC);
99 SLASSERT(spin->owner != current_thread);
101 hw_lock_lock(&spin->lock);
102 SLASSERT(spin->owner == NULL);
103 ON_SYNC_DEBUG(spin->owner = current_thread);
106 void kspin_unlock(struct kspin *spin)
108 SLASSERT(spin != NULL);
109 SLASSERT(spin->magic == KSPIN_MAGIC);
110 SLASSERT(spin->owner == current_thread);
111 ON_SYNC_DEBUG(spin->owner = NULL);
112 hw_lock_unlock(&spin->lock);
115 int kspin_trylock(struct kspin *spin)
117 SLASSERT(spin != NULL);
118 SLASSERT(spin->magic == KSPIN_MAGIC);
120 if (hw_lock_try(&spin->lock)) {
121 SLASSERT(spin->owner == NULL);
122 ON_SYNC_DEBUG(spin->owner = current_thread);
132 * uniprocessor version of spin-lock. Only checks.
135 void kspin_init(struct kspin *spin)
137 SLASSERT(spin != NULL);
138 ON_SYNC_DEBUG(spin->magic = KSPIN_MAGIC);
139 ON_SYNC_DEBUG(spin->owner = NULL);
142 void kspin_done(struct kspin *spin)
144 SLASSERT(spin != NULL);
145 SLASSERT(spin->magic == KSPIN_MAGIC);
146 SLASSERT(spin->owner == NULL);
149 void kspin_lock(struct kspin *spin)
151 SLASSERT(spin != NULL);
152 SLASSERT(spin->magic == KSPIN_MAGIC);
153 SLASSERT(spin->owner == NULL);
154 ON_SYNC_DEBUG(spin->owner = current_thread);
157 void kspin_unlock(struct kspin *spin)
159 SLASSERT(spin != NULL);
160 SLASSERT(spin->magic == KSPIN_MAGIC);
161 SLASSERT(spin->owner == current_thread);
162 ON_SYNC_DEBUG(spin->owner = NULL);
165 int kspin_trylock(struct kspin *spin)
167 SLASSERT(spin != NULL);
168 SLASSERT(spin->magic == KSPIN_MAGIC);
169 SLASSERT(spin->owner == NULL);
170 ON_SYNC_DEBUG(spin->owner = current_thread);
178 int kspin_islocked(struct kspin *spin)
180 SLASSERT(spin != NULL);
181 SLASSERT(spin->magic == KSPIN_MAGIC);
182 return spin->owner == current_thread;
185 int kspin_isnotlocked(struct kspin *spin)
187 SLASSERT(spin != NULL);
188 SLASSERT(spin->magic == KSPIN_MAGIC);
189 return spin->owner != current_thread;
193 void ksem_init(struct ksem *sem, int value)
195 SLASSERT(sem != NULL);
196 kspin_init(&sem->guard);
197 wait_queue_init(&sem->q, SYNC_POLICY_FIFO);
199 ON_SYNC_DEBUG(sem->magic = KSEM_MAGIC);
202 void ksem_done(struct ksem *sem)
204 SLASSERT(sem != NULL);
205 SLASSERT(sem->magic == KSEM_MAGIC);
207 * XXX nikita: cannot check that &sem->q is empty because
208 * wait_queue_empty() is Apple private API.
210 kspin_done(&sem->guard);
213 int ksem_up(struct ksem *sem, int value)
217 SLASSERT(sem != NULL);
218 SLASSERT(sem->magic == KSEM_MAGIC);
219 SLASSERT(value >= 0);
221 kspin_lock(&sem->guard);
224 result = wait_queue_wakeup_one(&sem->q, (event_t)sem,
227 result = wait_queue_wakeup_all(&sem->q, (event_t)sem,
229 kspin_unlock(&sem->guard);
230 SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
231 return (result == KERN_SUCCESS) ? 0 : 1;
234 void ksem_down(struct ksem *sem, int value)
238 SLASSERT(sem != NULL);
239 SLASSERT(sem->magic == KSEM_MAGIC);
240 SLASSERT(value >= 0);
241 SLASSERT(get_preemption_level() == 0);
243 kspin_lock(&sem->guard);
244 while (sem->value < value) {
245 result = wait_queue_assert_wait(&sem->q, (event_t)sem,
247 SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
248 kspin_unlock(&sem->guard);
249 if (result == THREAD_WAITING)
250 thread_block(THREAD_CONTINUE_NULL);
251 kspin_lock(&sem->guard);
254 kspin_unlock(&sem->guard);
257 int ksem_trydown(struct ksem *sem, int value)
261 SLASSERT(sem != NULL);
262 SLASSERT(sem->magic == KSEM_MAGIC);
263 SLASSERT(value >= 0);
265 kspin_lock(&sem->guard);
266 if (sem->value >= value) {
271 kspin_unlock(&sem->guard);
275 void kmut_init(struct kmut *mut)
277 SLASSERT(mut != NULL);
278 ksem_init(&mut->s, 1);
279 ON_SYNC_DEBUG(mut->magic = KMUT_MAGIC);
280 ON_SYNC_DEBUG(mut->owner = NULL);
283 void kmut_done(struct kmut *mut)
285 SLASSERT(mut != NULL);
286 SLASSERT(mut->magic == KMUT_MAGIC);
287 SLASSERT(mut->owner == NULL);
291 void kmut_lock(struct kmut *mut)
293 SLASSERT(mut != NULL);
294 SLASSERT(mut->magic == KMUT_MAGIC);
295 SLASSERT(mut->owner != current_thread);
296 SLASSERT(get_preemption_level() == 0);
298 ksem_down(&mut->s, 1);
299 ON_SYNC_DEBUG(mut->owner = current_thread);
302 void kmut_unlock(struct kmut *mut)
304 SLASSERT(mut != NULL);
305 SLASSERT(mut->magic == KMUT_MAGIC);
306 SLASSERT(mut->owner == current_thread);
308 ON_SYNC_DEBUG(mut->owner = NULL);
312 int kmut_trylock(struct kmut *mut)
314 SLASSERT(mut != NULL);
315 SLASSERT(mut->magic == KMUT_MAGIC);
316 return ksem_trydown(&mut->s, 1);
320 int kmut_islocked(struct kmut *mut)
322 SLASSERT(mut != NULL);
323 SLASSERT(mut->magic == KMUT_MAGIC);
324 return mut->owner == current_thread;
327 int kmut_isnotlocked(struct kmut *mut)
329 SLASSERT(mut != NULL);
330 SLASSERT(mut->magic == KMUT_MAGIC);
331 return mut->owner != current_thread;
336 void kcond_init(struct kcond *cond)
338 SLASSERT(cond != NULL);
340 kspin_init(&cond->guard);
341 cond->waiters = NULL;
342 ON_SYNC_DEBUG(cond->magic = KCOND_MAGIC);
345 void kcond_done(struct kcond *cond)
347 SLASSERT(cond != NULL);
348 SLASSERT(cond->magic == KCOND_MAGIC);
349 SLASSERT(cond->waiters == NULL);
350 kspin_done(&cond->guard);
353 void kcond_wait(struct kcond *cond, struct kspin *lock)
355 struct kcond_link link;
357 SLASSERT(cond != NULL);
358 SLASSERT(lock != NULL);
359 SLASSERT(cond->magic == KCOND_MAGIC);
360 SLASSERT(kspin_islocked(lock));
362 ksem_init(&link.sem, 0);
363 kspin_lock(&cond->guard);
364 link.next = cond->waiters;
365 cond->waiters = &link;
366 kspin_unlock(&cond->guard);
369 ksem_down(&link.sem, 1);
371 kspin_lock(&cond->guard);
372 kspin_unlock(&cond->guard);
376 void kcond_wait_guard(struct kcond *cond)
378 struct kcond_link link;
380 SLASSERT(cond != NULL);
381 SLASSERT(cond->magic == KCOND_MAGIC);
382 SLASSERT(kspin_islocked(&cond->guard));
384 ksem_init(&link.sem, 0);
385 link.next = cond->waiters;
386 cond->waiters = &link;
387 kspin_unlock(&cond->guard);
389 ksem_down(&link.sem, 1);
391 kspin_lock(&cond->guard);
394 void kcond_signal_guard(struct kcond *cond)
396 struct kcond_link *link;
398 SLASSERT(cond != NULL);
399 SLASSERT(cond->magic == KCOND_MAGIC);
400 SLASSERT(kspin_islocked(&cond->guard));
402 link = cond->waiters;
404 cond->waiters = link->next;
405 ksem_up(&link->sem, 1);
409 void kcond_signal(struct kcond *cond)
411 SLASSERT(cond != NULL);
412 SLASSERT(cond->magic == KCOND_MAGIC);
414 kspin_lock(&cond->guard);
415 kcond_signal_guard(cond);
416 kspin_unlock(&cond->guard);
419 void kcond_broadcast_guard(struct kcond *cond)
421 struct kcond_link *link;
423 SLASSERT(cond != NULL);
424 SLASSERT(cond->magic == KCOND_MAGIC);
425 SLASSERT(kspin_islocked(&cond->guard));
427 for (link = cond->waiters; link != NULL; link = link->next)
428 ksem_up(&link->sem, 1);
429 cond->waiters = NULL;
432 void kcond_broadcast(struct kcond *cond)
434 SLASSERT(cond != NULL);
435 SLASSERT(cond->magic == KCOND_MAGIC);
437 kspin_lock(&cond->guard);
438 kcond_broadcast_guard(cond);
439 kspin_unlock(&cond->guard);
442 void krw_sem_init(struct krw_sem *sem)
444 SLASSERT(sem != NULL);
446 kcond_init(&sem->cond);
448 ON_SYNC_DEBUG(sem->magic = KRW_MAGIC);
451 void krw_sem_done(struct krw_sem *sem)
453 SLASSERT(sem != NULL);
454 SLASSERT(sem->magic == KRW_MAGIC);
455 SLASSERT(sem->count == 0);
456 kcond_done(&sem->cond);
459 void krw_sem_down_r(struct krw_sem *sem)
461 SLASSERT(sem != NULL);
462 SLASSERT(sem->magic == KRW_MAGIC);
463 SLASSERT(get_preemption_level() == 0);
465 kspin_lock(&sem->cond.guard);
466 while (sem->count < 0)
467 kcond_wait_guard(&sem->cond);
469 kspin_unlock(&sem->cond.guard);
472 int krw_sem_down_r_try(struct krw_sem *sem)
474 SLASSERT(sem != NULL);
475 SLASSERT(sem->magic == KRW_MAGIC);
477 kspin_lock(&sem->cond.guard);
478 if (sem->count < 0) {
479 kspin_unlock(&sem->cond.guard);
483 kspin_unlock(&sem->cond.guard);
487 void krw_sem_down_w(struct krw_sem *sem)
489 SLASSERT(sem != NULL);
490 SLASSERT(sem->magic == KRW_MAGIC);
491 SLASSERT(get_preemption_level() == 0);
493 kspin_lock(&sem->cond.guard);
494 while (sem->count != 0)
495 kcond_wait_guard(&sem->cond);
497 kspin_unlock(&sem->cond.guard);
500 int krw_sem_down_w_try(struct krw_sem *sem)
502 SLASSERT(sem != NULL);
503 SLASSERT(sem->magic == KRW_MAGIC);
505 kspin_lock(&sem->cond.guard);
506 if (sem->count != 0) {
507 kspin_unlock(&sem->cond.guard);
511 kspin_unlock(&sem->cond.guard);
515 void krw_sem_up_r(struct krw_sem *sem)
517 SLASSERT(sem != NULL);
518 SLASSERT(sem->magic == KRW_MAGIC);
519 SLASSERT(sem->count > 0);
521 kspin_lock(&sem->cond.guard);
524 kcond_broadcast_guard(&sem->cond);
525 kspin_unlock(&sem->cond.guard);
528 void krw_sem_up_w(struct krw_sem *sem)
530 SLASSERT(sem != NULL);
531 SLASSERT(sem->magic == KRW_MAGIC);
532 SLASSERT(sem->count == -1);
534 kspin_lock(&sem->cond.guard);
536 kspin_unlock(&sem->cond.guard);
537 kcond_broadcast(&sem->cond);
540 void ksleep_chan_init(struct ksleep_chan *chan)
542 SLASSERT(chan != NULL);
544 kspin_init(&chan->guard);
545 CFS_INIT_LIST_HEAD(&chan->waiters);
546 ON_SYNC_DEBUG(chan->magic = KSLEEP_CHAN_MAGIC);
549 void ksleep_chan_done(struct ksleep_chan *chan)
551 SLASSERT(chan != NULL);
552 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
553 SLASSERT(list_empty(&chan->waiters));
554 kspin_done(&chan->guard);
557 void ksleep_link_init(struct ksleep_link *link)
559 SLASSERT(link != NULL);
561 CFS_INIT_LIST_HEAD(&link->linkage);
563 link->event = current_thread;
565 link->forward = NULL;
566 ON_SYNC_DEBUG(link->magic = KSLEEP_LINK_MAGIC);
569 void ksleep_link_done(struct ksleep_link *link)
571 SLASSERT(link != NULL);
572 SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
573 SLASSERT(list_empty(&link->linkage));
576 void ksleep_add(struct ksleep_chan *chan, struct ksleep_link *link)
578 SLASSERT(chan != NULL);
579 SLASSERT(link != NULL);
580 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
581 SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
582 SLASSERT(list_empty(&link->linkage));
584 kspin_lock(&chan->guard);
585 if (link->flags & KSLEEP_EXCLUSIVE)
586 list_add_tail(&link->linkage, &chan->waiters);
588 list_add(&link->linkage, &chan->waiters);
589 kspin_unlock(&chan->guard);
592 void ksleep_del(struct ksleep_chan *chan, struct ksleep_link *link)
594 SLASSERT(chan != NULL);
595 SLASSERT(link != NULL);
596 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
597 SLASSERT(link->magic == KSLEEP_LINK_MAGIC);
599 kspin_lock(&chan->guard);
600 list_del_init(&link->linkage);
601 kspin_unlock(&chan->guard);
604 static int has_hits(struct ksleep_chan *chan, event_t event)
606 struct ksleep_link *scan;
608 SLASSERT(kspin_islocked(&chan->guard));
609 list_for_each_entry(scan, &chan->waiters, linkage) {
610 if (scan->event == event && scan->hits > 0) {
619 static void add_hit(struct ksleep_chan *chan, event_t event)
621 struct ksleep_link *scan;
623 SLASSERT(kspin_islocked(&chan->guard));
624 list_for_each_entry(scan, &chan->waiters, linkage) {
625 if (scan->event == event) {
632 void ksleep_wait(struct ksleep_chan *chan)
639 SLASSERT(chan != NULL);
640 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
641 SLASSERT(get_preemption_level() == 0);
643 event = current_thread;
644 kspin_lock(&chan->guard);
645 if (!has_hits(chan, event)) {
646 result = assert_wait(event, THREAD_UNINT);
647 kspin_unlock(&chan->guard);
648 SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
649 if (result == THREAD_WAITING)
650 thread_block(THREAD_CONTINUE_NULL);
652 kspin_unlock(&chan->guard);
656 int64_t ksleep_timedwait(struct ksleep_chan *chan, uint64_t timeout)
660 AbsoluteTime clock_current;
661 AbsoluteTime clock_delay;
665 SLASSERT(chan != NULL);
666 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
667 SLASSERT(get_preemption_level() == 0);
669 CDEBUG(D_TRACE, "timeout: %llu\n", (long long unsigned)timeout);
671 event = current_thread;
673 kspin_lock(&chan->guard);
674 if (!has_hits(chan, event)) {
675 result = assert_wait(event, THREAD_UNINT);
678 * arm a timer. thread_set_timer()'s first argument is
679 * uint32_t, so we have to cook deadline ourselves.
681 clock_get_uptime(&clock_current);
682 nanoseconds_to_absolutetime(timeout, &clock_delay);
683 ADD_ABSOLUTETIME(&clock_current, &clock_delay);
684 thread_set_timer_deadline(clock_current);
686 kspin_unlock(&chan->guard);
687 SLASSERT(result == THREAD_AWAKENED || result == THREAD_WAITING);
688 if (result == THREAD_WAITING)
689 result = thread_block(THREAD_CONTINUE_NULL);
690 thread_cancel_timer();
692 clock_get_uptime(&clock_delay);
693 SUB_ABSOLUTETIME(&clock_delay, &clock_current);
694 if (result == THREAD_TIMED_OUT)
697 absolutetime_to_nanoseconds(clock_delay, &result);
702 kspin_unlock(&chan->guard);
708 * wake up single exclusive waiter (plus some arbitrary number of *
711 void ksleep_wake(struct ksleep_chan *chan)
714 ksleep_wake_nr(chan, 1);
719 * wake up all waiters on @chan
721 void ksleep_wake_all(struct ksleep_chan *chan)
724 ksleep_wake_nr(chan, 0);
729 * wakeup no more than @nr exclusive waiters from @chan, plus some arbitrary
730 * number of non-exclusive. If @nr is 0, wake up all waiters.
732 void ksleep_wake_nr(struct ksleep_chan *chan, int nr)
734 struct ksleep_link *scan;
739 SLASSERT(chan != NULL);
740 SLASSERT(chan->magic == KSLEEP_CHAN_MAGIC);
742 kspin_lock(&chan->guard);
743 list_for_each_entry(scan, &chan->waiters, linkage) {
744 struct ksleep_chan *forward;
746 forward = scan->forward;
748 kspin_lock(&forward->guard);
749 result = thread_wakeup(scan->event);
750 CDEBUG(D_INFO, "waking 0x%x: %d\n",
751 (unsigned int)scan->event, result);
752 SLASSERT(result == KERN_SUCCESS || result == KERN_NOT_WAITING);
753 if (result == KERN_NOT_WAITING) {
756 add_hit(forward, scan->event);
759 kspin_unlock(&forward->guard);
760 if ((scan->flags & KSLEEP_EXCLUSIVE) && --nr == 0)
763 kspin_unlock(&chan->guard);
767 void ktimer_init(struct ktimer *t, void (*func)(void *), void *arg)
770 SLASSERT(func != NULL);
772 kspin_init(&t->guard);
775 ON_SYNC_DEBUG(t->magic = KTIMER_MAGIC);
778 void ktimer_done(struct ktimer *t)
781 SLASSERT(t->magic == KTIMER_MAGIC);
782 kspin_done(&t->guard);
783 ON_SYNC_DEBUG(t->magic = 0);
786 static void ktimer_actor(void *arg0, void *arg1)
793 * this assumes that ktimer's are never freed.
796 SLASSERT(t->magic == KTIMER_MAGIC);
799 * call actual timer function
801 kspin_lock(&t->guard);
804 kspin_unlock(&t->guard);
810 static void ktimer_disarm_locked(struct ktimer *t)
813 SLASSERT(t->magic == KTIMER_MAGIC);
815 thread_call_func_cancel(ktimer_actor, t, FALSE);
818 void ktimer_arm(struct ktimer *t, u_int64_t deadline)
821 SLASSERT(t->magic == KTIMER_MAGIC);
823 kspin_lock(&t->guard);
824 ktimer_disarm_locked(t);
826 thread_call_func_delayed(ktimer_actor, t, *(AbsoluteTime *)&deadline);
827 kspin_unlock(&t->guard);
830 void ktimer_disarm(struct ktimer *t)
833 SLASSERT(t->magic == KTIMER_MAGIC);
835 kspin_lock(&t->guard);
837 ktimer_disarm_locked(t);
838 kspin_unlock(&t->guard);
841 int ktimer_is_armed(struct ktimer *t)
844 SLASSERT(t->magic == KTIMER_MAGIC);
847 * no locking---result is only a hint anyway.
852 u_int64_t ktimer_deadline(struct ktimer *t)
855 SLASSERT(t->magic == KTIMER_MAGIC);
862 * c-indentation-style: "K&R"