X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=libcfs%2Flibcfs%2Fuser-lock.c;h=81d5a119337876ca215ec78514645fb17b134e48;hb=65701b4a30efdb695776bcf690a2b3cabc928da1;hp=53ab2c447a94581640915af0846a9df3cb513198;hpb=dc528f3149065350edf169e4ead19770413a52b3;p=fs%2Flustre-release.git

diff --git a/libcfs/libcfs/user-lock.c b/libcfs/libcfs/user-lock.c
index 53ab2c4..81d5a11 100644
--- a/libcfs/libcfs/user-lock.c
+++ b/libcfs/libcfs/user-lock.c
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -71,41 +71,41 @@
  * No-op implementation.
  */
 
-void spin_lock_init(spinlock_t *lock)
+void cfs_spin_lock_init(cfs_spinlock_t *lock)
 {
         LASSERT(lock != NULL);
         (void)lock;
 }
 
-void spin_lock(spinlock_t *lock)
+void cfs_spin_lock(cfs_spinlock_t *lock)
 {
         (void)lock;
 }
 
-void spin_unlock(spinlock_t *lock)
+void cfs_spin_unlock(cfs_spinlock_t *lock)
 {
         (void)lock;
 }
 
-int spin_trylock(spinlock_t *lock)
+int cfs_spin_trylock(cfs_spinlock_t *lock)
 {
         (void)lock;
         return 1;
 }
 
-void spin_lock_bh_init(spinlock_t *lock)
+void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
 {
         LASSERT(lock != NULL);
         (void)lock;
 }
 
-void spin_lock_bh(spinlock_t *lock)
+void cfs_spin_lock_bh(cfs_spinlock_t *lock)
 {
         LASSERT(lock != NULL);
         (void)lock;
 }
 
-void spin_unlock_bh(spinlock_t *lock)
+void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
 {
         LASSERT(lock != NULL);
         (void)lock;
@@ -119,20 +119,27 @@ void spin_unlock_bh(spinlock_t *lock)
  * - __up(x)
  */
 
-void sema_init(struct semaphore *s, int val)
+void cfs_sema_init(cfs_semaphore_t *s, int val)
 {
         LASSERT(s != NULL);
         (void)s;
         (void)val;
 }
 
-void __down(struct semaphore *s)
+void __down(cfs_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-void __up(struct semaphore *s)
+int __down_interruptible(cfs_semaphore_t *s)
+{
+        LASSERT(s != NULL);
+        (void)s;
+        return 0;
+}
+
+void __up(cfs_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
@@ -149,43 +156,46 @@ void __up(struct semaphore *s)
 
 static cfs_wait_handler_t wait_handler;
 
-void init_completion_module(cfs_wait_handler_t handler)
+void cfs_init_completion_module(cfs_wait_handler_t handler)
 {
         wait_handler = handler;
 }
 
-void init_completion(struct completion *c)
+int cfs_call_wait_handler(int timeout)
+{
+        if (!wait_handler)
+                return -ENOSYS;
+        return wait_handler(timeout);
+}
+
+void cfs_init_completion(cfs_completion_t *c)
 {
         LASSERT(c != NULL);
         c->done = 0;
         cfs_waitq_init(&c->wait);
 }
 
-void complete(struct completion *c)
+void cfs_complete(cfs_completion_t *c)
 {
         LASSERT(c != NULL);
         c->done = 1;
         cfs_waitq_signal(&c->wait);
 }
 
-void wait_for_completion(struct completion *c)
+void cfs_wait_for_completion(cfs_completion_t *c)
 {
         LASSERT(c != NULL);
         do {
-                if (wait_handler)
-                        wait_handler(1000);
-                else
+                if (cfs_call_wait_handler(1000) < 0)
                         break;
         } while (c->done == 0);
 }
 
-int wait_for_completion_interruptible(struct completion *c)
+int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
 {
         LASSERT(c != NULL);
         do {
-                if (wait_handler)
-                        wait_handler(1000);
-                else
+                if (cfs_call_wait_handler(1000) < 0)
                         break;
         } while (c->done == 0);
         return 0;
@@ -202,45 +212,51 @@ int wait_for_completion_interruptible(struct completion *c)
  * - up_write(x)
  */
 
-void init_rwsem(struct rw_semaphore *s)
+void cfs_init_rwsem(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-void down_read(struct rw_semaphore *s)
+void cfs_down_read(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-int down_read_trylock(struct rw_semaphore *s)
+int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
         return 1;
 }
 
-void down_write(struct rw_semaphore *s)
+void cfs_down_write(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-int down_write_trylock(struct rw_semaphore *s)
+int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
         return 1;
 }
 
-void up_read(struct rw_semaphore *s)
+void cfs_up_read(cfs_rw_semaphore_t *s)
+{
+        LASSERT(s != NULL);
+        (void)s;
+}
+
+void cfs_up_write(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
 }
 
-void up_write(struct rw_semaphore *s)
+void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
 {
         LASSERT(s != NULL);
         (void)s;
@@ -249,10 +265,10 @@ void up_write(struct rw_semaphore *s)
 #ifdef HAVE_LIBPTHREAD
 
 /*
- * Completion
+ * Multi-threaded user space completion
  */
 
-void cfs_init_completion(struct cfs_completion *c)
+void cfs_mt_init_completion(cfs_mt_completion_t *c)
 {
         LASSERT(c != NULL);
         c->c_done = 0;
@@ -260,14 +276,14 @@ void cfs_init_completion(struct cfs_completion *c)
         pthread_cond_init(&c->c_cond, NULL);
 }
 
-void cfs_fini_completion(struct cfs_completion *c)
+void cfs_mt_fini_completion(cfs_mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_destroy(&c->c_mut);
         pthread_cond_destroy(&c->c_cond);
 }
 
-void cfs_complete(struct cfs_completion *c)
+void cfs_mt_complete(cfs_mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_lock(&c->c_mut);
@@ -276,7 +292,7 @@ void cfs_complete(struct cfs_completion *c)
         pthread_mutex_unlock(&c->c_mut);
 }
 
-void cfs_wait_for_completion(struct cfs_completion *c)
+void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_lock(&c->c_mut);
@@ -287,12 +303,12 @@ void cfs_wait_for_completion(struct cfs_completion *c)
 }
 
 /*
- * atomic primitives
+ * Multi-threaded user space atomic primitives
  */
 
 static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;
 
-int cfs_atomic_read(cfs_atomic_t *a)
+int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
 {
         int r;
 
@@ -302,14 +318,14 @@ int cfs_atomic_read(cfs_atomic_t *a)
         return r;
 }
 
-void cfs_atomic_set(cfs_atomic_t *a, int b)
+void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         a->counter = b;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-int cfs_atomic_dec_and_test(cfs_atomic_t *a)
+int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
 {
         int r;
 
@@ -319,20 +335,20 @@ int cfs_atomic_dec_and_test(cfs_atomic_t *a)
         return (r == 0);
 }
 
-void cfs_atomic_inc(cfs_atomic_t *a)
+void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         ++a->counter;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-void cfs_atomic_dec(cfs_atomic_t *a)
+void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         --a->counter;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-void cfs_atomic_add(int b, cfs_atomic_t *a)
+void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
@@ -340,7 +356,7 @@ void cfs_atomic_add(int b, cfs_atomic_t *a)
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-void cfs_atomic_sub(int b, cfs_atomic_t *a)
+void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         a->counter -= b;
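
Note: the behavioural change in this patch (beyond the cfs_ renames) is that the single-threaded completion code now goes through cfs_call_wait_handler(), which returns -ENOSYS when no handler has been registered, instead of testing the wait_handler pointer at each call site. The following is a minimal standalone sketch of that polling pattern, not part of the patch; the typedef, demo_handler() and the main() driver are illustrative stand-ins for the real libcfs definitions.

/*
 * Sketch of the wait-handler polling pattern used by
 * cfs_wait_for_completion() after this change. Assumes nothing from
 * libcfs; all names below except cfs_init_completion_module() and
 * cfs_call_wait_handler() are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

typedef int (*cfs_wait_handler_t)(int timeout);

static cfs_wait_handler_t wait_handler;

static void cfs_init_completion_module(cfs_wait_handler_t handler)
{
        wait_handler = handler;
}

static int cfs_call_wait_handler(int timeout)
{
        if (!wait_handler)
                return -ENOSYS;         /* no handler registered */
        return wait_handler(timeout);
}

/* Illustrative handler; in liblustre this would drive network progress. */
static int demo_handler(int timeout)
{
        printf("waiting up to %d ms\n", timeout);
        return 0;
}

int main(void)
{
        int done = 0;

        cfs_init_completion_module(demo_handler);

        /* Same shape as the loop in cfs_wait_for_completion(). */
        do {
                if (cfs_call_wait_handler(1000) < 0)
                        break;
                done = 1;               /* normally set by cfs_complete() */
        } while (done == 0);

        return 0;
}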