#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#endif
+
#define LASSERT_SEM_LOCKED(sem) LASSERT(down_trylock(sem) != 0)
+#define LASSERT_MUTEX_LOCKED(x) LASSERT(mutex_is_locked(x))
#ifdef HAVE_SEM_COUNT_ATOMIC
#define SEM_COUNT(sem) (atomic_read(&(sem)->count))
*/
typedef struct rw_semaphore cfs_rw_semaphore_t;
-#define cfs_sema_init(s, val) sema_init(s, val)
#define cfs_init_rwsem(s) init_rwsem(s)
#define cfs_down_read(s) down_read(s)
#define cfs_down_read_trylock(s) down_read_trylock(s)
#define CFS_DECLARE_RWSEM(name) DECLARE_RWSEM(name)
/*
- * semaphore "implementation" (use Linux kernel's primitives)
- */
-typedef struct semaphore cfs_semaphore_t;
-
-/*
* rwlock_t "implementation" (use Linux kernel's primitives)
*
* - rwlock_init(x)
#define cfs_fini_completion(c) do { } while (0)
/*
+ * semaphore "implementation" (use Linux kernel's primitives)
+ * - DEFINE_SEMAPHORE(name)
+ * - sema_init(sem, val)
+ * - up(sem)
+ * - down(sem)
+ * - down_interruptible(sem)
+ * - down_trylock(sem)
+ */
+typedef struct semaphore cfs_semaphore_t;
+
+#ifdef DEFINE_SEMAPHORE
+#define CFS_DEFINE_SEMAPHORE(name) DEFINE_SEMAPHORE(name)
+#else
+#define CFS_DEFINE_SEMAPHORE(name) DECLARE_MUTEX(name)
+#endif
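+/* Compatibility note (editorially assumed, not from this patch): the
+ * kernel gained DEFINE_SEMAPHORE() around 2.6.37, the same release
+ * that dropped DECLARE_MUTEX(); both declare a "struct semaphore"
+ * initialised to a count of 1, so older kernels fall back to
+ * DECLARE_MUTEX() here. */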
+
+#define cfs_sema_init(sem, val) sema_init(sem, val)
+#define cfs_up(x) up(x)
+#define cfs_down(x) down(x)
+#define cfs_down_interruptible(x) down_interruptible(x)
+#define cfs_down_trylock(x) down_trylock(x)
+
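+/* Usage sketch, as a hypothetical caller of the wrappers above
+ * (illustration only, compiled out): */
+#if 0
+static CFS_DEFINE_SEMAPHORE(example_sem); /* binary semaphore, count 1 */
+
+static void example_critical_section(void)
+{
+        cfs_down(&example_sem);            /* may sleep, uninterruptible */
+        /* ... touch state shared with other sleepers ... */
+        cfs_up(&example_sem);
+}
+#endif
+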
+/*
* mutex "implementation" (use Linux kernel's primitives)
*
- * - DECLARE_MUTEX(name)
+ * - DEFINE_MUTEX(name)
* - mutex_init(x)
- * - init_mutex(x)
- * - init_mutex_locked(x)
- * - init_MUTEX_LOCKED(x)
- * - mutex_up(x)
- * - mutex_down(x)
- * - up(x)
- * - down(x)
- * - mutex_down_trylock(x)
* - mutex_lock(x)
* - mutex_unlock(x)
+ * - mutex_trylock(x)
+ * - mutex_is_locked(x)
+ * - mutex_destroy(x)
*/
typedef struct mutex cfs_mutex_t;
#define CFS_DEFINE_MUTEX(name) DEFINE_MUTEX(name)
-#define CFS_DECLARE_MUTEX(name) DECLARE_MUTEX(name)
#define cfs_mutex_init(x) mutex_init(x)
-#define cfs_init_mutex(x) init_MUTEX(x)
-#define cfs_init_mutex_locked(x) init_MUTEX_LOCKED(x)
-#define cfs_mutex_up(x) up(x)
-#define cfs_mutex_down(x) down(x)
-#define cfs_up(x) up(x)
-#define cfs_down(x) down(x)
-#define cfs_down_interruptible(x) down_interruptible(x)
-#define cfs_mutex_down_trylock(x) down_trylock(x)
#define cfs_mutex_lock(x) mutex_lock(x)
#define cfs_mutex_unlock(x) mutex_unlock(x)
+#define cfs_mutex_lock_interruptible(x) mutex_lock_interruptible(x)
#define cfs_mutex_trylock(x) mutex_trylock(x)
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-
-/**************************************************************************
- *
- * Mutex interface from newer Linux kernels.
- *
- * this augments compatibility interface from include/linux/mutex.h
- *
- **************************************************************************/
-
-static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
-{
-}
-
-/*
- * This is for use in assertions _only_, i.e., this function should always
- * return 1.
- *
- * \retval 1 mutex is locked.
- *
- * \retval 0 mutex is not locked. This should never happen.
- */
-static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
-{
- return 1;
-}
-#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) */
-#define cfs_mutex_destroy(x) mutex_destroy(x)
-#define cfs_mutex_is_locked(x) mutex_is_locked(x)
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) */
+#define cfs_mutex_is_locked(x) mutex_is_locked(x)
+#define cfs_mutex_destroy(x) mutex_destroy(x)
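+
+/* Usage sketch, as a hypothetical caller of the mutex wrappers above
+ * (illustration only, compiled out): */
+#if 0
+static CFS_DEFINE_MUTEX(example_mutex);
+
+static void example_locked_update(void)
+{
+        cfs_mutex_lock(&example_mutex);
+        LASSERT_MUTEX_LOCKED(&example_mutex); /* cheap sanity check */
+        /* ... modify shared state ... */
+        cfs_mutex_unlock(&example_mutex);
+}
+#endif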
/*
* Kernel locking primitives
#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
+#define LASSERT_MUTEX_LOCKED(x) do {(void)sizeof(x);} while(0)
void cfs_spin_lock_init(cfs_spinlock_t *lock);
void cfs_spin_lock(cfs_spinlock_t *lock);
} cfs_semaphore_t;
void cfs_sema_init(cfs_semaphore_t *s, int val);
-void __down(cfs_semaphore_t *s);
void __up(cfs_semaphore_t *s);
+void __down(cfs_semaphore_t *s);
+int __down_interruptible(cfs_semaphore_t *s);
+
+#define CFS_DEFINE_SEMAPHORE(name) cfs_semaphore_t name = { 1 }
+
+#define cfs_up(s) __up(s)
+#define cfs_down(s) __down(s)
+#define cfs_down_interruptible(s) __down_interruptible(s)
+
+static inline int cfs_down_trylock(cfs_semaphore_t *sem)
+{
+ return 0;
+}
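+
+/* Note: these stubs mirror the kernel convention where down_trylock()
+ * returns 0 when the semaphore was acquired and non-zero on contention;
+ * this userspace build is single-threaded, so the "lock" is always free
+ * and the stub unconditionally reports success. */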
/*
* Completion:
* Mutex interface.
*
**************************************************************************/
-#define CFS_DECLARE_MUTEX(name) \
- cfs_semaphore_t name = { 1 }
-
-#define cfs_mutex_up(s) __up(s)
-#define cfs_up(s) cfs_mutex_up(s)
-#define cfs_mutex_down(s) __down(s)
-#define cfs_down(s) cfs_mutex_down(s)
-#define cfs_mutex_down_interruptible(s) __down_interruptible(s)
-#define cfs_down_interruptible(s) cfs_mutex_down_interruptible(s)
+typedef struct cfs_semaphore cfs_mutex_t;
-#define cfs_init_mutex(x) cfs_sema_init(x, 1)
-#define cfs_init_mutex_locked(x) cfs_sema_init(x, 0)
-
-typedef struct cfs_mutex {
- cfs_semaphore_t m_sem;
-} cfs_mutex_t;
-
-#define CFS_DEFINE_MUTEX(m) cfs_mutex_t m
+#define CFS_DEFINE_MUTEX(m) CFS_DEFINE_SEMAPHORE(m)
static inline void cfs_mutex_init(cfs_mutex_t *mutex)
{
- cfs_init_mutex(&mutex->m_sem);
+ cfs_sema_init(mutex, 1);
}
static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
{
- cfs_mutex_down(&mutex->m_sem);
+ cfs_down(mutex);
}
static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
{
- cfs_mutex_up(&mutex->m_sem);
+ cfs_up(mutex);
}
-/**
- * Try-lock this mutex.
- *
- *
- * \retval 0 try-lock succeeded (lock acquired).
- * \retval errno indicates lock contention.
- */
-static inline int cfs_mutex_down_trylock(cfs_mutex_t *mutex)
+static inline int cfs_mutex_lock_interruptible(cfs_mutex_t *mutex)
{
- return 0;
+ return cfs_down_interruptible(mutex);
}
/**
*/
static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
{
- return !cfs_mutex_down_trylock(mutex);
+ return !cfs_down_trylock(mutex);
}
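/* Note the inverted return conventions bridged above: cfs_down_trylock()
 * follows down_trylock() (0 == acquired), while cfs_mutex_trylock()
 * follows mutex_trylock() (non-zero == acquired), hence the '!'. */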
static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
extern cfs_psdev_t libcfs_dev;
extern cfs_rw_semaphore_t cfs_tracefile_sem;
-extern cfs_semaphore_t cfs_trace_thread_sem;
+extern cfs_mutex_t cfs_trace_thread_mutex;
extern void libcfs_init_nidstrings(void);
extern int libcfs_arch_init(void);
libcfs_arch_init();
libcfs_init_nidstrings();
cfs_init_rwsem(&cfs_tracefile_sem);
- cfs_init_mutex(&cfs_trace_thread_sem);
+ cfs_mutex_init(&cfs_trace_thread_mutex);
cfs_init_rwsem(&ioctl_list_sem);
CFS_INIT_LIST_HEAD(&ioctl_list);
cfs_waitq_init(&cfs_race_waitq);
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
-cfs_semaphore_t cfs_trace_thread_sem;
+cfs_mutex_t cfs_trace_thread_mutex;
static int thread_running = 0;
cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
struct tracefiled_ctl *tctl = &trace_tctl;
int rc = 0;
- cfs_mutex_down(&cfs_trace_thread_sem);
+ cfs_mutex_lock(&cfs_trace_thread_mutex);
if (thread_running)
goto out;
cfs_wait_for_completion(&tctl->tctl_start);
thread_running = 1;
out:
- cfs_mutex_up(&cfs_trace_thread_sem);
+ cfs_mutex_unlock(&cfs_trace_thread_mutex);
return rc;
}
{
struct tracefiled_ctl *tctl = &trace_tctl;
- cfs_mutex_down(&cfs_trace_thread_sem);
+ cfs_mutex_lock(&cfs_trace_thread_mutex);
if (thread_running) {
printk(CFS_KERN_INFO
"Lustre: shutting down debug daemon thread...\n");
cfs_wait_for_completion(&tctl->tctl_stop);
thread_running = 0;
}
- cfs_mutex_up(&cfs_trace_thread_sem);
+ cfs_mutex_unlock(&cfs_trace_thread_mutex);
}
int cfs_tracefile_init(int max_pages)
* When it hits 0, we stop the dispatcher.
*/
static __u32 lcw_refcount = 0;
-static CFS_DECLARE_MUTEX(lcw_refcount_sem);
+static CFS_DEFINE_MUTEX(lcw_refcount_mutex);
/*
* List of timers that have fired that need their callbacks run by the
CFS_INIT_LIST_HEAD(&lcw->lcw_list);
cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
- cfs_down(&lcw_refcount_sem);
+ cfs_mutex_lock(&lcw_refcount_mutex);
if (++lcw_refcount == 1)
lcw_dispatch_start();
- cfs_up(&lcw_refcount_sem);
+ cfs_mutex_unlock(&lcw_refcount_mutex);
/* Keep this working in case we enable them by default */
if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
if (dead)
LIBCFS_FREE(lcw, sizeof(*lcw));
- cfs_down(&lcw_refcount_sem);
+ cfs_mutex_lock(&lcw_refcount_mutex);
if (--lcw_refcount == 0)
lcw_dispatch_stop();
- cfs_up(&lcw_refcount_sem);
+ cfs_mutex_unlock(&lcw_refcount_mutex);
EXIT;
}
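/* The first watchdog to register starts the dispatcher thread and the
 * last one to go away stops it; lcw_refcount_mutex keeps each
 * check-and-start / check-and-stop pair atomic. A sleeping lock is
 * needed here (rather than a spinlock) because starting and stopping
 * the dispatcher blocks until the thread comes up or exits. */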
#ifdef __KERNEL__
#define LNET_LOCK() cfs_spin_lock(&the_lnet.ln_lock)
#define LNET_UNLOCK() cfs_spin_unlock(&the_lnet.ln_lock)
-#define LNET_MUTEX_DOWN(m) cfs_mutex_down(m)
-#define LNET_MUTEX_UP(m) cfs_mutex_up(m)
+#define LNET_MUTEX_LOCK(m) cfs_mutex_lock(m)
+#define LNET_MUTEX_UNLOCK(m) cfs_mutex_unlock(m)
#else
# ifndef HAVE_LIBPTHREAD
#define LNET_SINGLE_THREADED_LOCK(l) \
#define LNET_LOCK() LNET_SINGLE_THREADED_LOCK(the_lnet.ln_lock)
#define LNET_UNLOCK() LNET_SINGLE_THREADED_UNLOCK(the_lnet.ln_lock)
-#define LNET_MUTEX_DOWN(m) LNET_SINGLE_THREADED_LOCK(*(m))
-#define LNET_MUTEX_UP(m) LNET_SINGLE_THREADED_UNLOCK(*(m))
+#define LNET_MUTEX_LOCK(m) LNET_SINGLE_THREADED_LOCK(*(m))
+#define LNET_MUTEX_UNLOCK(m) LNET_SINGLE_THREADED_UNLOCK(*(m))
# else
#define LNET_LOCK() pthread_mutex_lock(&the_lnet.ln_lock)
#define LNET_UNLOCK() pthread_mutex_unlock(&the_lnet.ln_lock)
-#define LNET_MUTEX_DOWN(m) pthread_mutex_lock(m)
-#define LNET_MUTEX_UP(m) pthread_mutex_unlock(m)
+#define LNET_MUTEX_LOCK(m) pthread_mutex_lock(m)
+#define LNET_MUTEX_UNLOCK(m) pthread_mutex_unlock(m)
# endif
#endif
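/* Usage sketch (hypothetical caller, compiled out): the same source now
 * builds against the kernel mutex, the single-threaded userspace stubs,
 * or pthreads, depending on the branch selected above: */
#if 0
static void example_api_call(void)
{
        LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
        /* ... work serialised against other LNet API callers ... */
        LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
}
#endif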
#ifdef __KERNEL__
cfs_spinlock_t ln_lock;
cfs_waitq_t ln_waitq;
- cfs_semaphore_t ln_api_mutex;
- cfs_semaphore_t ln_lnd_mutex;
+ cfs_mutex_t ln_api_mutex;
+ cfs_mutex_t ln_lnd_mutex;
#else
# ifndef HAVE_LIBPTHREAD
int ln_lock;
lnet_ping_info_t *ln_ping_info;
#ifdef __KERNEL__
- cfs_semaphore_t ln_rc_signal; /* serialise startup/shutdown */
+ cfs_semaphore_t ln_rc_signal; /* serialise startup/shutdown */
#endif
- int ln_rc_state; /* router checker startup/shutdown state */
- lnet_handle_eq_t ln_rc_eqh; /* router checker's event queue */
- lnet_handle_md_t ln_rc_mdh;
- cfs_list_t ln_zombie_rcd;
+ int ln_rc_state; /* router checker startup/shutdown state */
+ lnet_handle_eq_t ln_rc_eqh; /* router checker's event queue */
+ lnet_handle_md_t ln_rc_mdh;
+ cfs_list_t ln_zombie_rcd;
#ifdef LNET_USE_LIB_FREELIST
lnet_freelist_t ln_free_mes;
cfs_daemonize("mxlnd_tx_queued");
while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
- ret = down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
+ ret = cfs_down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
break;
if (ret != 0) // Should we check for -EINTR?
cfs_spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
cfs_list_t *conn_reqs = &kmxlnd_data.kmx_conn_reqs;
- ret = down_interruptible(&kmxlnd_data.kmx_conn_sem);
+ ret = cfs_down_interruptible(&kmxlnd_data.kmx_conn_sem);
if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
break;
#include "ptllnd.h"
#ifdef CRAY_XT3
-static cfs_semaphore_t ptltrace_mutex;
+static cfs_mutex_t ptltrace_mutex;
static cfs_waitq_t ptltrace_debug_ctlwq;
void
libcfs_daemonize("kpt_ptltrace_dump");
/* serialise with other instances of me */
- cfs_mutex_down(&ptltrace_mutex);
+ cfs_mutex_lock(&ptltrace_mutex);
snprintf(fname, sizeof(fname), "%s.%ld.%ld",
*kptllnd_tunables.kptl_ptltrace_basename,
kptllnd_ptltrace_to_file(fname);
- cfs_mutex_up(&ptltrace_mutex);
+ cfs_mutex_unlock(&ptltrace_mutex);
/* unblock my creator */
cfs_waitq_signal(&ptltrace_debug_ctlwq);
kptllnd_init_ptltrace(void)
{
cfs_waitq_init(&ptltrace_debug_ctlwq);
- cfs_init_mutex(&ptltrace_mutex);
+ cfs_mutex_init(&ptltrace_mutex);
}
#endif
{
cfs_spin_lock_init (&the_lnet.ln_lock);
cfs_waitq_init (&the_lnet.ln_waitq);
- cfs_init_mutex(&the_lnet.ln_lnd_mutex);
- cfs_init_mutex(&the_lnet.ln_api_mutex);
+ cfs_mutex_init(&the_lnet.ln_lnd_mutex);
+ cfs_mutex_init(&the_lnet.ln_api_mutex);
}
void
void
lnet_register_lnd (lnd_t *lnd)
{
- LNET_MUTEX_DOWN(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
LASSERT (the_lnet.ln_init);
LASSERT (libcfs_isknown_lnd(lnd->lnd_type));
CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
- LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
}
void
lnet_unregister_lnd (lnd_t *lnd)
{
- LNET_MUTEX_DOWN(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
LASSERT (the_lnet.ln_init);
LASSERT (lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
cfs_list_del (&lnd->lnd_list);
CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
- LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
}
#ifndef LNET_USE_LIB_FREELIST
goto failed;
}
- LNET_MUTEX_DOWN(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
lnd = lnet_find_lnd_by_type(lnd_type);
#ifdef __KERNEL__
if (lnd == NULL) {
- LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
rc = cfs_request_module("%s",
libcfs_lnd2modname(lnd_type));
- LNET_MUTEX_DOWN(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex);
lnd = lnet_find_lnd_by_type(lnd_type);
if (lnd == NULL) {
- LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
CERROR("Can't load LND %s, module %s, rc=%d\n",
libcfs_lnd2str(lnd_type),
libcfs_lnd2modname(lnd_type), rc);
}
#else
if (lnd == NULL) {
- LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
CERROR("LND %s not supported\n",
libcfs_lnd2str(lnd_type));
goto failed;
rc = (lnd->lnd_startup)(ni);
- LNET_MUTEX_UP(&the_lnet.ln_lnd_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex);
if (rc != 0) {
LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s"
int im_a_router = 0;
int rc;
- LNET_MUTEX_DOWN(&the_lnet.ln_api_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
LASSERT (the_lnet.ln_init);
CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
failed0:
LASSERT (rc < 0);
out:
- LNET_MUTEX_UP(&the_lnet.ln_api_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
return rc;
}
int
LNetNIFini()
{
- LNET_MUTEX_DOWN(&the_lnet.ln_api_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
lnet_unprepare();
}
- LNET_MUTEX_UP(&the_lnet.ln_api_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
return 0;
}
CFS_MODULE_PARM(config_on_load, "i", int, 0444,
"configure network at module load");
-static cfs_semaphore_t lnet_config_mutex;
+static cfs_mutex_t lnet_config_mutex;
int
lnet_configure (void *arg)
/* 'arg' only there so I can be passed to cfs_create_thread() */
int rc = 0;
- LNET_MUTEX_DOWN(&lnet_config_mutex);
+ LNET_MUTEX_LOCK(&lnet_config_mutex);
if (!the_lnet.ln_niinit_self) {
rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
}
}
- LNET_MUTEX_UP(&lnet_config_mutex);
+ LNET_MUTEX_UNLOCK(&lnet_config_mutex);
return rc;
}
{
int refcount;
- LNET_MUTEX_DOWN(&lnet_config_mutex);
+ LNET_MUTEX_LOCK(&lnet_config_mutex);
if (the_lnet.ln_niinit_self) {
the_lnet.ln_niinit_self = 0;
LNetNIFini();
}
- LNET_MUTEX_DOWN(&the_lnet.ln_api_mutex);
+ LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
refcount = the_lnet.ln_refcount;
- LNET_MUTEX_UP(&the_lnet.ln_api_mutex);
+ LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex);
- LNET_MUTEX_UP(&lnet_config_mutex);
+ LNET_MUTEX_UNLOCK(&lnet_config_mutex);
return (refcount == 0) ? 0 : -EBUSY;
}
int rc;
ENTRY;
- cfs_init_mutex(&lnet_config_mutex);
+ cfs_mutex_init(&lnet_config_mutex);
rc = LNetInit();
if (rc != 0) {
LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
#ifdef __KERNEL__
- cfs_mutex_up(&the_lnet.ln_rc_signal);
+ cfs_up(&the_lnet.ln_rc_signal);
#endif
return;
}
return 0;
#ifdef __KERNEL__
- cfs_init_mutex_locked(&the_lnet.ln_rc_signal);
+ cfs_sema_init(&the_lnet.ln_rc_signal, 0);
/* EQ size doesn't matter; the callback is guaranteed to get every
* event */
eqsz = 1;
rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
LASSERT (rc == 0);
/* block until event callback signals exit */
- cfs_mutex_down(&the_lnet.ln_rc_signal);
+ cfs_down(&the_lnet.ln_rc_signal);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT (rc == 0);
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
#ifdef __KERNEL__
/* block until event callback signals exit */
- cfs_mutex_down(&the_lnet.ln_rc_signal);
+ cfs_down(&the_lnet.ln_rc_signal);
#else
while (the_lnet.ln_rc_state != LNET_RC_STATE_UNLINKED) {
lnet_router_checker();
return -EFAULT;
}
- cfs_mutex_down(&console_session.ses_mutex);
+ cfs_mutex_lock(&console_session.ses_mutex);
console_session.ses_laststamp = cfs_time_current_sec();
sizeof(lstcon_trans_stat_t)))
rc = -EFAULT;
out:
- cfs_mutex_up(&console_session.ses_mutex);
+ cfs_mutex_unlock(&console_session.ses_mutex);
LIBCFS_FREE(buf, data->ioc_plen1);
lstcon_rpc_post(crpc);
}
- cfs_mutex_up(&console_session.ses_mutex);
+ cfs_mutex_unlock(&console_session.ses_mutex);
cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
lstcon_rpc_trans_check(trans),
rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
- cfs_mutex_down(&console_session.ses_mutex);
+ cfs_mutex_lock(&console_session.ses_mutex);
if (console_session.ses_shutdown)
rc = -ESHUTDOWN;
/* RPC pinger is a special case of transaction,
* it's called by timer at 8 seconds interval.
*/
- cfs_mutex_down(&console_session.ses_mutex);
+ cfs_mutex_lock(&console_session.ses_mutex);
if (console_session.ses_shutdown || console_session.ses_expired) {
- cfs_mutex_up(&console_session.ses_mutex);
+ cfs_mutex_unlock(&console_session.ses_mutex);
return;
}
}
if (console_session.ses_expired) {
- cfs_mutex_up(&console_session.ses_mutex);
+ cfs_mutex_unlock(&console_session.ses_mutex);
return;
}
ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
- cfs_mutex_up(&console_session.ses_mutex);
+ cfs_mutex_unlock(&console_session.ses_mutex);
}
int
cfs_waitq_signal(&trans->tas_waitq);
}
- cfs_mutex_up(&console_session.ses_mutex);
+ cfs_mutex_unlock(&console_session.ses_mutex);
CWARN("Session is shutting down, "
"waiting for termination of transactions\n");
cfs_pause(cfs_time_seconds(1));
- cfs_mutex_down(&console_session.ses_mutex);
+ cfs_mutex_lock(&console_session.ses_mutex);
}
cfs_spin_lock(&console_session.ses_rpc_lock);
sfw_unpack_message(req);
- cfs_mutex_down(&console_session.ses_mutex);
+ cfs_mutex_lock(&console_session.ses_mutex);
jrep->join_sid = console_session.ses_id;
if (grp != NULL)
lstcon_group_put(grp);
- cfs_mutex_up(&console_session.ses_mutex);
+ cfs_mutex_unlock(&console_session.ses_mutex);
return rc;
}
console_session.ses_expired = 0;
console_session.ses_laststamp = cfs_time_current_sec();
- cfs_init_mutex(&console_session.ses_mutex);
+ cfs_mutex_init(&console_session.ses_mutex);
CFS_INIT_LIST_HEAD(&console_session.ses_ndl_list);
CFS_INIT_LIST_HEAD(&console_session.ses_grp_list);
libcfs_deregister_ioctl(&lstcon_ioctl_handler);
- cfs_mutex_down(&console_session.ses_mutex);
+ cfs_mutex_lock(&console_session.ses_mutex);
srpc_shutdown_service(&lstcon_acceptor_service);
srpc_remove_service(&lstcon_acceptor_service);
lstcon_rpc_module_fini();
- cfs_mutex_up(&console_session.ses_mutex);
+ cfs_mutex_unlock(&console_session.ses_mutex);
LASSERT (cfs_list_empty(&console_session.ses_ndl_list));
LASSERT (cfs_list_empty(&console_session.ses_grp_list));
#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */
typedef struct {
- cfs_semaphore_t ses_mutex; /* lock for session, only one thread can enter session */
+ cfs_mutex_t ses_mutex; /* lock for session, only one thread can enter session */
lst_sid_t ses_id; /* global session id */
int ses_key; /* local session key */
int ses_state; /* state of session */
* Ask client for new range, assign that range to ->seq_space and write
* seq state to backing store should be atomic.
*/
- cfs_down(&seq->lss_sem);
+ cfs_mutex_lock(&seq->lss_mutex);
if (cli == NULL) {
CDEBUG(D_INFO, "%s: Detached sequence client %s\n",
cli->lcs_space.lsr_index = seq->lss_site->ms_node_id;
EXIT;
out_up:
- cfs_up(&seq->lss_sem);
+ cfs_mutex_unlock(&seq->lss_mutex);
return rc;
}
EXPORT_SYMBOL(seq_server_set_cli);
int rc;
ENTRY;
- cfs_down(&seq->lss_sem);
+ cfs_mutex_lock(&seq->lss_mutex);
rc = __seq_server_alloc_super(seq, out, env);
- cfs_up(&seq->lss_sem);
+ cfs_mutex_unlock(&seq->lss_mutex);
RETURN(rc);
}
int rc;
ENTRY;
- cfs_down(&seq->lss_sem);
+ cfs_mutex_lock(&seq->lss_mutex);
rc = __seq_server_alloc_meta(seq, out, env);
- cfs_up(&seq->lss_sem);
+ cfs_mutex_unlock(&seq->lss_mutex);
RETURN(rc);
}
range_init(&seq->lss_hiwater_set);
seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;
- cfs_sema_init(&seq->lss_sem, 1);
+ cfs_mutex_init(&seq->lss_mutex);
seq->lss_width = is_srv ?
LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;
int rc;
ENTRY;
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
#ifdef __KERNEL__
if (seq->lcs_srv) {
#ifdef __KERNEL__
}
#endif
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
if (seq->lcs_update) {
cfs_waitq_add(&seq->lcs_waitq, link);
cfs_set_current_state(CFS_TASK_UNINT);
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
cfs_waitq_wait(link, CFS_TASK_UNINT);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
cfs_waitq_del(&seq->lcs_waitq, link);
cfs_set_current_state(CFS_TASK_RUNNING);
return -EAGAIN;
}
++seq->lcs_update;
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
return 0;
}
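/* Pattern used by seq_fid_alloc_prep()/seq_fid_alloc_fini(): the thread
 * that wins sets lcs_update and drops lcs_mutex while it asks the server
 * for a new sequence; every other thread parks on lcs_waitq with the
 * mutex released and retries (-EAGAIN) once woken, so the mutex is never
 * held across the network round trip. */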
static void seq_fid_alloc_fini(struct lu_client_seq *seq)
{
LASSERT(seq->lcs_update == 1);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
--seq->lcs_update;
cfs_waitq_signal(&seq->lcs_waitq);
}
int rc;
LASSERT(seqnr != NULL);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
cfs_waitlink_init(&link);
while (1) {
CERROR("%s: Can't allocate new sequence, "
"rc %d\n", seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
return rc;
}
* to setup FLD for it.
*/
seq_fid_alloc_fini(seq);
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
return rc;
}
LASSERT(fid != NULL);
cfs_waitlink_init(&link);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
while (1) {
seqno_t seqnr;
CERROR("%s: Can't allocate new sequence, "
"rc %d\n", seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
}
*fid = seq->lcs_fid;
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name, PFID(fid));
RETURN(rc);
LASSERT(seq != NULL);
cfs_waitlink_init(&link);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
while (seq->lcs_update) {
cfs_waitq_add(&seq->lcs_waitq, &link);
cfs_set_current_state(CFS_TASK_UNINT);
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
cfs_waitq_wait(&link, CFS_TASK_UNINT);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
cfs_waitq_del(&seq->lcs_waitq, &link);
cfs_set_current_state(CFS_TASK_RUNNING);
}
seq->lcs_space.lsr_index = -1;
range_init(&seq->lcs_space);
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
}
EXPORT_SYMBOL(seq_client_flush);
seq->lcs_exp = exp;
seq->lcs_srv = srv;
seq->lcs_type = type;
- cfs_sema_init(&seq->lcs_sem, 1);
+ cfs_mutex_init(&seq->lcs_mutex);
seq->lcs_width = LUSTRE_SEQ_MAX_WIDTH;
cfs_waitq_init(&seq->lcs_waitq);
LASSERT(seq != NULL);
- cfs_down(&seq->lss_sem);
+ cfs_mutex_lock(&seq->lss_mutex);
rc = seq_proc_write_common(file, buffer, count,
data, &seq->lss_space);
if (rc == 0) {
seq->lss_name, PRANGE(&seq->lss_space));
}
- cfs_up(&seq->lss_sem);
+ cfs_mutex_unlock(&seq->lss_mutex);
RETURN(count);
}
LASSERT(seq != NULL);
- cfs_down(&seq->lss_sem);
+ cfs_mutex_lock(&seq->lss_mutex);
rc = seq_proc_read_common(page, start, off, count, eof,
data, &seq->lss_space);
- cfs_up(&seq->lss_sem);
+ cfs_mutex_unlock(&seq->lss_mutex);
RETURN(rc);
}
LASSERT(seq != NULL);
- cfs_down(&seq->lss_sem);
+ cfs_mutex_lock(&seq->lss_mutex);
rc = lprocfs_write_helper(buffer, count, &val);
if (rc)
seq->lss_name, seq->lss_width);
}
- cfs_up(&seq->lss_sem);
+ cfs_mutex_unlock(&seq->lss_mutex);
RETURN(count);
}
LASSERT(seq != NULL);
- cfs_down(&seq->lss_sem);
+ cfs_mutex_lock(&seq->lss_mutex);
rc = snprintf(page, count, LPU64"\n", seq->lss_width);
- cfs_up(&seq->lss_sem);
+ cfs_mutex_unlock(&seq->lss_mutex);
RETURN(rc);
}
LASSERT(seq != NULL);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
rc = seq_proc_write_common(file, buffer, count,
data, &seq->lcs_space);
seq->lcs_name, PRANGE(&seq->lcs_space));
}
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
RETURN(count);
}
LASSERT(seq != NULL);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
rc = seq_proc_read_common(page, start, off, count, eof,
data, &seq->lcs_space);
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
LASSERT(seq != NULL);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
rc = lprocfs_write_helper(buffer, count, &val);
if (rc) {
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
}
}
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
RETURN(count);
}
LASSERT(seq != NULL);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
rc = snprintf(page, count, LPU64"\n", seq->lcs_width);
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
LASSERT(seq != NULL);
- cfs_down(&seq->lcs_sem);
+ cfs_mutex_lock(&seq->lcs_mutex);
rc = snprintf(page, count, DFID"\n", PFID(&seq->lcs_fid));
- cfs_up(&seq->lcs_sem);
+ cfs_mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
int (* fs_map_inode_pages)(struct inode *inode, struct page **page,
int pages, unsigned long *blocks,
int *created, int create,
- cfs_semaphore_t *sem);
+ cfs_mutex_t *mutex);
int (* fs_write_record)(struct file *, void *, int size, loff_t *,
int force_sync);
int (* fs_read_record)(struct file *, void *, int size, loff_t *);
struct inode *inode,
struct page **page, int pages,
unsigned long *blocks, int *created,
- int create, cfs_semaphore_t *sem)
+ int create, cfs_mutex_t *mutex)
{
return obd->obd_fsops->fs_map_inode_pages(inode, page, pages, blocks,
- created, create, sem);
+ created, create, mutex);
}
static inline int fsfilt_read_record(struct obd_device *obd, struct file *file,
* lr_lock
* ns_lock
*
- * lr_lvb_sem
+ * lr_lvb_mutex
* lr_lock
*
*/
/* Server-side-only lock value block elements */
/** to serialize lvbo_init */
- cfs_semaphore_t lr_lvb_sem;
+ cfs_mutex_t lr_lvb_mutex;
__u32 lr_lvb_len;
/** protect by lr_lock */
void *lr_lvb_data;
*/
struct tg_export_data {
/** Protects ted_lcd below */
- cfs_semaphore_t ted_lcd_lock;
+ cfs_mutex_t ted_lcd_lock;
/** Per-client data for each export */
struct lsd_client_data *ted_lcd;
/** Offset of record in last_rcvd file */
cfs_spinlock_t med_open_lock; /* lock med_open_head, mfd_list*/
/** Bitmask of all ibit locks this MDT understands */
__u64 med_ibits_known;
- cfs_semaphore_t med_idmap_sem;
+ cfs_mutex_t med_idmap_mutex;
struct lustre_idmap_table *med_idmap;
};
struct lu_client_seq {
/* Sequence-controller export. */
struct obd_export *lcs_exp;
- cfs_semaphore_t lcs_sem;
+ cfs_mutex_t lcs_mutex;
/*
* Range of sequences allowed for allocation. When using lu_client_seq on
/* Client interface to request controller */
struct lu_client_seq *lss_cli;
- /* Semaphore for protecting allocation */
- cfs_semaphore_t lss_sem;
+ /* Mutex for protecting allocation */
+ cfs_mutex_t lss_mutex;
/*
* Service uuid, passed from MDT + seq name to form unique seq name to
* @{
*/
struct ptlrpc_sec *imp_sec;
- cfs_semaphore_t imp_sec_mutex;
+ cfs_mutex_t imp_sec_mutex;
cfs_time_t imp_sec_expire;
/** @} */
* under ->lco_lock.
*/
__u64 lco_flags;
- cfs_semaphore_t lco_lock;
+ cfs_mutex_t lco_lock;
struct obd_export *lco_md_exp;
struct obd_export *lco_dt_exp;
};
struct llog_handle *loc_handle;
struct llog_commit_master *loc_lcm;
struct llog_canceld_ctxt *loc_llcd;
- cfs_semaphore_t loc_sem; /* protects loc_llcd and loc_imp */
+ cfs_mutex_t loc_mutex; /* protects loc_llcd and loc_imp */
cfs_atomic_t loc_refcount;
void *llog_proc_cb;
long loc_flags; /* flags, see above defines */
{
cfs_waitq_init(&olg->olg_waitq);
cfs_spin_lock_init(&olg->olg_lock);
- cfs_sema_init(&olg->olg_cat_processing, 1);
+ cfs_mutex_init(&olg->olg_cat_processing);
olg->olg_seq = group;
}
struct obd_device;
struct mdc_rpc_lock {
- cfs_semaphore_t rpcl_sem;
+ cfs_mutex_t rpcl_mutex;
struct lookup_intent *rpcl_it;
};
static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
{
- cfs_sema_init(&lck->rpcl_sem, 1);
+ cfs_mutex_init(&lck->rpcl_mutex);
lck->rpcl_it = NULL;
}
{
ENTRY;
if (!it || (it->it_op != IT_GETATTR && it->it_op != IT_LOOKUP)) {
- cfs_down(&lck->rpcl_sem);
+ cfs_mutex_lock(&lck->rpcl_mutex);
LASSERT(lck->rpcl_it == NULL);
lck->rpcl_it = it;
}
if (!it || (it->it_op != IT_GETATTR && it->it_op != IT_LOOKUP)) {
LASSERT(it == lck->rpcl_it);
lck->rpcl_it = NULL;
- cfs_up(&lck->rpcl_sem);
+ cfs_mutex_unlock(&lck->rpcl_mutex);
}
EXIT;
}
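/* Only non-GETATTR/LOOKUP intents take rpcl_mutex, so metadata-modifying
 * RPCs to the MDS are issued one at a time while lightweight lookups
 * bypass the lock entirely; rpcl_it records which intent currently owns
 * the lock, for the LASSERTs above. */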
/** Hash list in memory, protect by dquot_hash_lock */
cfs_list_t dq_hash;
/** Protect the data in lustre_dquot */
- cfs_semaphore_t dq_sem;
+ cfs_mutex_t dq_mutex;
/** Use count */
cfs_atomic_t dq_refcnt;
/** Pointer of quota info it belongs to */
cfs_dentry_t *fo_dentry_O;
cfs_dentry_t **fo_dentry_O_groups;
struct filter_subdirs *fo_dentry_O_sub;
- cfs_semaphore_t fo_init_lock; /* group initialization lock */
+ cfs_mutex_t fo_init_lock; /* group initialization lock */
int fo_committed_group;
cfs_spinlock_t fo_objidlock; /* protect fo_lastobjid */
unsigned long fo_destroys_in_progress;
- cfs_semaphore_t fo_create_locks[FILTER_SUBDIR_COUNT];
+ cfs_mutex_t fo_create_locks[FILTER_SUBDIR_COUNT];
cfs_list_t fo_export_list;
int fo_subdir_count;
__u64 *fo_last_objids; /* last created objid for groups,
* protected by fo_objidlock */
- cfs_semaphore_t fo_alloc_lock;
+ cfs_mutex_t fo_alloc_lock;
cfs_atomic_t fo_r_in_flight;
cfs_atomic_t fo_w_in_flight;
cfs_list_t cl_cache_waiters; /* waiting for cache/grant */
cfs_time_t cl_next_shrink_grant; /* jiffies */
cfs_list_t cl_grant_shrink_list; /* Timeout event list */
- cfs_semaphore_t cl_grant_sem; /*grant shrink list cfs_semaphore*/
int cl_grant_shrink_interval; /* seconds */
/* keep track of objects that have lois that contain pages which
struct super_block *mgs_sb;
struct dentry *mgs_configs_dir;
cfs_list_t mgs_fs_db_list;
- cfs_semaphore_t mgs_sem;
+ cfs_mutex_t mgs_mutex;
cfs_proc_dir_entry_t *mgs_proc_live;
cfs_time_t mgs_start_time;
};
int mds_max_cookiesize;
__u64 mds_io_epoch;
unsigned long mds_atime_diff;
- cfs_semaphore_t mds_epoch_sem;
struct ll_fid mds_rootfid;
cfs_dentry_t *mds_pending_dir;
cfs_dentry_t *mds_logs_dir;
struct lustre_quota_info mds_quota_info;
cfs_rw_semaphore_t mds_qonoff_sem;
- cfs_semaphore_t mds_health_sem;
unsigned long mds_fl_user_xattr:1,
mds_fl_acl:1,
mds_evict_ost_nids:1,
struct ptlrpc_service *ost_service;
struct ptlrpc_service *ost_create_service;
struct ptlrpc_service *ost_io_service;
- cfs_semaphore_t ost_health_sem;
+ cfs_mutex_t ost_health_mutex;
};
struct echo_client_obd {
struct lov_tgt_desc **lov_tgts; /* sparse array */
struct ost_pool lov_packed; /* all OSTs in a packed
array */
- cfs_semaphore_t lov_lock;
+ cfs_mutex_t lov_lock;
struct obd_connect_data lov_ocd;
struct lov_qos lov_qos; /* qos info per lov */
cfs_atomic_t lov_refcount;
struct obd_export *ltd_exp;
int ltd_active; /* is this target up for requests */
int ltd_idx;
- cfs_semaphore_t ltd_fid_sem;
+ cfs_mutex_t ltd_fid_mutex;
};
enum placement_policy {
int max_def_easize;
int max_cookiesize;
int server_timeout;
- cfs_semaphore_t init_sem;
+ cfs_mutex_t init_mutex;
struct lmv_tgt_desc *tgts;
int tgts_size;
cfs_spinlock_t olg_lock;
struct obd_export *olg_exp;
int olg_initializing;
- cfs_semaphore_t olg_cat_processing;
+ cfs_mutex_t olg_cat_processing;
};
/* corresponds to one of the obd's */
struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
/* a spinlock is OK for what we do now, may need a semaphore later */
cfs_spinlock_t obd_dev_lock; /* protects obd bitfield above */
- cfs_semaphore_t obd_dev_sem;
+ cfs_mutex_t obd_dev_mutex;
__u64 obd_last_committed;
struct fsfilt_operations *obd_fsops;
cfs_spinlock_t obd_osfs_lock;
flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
CDEBUG(D_SUPER, "Changing connect_flags: "LPX64" -> "LPX64"\n",
lco->lco_flags, flags);
- cfs_mutex_down(&lco->lco_lock);
+ cfs_mutex_lock(&lco->lco_lock);
lco->lco_flags &= flags;
/* for each osc event update ea size */
if (lco->lco_dt_exp)
cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp);
- cfs_mutex_up(&lco->lco_lock);
+ cfs_mutex_unlock(&lco->lco_lock);
result = 0;
} else {
CERROR("unexpected notification from %s %s!\n",
extern cfs_atomic_t ldlm_srv_namespace_nr;
extern cfs_atomic_t ldlm_cli_namespace_nr;
-extern cfs_semaphore_t ldlm_srv_namespace_lock;
+extern cfs_mutex_t ldlm_srv_namespace_lock;
extern cfs_list_t ldlm_srv_namespace_list;
-extern cfs_semaphore_t ldlm_cli_namespace_lock;
+extern cfs_mutex_t ldlm_cli_namespace_lock;
extern cfs_list_t ldlm_cli_namespace_list;
static inline cfs_atomic_t *ldlm_namespace_nr(ldlm_side_t client)
&ldlm_srv_namespace_list : &ldlm_cli_namespace_list;
}
-static inline cfs_semaphore_t *ldlm_namespace_lock(ldlm_side_t client)
+static inline cfs_mutex_t *ldlm_namespace_lock(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
extern cfs_mem_cache_t *ldlm_resource_slab;
extern cfs_mem_cache_t *ldlm_lock_slab;
-static cfs_semaphore_t ldlm_ref_sem;
+static cfs_mutex_t ldlm_ref_mutex;
static int ldlm_refcount;
struct ldlm_cb_async_args {
{
int rc = 0;
ENTRY;
- cfs_mutex_down(&ldlm_ref_sem);
+ cfs_mutex_lock(&ldlm_ref_mutex);
if (++ldlm_refcount == 1) {
rc = ldlm_setup();
if (rc)
ldlm_refcount--;
}
- cfs_mutex_up(&ldlm_ref_sem);
+ cfs_mutex_unlock(&ldlm_ref_mutex);
RETURN(rc);
}
void ldlm_put_ref(void)
{
ENTRY;
- cfs_mutex_down(&ldlm_ref_sem);
+ cfs_mutex_lock(&ldlm_ref_mutex);
if (ldlm_refcount == 1) {
int rc = ldlm_cleanup();
if (rc)
} else {
ldlm_refcount--;
}
- cfs_mutex_up(&ldlm_ref_sem);
+ cfs_mutex_unlock(&ldlm_ref_mutex);
EXIT;
}
int ldlm_init(void)
{
- cfs_init_mutex(&ldlm_ref_sem);
- cfs_init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
- cfs_init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ cfs_mutex_init(&ldlm_ref_mutex);
+ cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
+ cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
sizeof(struct ldlm_resource), 0,
CFS_SLAB_HWCACHE_ALIGN);
for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
nr_ns > 0; nr_ns--)
{
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
if (cfs_list_empty(ldlm_namespace_list(client))) {
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
cl_env_reexit(cookie);
return 0;
}
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
ldlm_namespace_put(ns);
}
/*
* Do not call shrink under ldlm_namespace_lock(client)
*/
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
if (cfs_list_empty(ldlm_namespace_list(client))) {
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
/*
* If list is empty, we can't return any @cached > 0,
* that probably would cause needless shrinker
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
cancel = 1 + nr_locks * nr / total;
/*
* Check all modest namespaces first.
*/
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
ns_list_chain)
{
}
ldlm_pool_setup(&ns->ns_pool, l);
}
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
}
/*
* rid of potential deadlock on client nodes when canceling
* locks synchronously.
*/
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
if (cfs_list_empty(ldlm_namespace_list(client))) {
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
break;
}
ns = ldlm_namespace_first_locked(client);
cfs_spin_unlock(&ns->ns_lock);
ldlm_namespace_move_locked(ns, client);
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
/*
* After setup is done - recalc the pool.
cfs_atomic_t ldlm_srv_namespace_nr = CFS_ATOMIC_INIT(0);
cfs_atomic_t ldlm_cli_namespace_nr = CFS_ATOMIC_INIT(0);
-cfs_semaphore_t ldlm_srv_namespace_lock;
+cfs_mutex_t ldlm_srv_namespace_lock;
CFS_LIST_HEAD(ldlm_srv_namespace_list);
-cfs_semaphore_t ldlm_cli_namespace_lock;
+cfs_mutex_t ldlm_cli_namespace_lock;
CFS_LIST_HEAD(ldlm_cli_namespace_list);
cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
/* Register @ns in the list of namespaces */
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
{
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
LASSERT(cfs_list_empty(&ns->ns_list_chain));
cfs_list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
cfs_atomic_inc(ldlm_namespace_nr(client));
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
}
/* Unregister @ns from the list of namespaces */
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
{
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
LASSERT(!cfs_list_empty(&ns->ns_list_chain));
/*
* Some asserts and possibly other parts of code still using
*/
cfs_list_del_init(&ns->ns_list_chain);
cfs_atomic_dec(ldlm_namespace_nr(client));
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
}
/* Should be called under ldlm_namespace_lock(client) taken */
void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
{
LASSERT(!cfs_list_empty(&ns->ns_list_chain));
- LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
+ LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
cfs_list_move_tail(&ns->ns_list_chain, ldlm_namespace_list(client));
}
/* Should be called under ldlm_namespace_lock(client) taken */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
{
- LASSERT_SEM_LOCKED(ldlm_namespace_lock(client));
+ LASSERT_MUTEX_LOCKED(ldlm_namespace_lock(client));
LASSERT(!cfs_list_empty(ldlm_namespace_list(client)));
return container_of(ldlm_namespace_list(client)->next,
struct ldlm_namespace, ns_list_chain);
lu_ref_init(&res->lr_reference);
/* one who creates the resource must unlock
- * the semaphore after lvb initialization */
- cfs_init_mutex_locked(&res->lr_lvb_sem);
+ * the mutex after lvb initialization */
+ cfs_mutex_init(&res->lr_lvb_mutex);
+ cfs_mutex_lock(&res->lr_lvb_mutex);
return res;
}
res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
/* synchronize WRT resource creation */
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- cfs_down(&res->lr_lvb_sem);
- cfs_up(&res->lr_lvb_sem);
+ cfs_mutex_lock(&res->lr_lvb_mutex);
+ cfs_mutex_unlock(&res->lr_lvb_mutex);
}
return res;
}
res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
/* synchronize WRT resource creation */
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- cfs_down(&res->lr_lvb_sem);
- cfs_up(&res->lr_lvb_sem);
+ cfs_mutex_lock(&res->lr_lvb_mutex);
+ cfs_mutex_unlock(&res->lr_lvb_mutex);
}
return res;
}
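/* Conversion note: init_MUTEX_LOCKED() has no mutex equivalent, so
 * ldlm_resource_new() now does cfs_mutex_init() immediately followed by
 * cfs_mutex_lock() in the creating thread; the lock/unlock pairs above
 * exist only to make concurrent lookups wait until lvbo_init() has
 * finished and the creator releases lr_lvb_mutex. */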
if (rc)
CERROR("lvbo_init failed for resource "
LPU64": rc %d\n", name->name[0], rc);
- /* we create resource with locked lr_lvb_sem */
- cfs_up(&res->lr_lvb_sem);
+ /* we create resource with locked lr_lvb_mutex */
+ cfs_mutex_unlock(&res->lr_lvb_mutex);
}
return res;
if (!((libcfs_debug | D_ERROR) & level))
return;
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
cfs_list_for_each(tmp, ldlm_namespace_list(client)) {
struct ldlm_namespace *ns;
ldlm_namespace_dump(level, ns);
}
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
}
static int ldlm_res_hash_dump(cfs_hash_t *hs, cfs_hash_bd_t *bd,
struct lov_stripe_md *lli_smd;
char *lli_symlink_name;
- cfs_semaphore_t lli_open_sem;
__u64 lli_maxbytes;
unsigned long lli_flags;
__u64 lli_ioepoch;
ibits = MDS_INODELOCK_LOOKUP;
if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
goto do_lock;
- cfs_down(&lli->lli_och_sem);
+ cfs_mutex_lock(&lli->lli_och_mutex);
if (*och_p) { /* Everything is open already, do nothing */
/*(*och_usecount)++; Do not let them steal our open
handle from under us */
hope the lock won't be invalidated in between. But
if it would be, we'll reopen the open request to
MDS later during file open path */
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
RETURN(1);
} else {
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
}
}
}
ldlm_lock_dump_handle(D_OTHER, &lockh);
- cfs_down(&lli->lli_readdir_sem);
+ cfs_mutex_lock(&lli->lli_readdir_mutex);
page = ll_dir_page_locate(dir, &lhash, &start, &end);
if (IS_ERR(page)) {
CERROR("dir page locate: "DFID" at "LPU64": rc %ld\n",
goto fail;
}
out_unlock:
- cfs_up(&lli->lli_readdir_sem);
+ cfs_mutex_unlock(&lli->lli_readdir_mutex);
ldlm_lock_decref(&lockh, mode);
return page;
och_usecount = &lli->lli_open_fd_read_count;
}
- cfs_down(&lli->lli_och_sem);
+ cfs_mutex_lock(&lli->lli_och_mutex);
if (*och_usecount) { /* There are still users of this handle, so
skip freeing it. */
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
RETURN(0);
}
och=*och_p;
*och_p = NULL;
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
if (och) { /* There might be a race and somebody have freed this och
already */
struct inode *inode = file->f_dentry->d_inode;
ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
- cfs_down(&lli->lli_och_sem);
+ cfs_mutex_lock(&lli->lli_och_mutex);
if (fd->fd_omode & FMODE_WRITE) {
lockmode = LCK_CW;
LASSERT(lli->lli_open_fd_write_count);
LASSERT(lli->lli_open_fd_read_count);
lli->lli_open_fd_read_count--;
}
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
LDLM_IBITS, &policy, lockmode,
/* Open a file, and (for the very first open) create objects on the OSTs at
* this time. If opened with O_LOV_DELAY_CREATE, then we don't do the object
- * creation or open until ll_lov_setstripe() ioctl is called. We grab
- * lli_open_sem to ensure no other process will create objects, send the
- * stripe MD to the MDS, or try to destroy the objects if that fails.
+ * creation or open until ll_lov_setstripe() ioctl is called.
*
* If we already have the stripe MD locally then we don't request it in
* md_open(), by passing a lmm_size = 0.
och_usecount = &lli->lli_open_fd_read_count;
}
- cfs_down(&lli->lli_och_sem);
+ cfs_mutex_lock(&lli->lli_och_mutex);
if (*och_p) { /* Open handle is present */
if (it_disposition(it, DISP_OPEN_OPEN)) {
/* Well, there's extra open request that we do not need,
let's close it somehow. This will decref request. */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc) {
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
GOTO(out_openerr, rc);
}
rc = ll_local_open(file, it, fd, NULL);
if (rc) {
(*och_usecount)--;
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
GOTO(out_openerr, rc);
}
} else {
/* We cannot just request lock handle now, new ELC code
means that one of other OPEN locks for this file
could be cancelled, and since blocking ast handler
- would attempt to grab och_sem as well, that would
+ would attempt to grab och_mutex as well, that would
result in a deadlock */
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
it->it_create_mode |= M_CHECK_STALE;
rc = ll_intent_file_open(file, NULL, 0, it);
it->it_create_mode &= ~M_CHECK_STALE;
if (rc)
GOTO(out_och_free, rc);
}
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
fd = NULL;
- /* Must do this outside lli_och_sem lock to prevent deadlock where
+ /* Must do this outside lli_och_mutex to prevent deadlock where
different kind of OPEN lock for this same inode gets cancelled
by ldlm_cancel_lru */
if (!S_ISREG(inode->i_mode))
*och_p = NULL; /* OBD_FREE writes some magic there */
(*och_usecount)--;
}
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
out_openerr:
if (opendir_set != 0)
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
struct vvp_io *vio = vvp_env_io(env);
struct ccc_io *cio = ccc_env_io(env);
- int write_sem_locked = 0;
+ int write_mutex_locked = 0;
cio->cui_fd = LUSTRE_FPRIVATE(file);
vio->cui_io_subtype = args->via_io_subtype;
#endif
if ((iot == CIT_WRITE) &&
!(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- if(cfs_down_interruptible(&lli->lli_write_sem))
+ if (cfs_mutex_lock_interruptible(&lli->lli_write_mutex))
GOTO(out, result = -ERESTARTSYS);
- write_sem_locked = 1;
+ write_mutex_locked = 1;
} else if (iot == CIT_READ) {
cfs_down_read(&lli->lli_trunc_sem);
}
LBUG();
}
result = cl_io_loop(env, io);
- if (write_sem_locked)
- cfs_up(&lli->lli_write_sem);
+ if (write_mutex_locked)
+ cfs_mutex_unlock(&lli->lli_write_mutex);
else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
cfs_up_read(&lli->lli_trunc_sem);
} else {
struct posix_acl *lli_posix_acl;
cfs_hlist_head_t *lli_remote_perms;
- cfs_semaphore_t lli_rmtperm_sem;
+ cfs_mutex_t lli_rmtperm_mutex;
/* identifying fields for both metadata and data stacks. */
struct lu_fid lli_fid;
__u64 lli_open_fd_exec_count;
/* Protects access to och pointers and their usage counters, also
* atomicity of check-update of lli_smd */
- cfs_semaphore_t lli_och_sem;
+ cfs_mutex_t lli_och_mutex;
struct inode lli_vfs_inode;
/* for directory */
struct {
/* serialize normal readdir and statahead-readdir. */
- cfs_semaphore_t d_readdir_sem;
+ cfs_mutex_t d_readdir_mutex;
/* metadata statahead */
/* since parent-child threads can share the same @file
pid_t d_opendir_pid;
} d;
-#define lli_readdir_sem u.d.d_readdir_sem
+#define lli_readdir_mutex u.d.d_readdir_mutex
#define lli_opendir_key u.d.d_opendir_key
#define lli_sai u.d.d_sai
#define lli_sa_pos u.d.d_sa_pos
* }
*/
cfs_rw_semaphore_t f_trunc_sem;
- cfs_semaphore_t f_write_sem;
+ cfs_mutex_t f_write_mutex;
/* for writepage() only to communicate to fsync */
int f_async_rc;
#define lli_symlink_name u.f.f_symlink_name
#define lli_maxbytes u.f.f_maxbytes
#define lli_trunc_sem u.f.f_trunc_sem
-#define lli_write_sem u.f.f_write_sem
+#define lli_write_mutex u.f.f_write_mutex
#define lli_async_rc u.f.f_async_rc
#define lli_write_rc u.f.f_write_rc
#define lli_glimpse_sem u.f.f_glimpse_sem
RETURN(NULL);
cfs_spin_lock_init(&sbi->ll_lock);
- cfs_init_mutex(&sbi->ll_lco.lco_lock);
+ cfs_mutex_init(&sbi->ll_lco.lco_lock);
cfs_spin_lock_init(&sbi->ll_pp_extent_lock);
cfs_spin_lock_init(&sbi->ll_process_lock);
sbi->ll_rw_stats_on = 0;
GOTO(out_dt, err);
}
- cfs_mutex_down(&sbi->ll_lco.lco_lock);
+ cfs_mutex_lock(&sbi->ll_lco.lco_lock);
sbi->ll_lco.lco_flags = data->ocd_connect_flags;
sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
- cfs_mutex_up(&sbi->ll_lco.lco_lock);
+ cfs_mutex_unlock(&sbi->ll_lco.lco_lock);
fid_zero(&sbi->ll_root_fid);
err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
cfs_spin_lock_init(&lli->lli_lock);
lli->lli_posix_acl = NULL;
lli->lli_remote_perms = NULL;
- cfs_sema_init(&lli->lli_rmtperm_sem, 1);
+ cfs_mutex_init(&lli->lli_rmtperm_mutex);
/* Do not set lli_fid, it has been initialized already. */
fid_zero(&lli->lli_pfid);
CFS_INIT_LIST_HEAD(&lli->lli_close_list);
lli->lli_open_fd_read_count = 0;
lli->lli_open_fd_write_count = 0;
lli->lli_open_fd_exec_count = 0;
- cfs_sema_init(&lli->lli_och_sem, 1);
+ cfs_mutex_init(&lli->lli_och_mutex);
cfs_spin_lock_init(&lli->lli_agl_lock);
lli->lli_smd = NULL;
lli->lli_clob = NULL;
LASSERT(lli->lli_vfs_inode.i_mode != 0);
if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
- cfs_sema_init(&lli->lli_readdir_sem, 1);
+ cfs_mutex_init(&lli->lli_readdir_mutex);
lli->lli_opendir_key = NULL;
lli->lli_sai = NULL;
lli->lli_sa_pos = 0;
lli->lli_symlink_name = NULL;
lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
cfs_init_rwsem(&lli->lli_trunc_sem);
- cfs_sema_init(&lli->lli_write_sem, 1);
+ cfs_mutex_init(&lli->lli_write_mutex);
lli->lli_async_rc = 0;
lli->lli_write_rc = 0;
cfs_init_rwsem(&lli->lli_glimpse_sem);
if (lsm != NULL) {
LASSERT(S_ISREG(inode->i_mode));
- cfs_down(&lli->lli_och_sem);
+ cfs_mutex_lock(&lli->lli_och_mutex);
if (lli->lli_smd == NULL) {
if (lsm->lsm_magic != LOV_MAGIC_V1 &&
lsm->lsm_magic != LOV_MAGIC_V3) {
cfs_spin_lock(&lli->lli_lock);
lli->lli_smd = lsm;
cfs_spin_unlock(&lli->lli_lock);
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
lli->lli_maxbytes = lsm->lsm_maxbytes;
if (lli->lli_maxbytes > PAGE_CACHE_MAXBYTES)
lli->lli_maxbytes = PAGE_CACHE_MAXBYTES;
} else {
- cfs_up(&lli->lli_och_sem);
+ cfs_mutex_unlock(&lli->lli_och_mutex);
LASSERT(lli->lli_smd->lsm_magic == lsm->lsm_magic &&
lli->lli_smd->lsm_stripe_count ==
lsm->lsm_stripe_count);
struct bio *lo_biotail;
int lo_state;
cfs_semaphore_t lo_sem;
- cfs_semaphore_t lo_ctl_mutex;
+ cfs_mutex_t lo_ctl_mutex;
cfs_atomic_t lo_pending;
cfs_waitq_t lo_bh_wait;
static int max_loop = MAX_LOOP_DEFAULT;
static struct lloop_device *loop_dev;
static struct gendisk **disks;
-static cfs_semaphore_t lloop_mutex;
+static cfs_mutex_t lloop_mutex;
static void *ll_iocontrol_magic = NULL;
static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
#endif
- cfs_down(&lo->lo_ctl_mutex);
+ cfs_mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
- cfs_up(&lo->lo_ctl_mutex);
+ cfs_mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
#endif
- cfs_down(&lo->lo_ctl_mutex);
+ cfs_mutex_lock(&lo->lo_ctl_mutex);
--lo->lo_refcnt;
- cfs_up(&lo->lo_ctl_mutex);
+ cfs_mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
int err = 0;
#endif
- cfs_down(&lloop_mutex);
+ cfs_mutex_lock(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_DETACH: {
err = loop_clr_fd(lo, bdev, 2);
err = -EINVAL;
break;
}
- cfs_up(&lloop_mutex);
+ cfs_mutex_unlock(&lloop_mutex);
return err;
}
CWARN("Enter llop_ioctl\n");
- cfs_down(&lloop_mutex);
+ cfs_mutex_lock(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_ATTACH: {
struct lloop_device *lo_free = NULL;
}
out:
- cfs_up(&lloop_mutex);
+ cfs_mutex_unlock(&lloop_mutex);
out1:
if (rcp)
*rcp = err;
goto out_mem3;
}
- cfs_init_mutex(&lloop_mutex);
+ cfs_mutex_init(&lloop_mutex);
for (i = 0; i < max_loop; i++) {
struct lloop_device *lo = &loop_dev[i];
if (!lo->lo_queue)
goto out_mem4;
- cfs_init_mutex(&lo->lo_ctl_mutex);
- cfs_init_mutex_locked(&lo->lo_sem);
+ cfs_mutex_init(&lo->lo_ctl_mutex);
+ cfs_sema_init(&lo->lo_sem, 0);
cfs_waitq_init(&lo->lo_bh_wait);
lo->lo_number = i;
cfs_spin_lock_init(&lo->lo_lock);
cfs_might_sleep();
- cfs_down(&lli->lli_rmtperm_sem);
+ cfs_mutex_lock(&lli->lli_rmtperm_mutex);
/* check again */
if (save != lli->lli_rmtperm_time) {
rc = do_check_remote_perm(lli, mask);
if (!rc || (rc != -ENOENT && i)) {
- cfs_up(&lli->lli_rmtperm_sem);
+ cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
break;
}
}
ll_i2suppgid(inode), &req);
capa_put(oc);
if (rc) {
- cfs_up(&lli->lli_rmtperm_sem);
+ cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
break;
}
perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
lustre_swab_mdt_remote_perm);
if (unlikely(perm == NULL)) {
- cfs_up(&lli->lli_rmtperm_sem);
+ cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
rc = -EPROTO;
break;
}
rc = ll_update_remote_perm(inode, perm);
- cfs_up(&lli->lli_rmtperm_sem);
+ cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
if (rc == -ENOMEM)
break;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op\n");
- /* on symlinks lli_open_sem protects lli_symlink_name allocation/data */
+
ll_inode_size_lock(inode, 0);
rc = ll_readlink_internal(inode, &request, &symname);
if (rc)
#define LMV_MAX_TGT_COUNT 128
-#define lmv_init_lock(lmv) cfs_down(&lmv->init_sem);
-#define lmv_init_unlock(lmv) cfs_up(&lmv->init_sem);
+#define lmv_init_lock(lmv) cfs_mutex_lock(&(lmv)->init_mutex)
+#define lmv_init_unlock(lmv) cfs_mutex_unlock(&(lmv)->init_mutex)
#define LL_IT2STR(it) \
((it) ? ldlm_it2str((it)->it_op) : "0")
/**
* Mutex for protecting fields.
*/
- cfs_semaphore_t lo_guard;
+ cfs_mutex_t lo_guard;
/**
* Object state like O_FREEING.
*/
lmv_object_lock(struct lmv_object *obj)
{
LASSERT(obj);
- cfs_down(&obj->lo_guard);
+ cfs_mutex_lock(&obj->lo_guard);
}
static inline void
lmv_object_unlock(struct lmv_object *obj)
{
LASSERT(obj);
- cfs_up(&obj->lo_guard);
+ cfs_mutex_unlock(&obj->lo_guard);
}
void lmv_object_add(struct lmv_object *obj);
}
if (lmv->desc.ld_tgt_count == 0) {
+ lmv_init_unlock(lmv);
CERROR("%s: no targets configured.\n", obd->obd_name);
RETURN(-EINVAL);
}
* New seq alloc and FLD setup should be atomic. Otherwise we may find
* on server that seq in new allocated fid is not yet known.
*/
- cfs_down(&tgt->ltd_fid_sem);
+ cfs_mutex_lock(&tgt->ltd_fid_mutex);
if (!tgt->ltd_active)
GOTO(out, rc = -ENODEV);
EXIT;
out:
- cfs_up(&tgt->ltd_fid_sem);
+ cfs_mutex_unlock(&tgt->ltd_fid_mutex);
return rc;
}
RETURN(-ENOMEM);
for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
- cfs_sema_init(&lmv->tgts[i].ltd_fid_sem, 1);
+ cfs_mutex_init(&lmv->tgts[i].ltd_fid_mutex);
lmv->tgts[i].ltd_idx = i;
}
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
cfs_spin_lock_init(&lmv->lmv_lock);
- cfs_sema_init(&lmv->init_sem, 1);
+ cfs_mutex_init(&lmv->init_mutex);
rc = lmv_object_setup(obd);
if (rc) {
obj->lo_state = 0;
obj->lo_hashtype = mea->mea_magic;
- cfs_init_mutex(&obj->lo_guard);
+ cfs_mutex_init(&obj->lo_guard);
cfs_atomic_set(&obj->lo_count, 0);
obj->lo_objcount = mea->mea_count;
struct lov_obd *lov = &obd->u.lov;
/* nobody gets through here until lov_putref is done */
- cfs_mutex_down(&lov->lov_lock);
+ cfs_mutex_lock(&lov->lov_lock);
cfs_atomic_inc(&lov->lov_refcount);
- cfs_mutex_up(&lov->lov_lock);
+ cfs_mutex_unlock(&lov->lov_lock);
return;
}
{
struct lov_obd *lov = &obd->u.lov;
- cfs_mutex_down(&lov->lov_lock);
+ cfs_mutex_lock(&lov->lov_lock);
/* ok to dec to 0 more than once -- ltd_exp's will be null */
if (cfs_atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
CFS_LIST_HEAD(kill);
lov->lov_tgts[i] = NULL;
lov->lov_death_row--;
}
- cfs_mutex_up(&lov->lov_lock);
+ cfs_mutex_unlock(&lov->lov_lock);
cfs_list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
cfs_list_del(&tgt->ltd_kill);
__lov_del_obd(obd, tgt);
}
} else {
- cfs_mutex_up(&lov->lov_lock);
+ cfs_mutex_unlock(&lov->lov_lock);
}
}
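/* lov_getref()/lov_putref(): lov_lock serialises reference transitions,
 * and targets queued on the death row are only reaped by the putref that
 * drops the last reference, so concurrent users never see a half-deleted
 * lov_tgts[] entry. */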
if (tgt_obd == NULL)
RETURN(-EINVAL);
- cfs_mutex_down(&lov->lov_lock);
+ cfs_mutex_lock(&lov->lov_lock);
if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) {
tgt = lov->lov_tgts[index];
CERROR("UUID %s already assigned at LOV target index %d\n",
obd_uuid2str(&tgt->ltd_uuid), index);
- cfs_mutex_up(&lov->lov_lock);
+ cfs_mutex_unlock(&lov->lov_lock);
RETURN(-EEXIST);
}
newsize = newsize << 1;
OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
if (newtgts == NULL) {
- cfs_mutex_up(&lov->lov_lock);
+ cfs_mutex_unlock(&lov->lov_lock);
RETURN(-ENOMEM);
}
OBD_ALLOC_PTR(tgt);
if (!tgt) {
- cfs_mutex_up(&lov->lov_lock);
+ cfs_mutex_unlock(&lov->lov_lock);
RETURN(-ENOMEM);
}
rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size);
if (rc) {
- cfs_mutex_up(&lov->lov_lock);
+ cfs_mutex_unlock(&lov->lov_lock);
OBD_FREE_PTR(tgt);
RETURN(rc);
}
if (index >= lov->desc.ld_tgt_count)
lov->desc.ld_tgt_count = index + 1;
- cfs_mutex_up(&lov->lov_lock);
+ cfs_mutex_unlock(&lov->lov_lock);
CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
index, tgt->ltd_gen, lov->desc.ld_tgt_count);
lov->desc = *desc;
lov->lov_tgt_size = 0;
- cfs_sema_init(&lov->lov_lock, 1);
+ cfs_mutex_init(&lov->lov_lock);
cfs_atomic_set(&lov->lov_refcount, 0);
CFS_INIT_LIST_HEAD(&lov->lov_qos.lq_oss_list);
cfs_init_rwsem(&lov->lov_qos.lq_rw_sem);
}
cfs_down_write(&lov->lov_qos.lq_rw_sem);
- cfs_mutex_down(&lov->lov_lock);
+ cfs_mutex_lock(&lov->lov_lock);
cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
if (obd_uuid_equals(&oss->lqo_uuid,
&exp->exp_connection->c_remote_uuid)) {
oss->lqo_ost_count);
out:
- cfs_mutex_up(&lov->lov_lock);
+ cfs_mutex_unlock(&lov->lov_lock);
cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
int pages, unsigned long *blocks,
int *created, int create,
- cfs_semaphore_t *optional_sem)
+ cfs_mutex_t *optional_mutex)
{
int rc;
blocks, created, create);
return rc;
}
- if (optional_sem != NULL)
- cfs_down(optional_sem);
+ if (optional_mutex != NULL)
+ cfs_mutex_lock(optional_mutex);
rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
created, create);
- if (optional_sem != NULL)
- cfs_up(optional_sem);
+ if (optional_mutex != NULL)
+ cfs_mutex_unlock(optional_mutex);
return rc;
}
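
The renamed optional_mutex parameter keeps its "lock only if the caller passed one" contract, so both the lock and the unlock must be guarded by the same NULL test. A sketch of that contract with pthreads (illustrative, not the fsfilt API):

#include <pthread.h>

static int map_pages(pthread_mutex_t *optional_mutex)
{
        int rc;

        if (optional_mutex != NULL)
                pthread_mutex_lock(optional_mutex);
        rc = 0;                 /* ... do the block mapping ... */
        if (optional_mutex != NULL)
                pthread_mutex_unlock(optional_mutex);
        return rc;
}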
}
- cfs_mutex_down(&obd->obd_dev_sem);
+ cfs_mutex_lock(&obd->obd_dev_mutex);
for (j = 0; j < count; j++) {
__u32 i = le32_to_cpu(data[j].l_ost_idx);
if (mds_lov_update_max_ost(&obd->u.mds, i)) {
break;
}
}
- cfs_mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_unlock(&obd->obd_dev_mutex);
RETURN(rc);
}
CDEBUG(D_CONFIG, "updated lov_desc, tgt_count: %d - idx %d / uuid %s\n",
mds->mds_lov_desc.ld_tgt_count, idx, uuid->uuid);
- cfs_mutex_down(&obd->obd_dev_sem);
+ cfs_mutex_lock(&obd->obd_dev_mutex);
rc = mds_lov_update_max_ost(mds, idx);
- cfs_mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_unlock(&obd->obd_dev_mutex);
if (rc != 0)
GOTO(out, rc );
RETURN(-ENOTCONN);
}
- cfs_mutex_down(&obd->obd_dev_sem);
+ cfs_mutex_lock(&obd->obd_dev_mutex);
rc = mds_lov_read_objids(obd);
- cfs_mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_unlock(&obd->obd_dev_mutex);
if (rc) {
CERROR("cannot read %s: rc = %d\n", "lov_objids", rc);
GOTO(err_exit, rc);
lu_object_init(o, h, d);
lu_object_add_top(h, o);
o->lo_ops = &mdt_obj_ops;
- cfs_sema_init(&mo->mot_ioepoch_sem, 1);
- cfs_sema_init(&mo->mot_lov_sem, 1);
+ cfs_mutex_init(&mo->mot_ioepoch_mutex);
+ cfs_mutex_init(&mo->mot_lov_mutex);
RETURN(o);
} else
RETURN(NULL);
CFS_INIT_LIST_HEAD(&med->med_open_head);
cfs_spin_lock_init(&med->med_open_lock);
- cfs_sema_init(&med->med_idmap_sem, 1);
+ cfs_mutex_init(&med->med_idmap_mutex);
med->med_idmap = NULL;
cfs_spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
ENTRY;
if (exp_connect_rmtclient(exp)) {
- cfs_down(&med->med_idmap_sem);
+ cfs_mutex_lock(&med->med_idmap_mutex);
if (!med->med_idmap)
med->med_idmap = lustre_idmap_init();
- cfs_up(&med->med_idmap_sem);
+ cfs_mutex_unlock(&med->med_idmap_mutex);
if (IS_ERR(med->med_idmap)) {
long err = PTR_ERR(med->med_idmap);
void mdt_cleanup_idmap(struct mdt_export_data *med)
{
- cfs_down(&med->med_idmap_sem);
+ cfs_mutex_lock(&med->med_idmap_mutex);
if (med->med_idmap != NULL) {
lustre_idmap_fini(med->med_idmap);
med->med_idmap = NULL;
}
- cfs_up(&med->med_idmap_sem);
+ cfs_mutex_unlock(&med->med_idmap_mutex);
}
static inline void mdt_revoke_export_locks(struct obd_export *exp)
int mot_ioepoch_count;
int mot_writecount;
/* Lock to protect object's IO epoch. */
- cfs_semaphore_t mot_ioepoch_sem;
+ cfs_mutex_t mot_ioepoch_mutex;
/* Lock to protect create_data */
- cfs_semaphore_t mot_lov_sem;
+ cfs_mutex_t mot_lov_mutex;
};
enum mdt_object_flags {
ma->ma_need = MA_INODE | MA_LOV;
ma->ma_valid = 0;
- cfs_down(&o->mot_lov_sem);
+ cfs_mutex_lock(&o->mot_lov_mutex);
if (!(o->mot_flags & MOF_LOV_CREATED)) {
rc = mdo_create_data(info->mti_env,
p ? mdt_object_child(p) : NULL,
if (rc == 0 && ma->ma_valid & MA_LOV)
o->mot_flags |= MOF_LOV_CREATED;
}
- cfs_up(&o->mot_lov_sem);
+ cfs_mutex_unlock(&o->mot_lov_mutex);
RETURN(rc);
}
/**
* Re-enable Size-on-MDS.
- * Call under ->mot_ioepoch_sem.
+ * Call under ->mot_ioepoch_mutex.
*/
static void mdt_object_som_enable(struct mdt_object *mo, __u64 ioepoch)
{
/**
* Open the IOEpoch. It is allowed if @writecount is not negative.
- * The epoch and writecount handling is performed under the mot_ioepoch_sem.
+ * The epoch and writecount handling is performed under the mot_ioepoch_mutex.
*/
int mdt_ioepoch_open(struct mdt_thread_info *info, struct mdt_object *o,
int created)
!S_ISREG(lu_object_attr(&o->mot_obj.mo_lu)))
RETURN(0);
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
if (mdt_ioepoch_opened(o)) {
/* Epoch continues even if there are no writers yet. */
CDEBUG(D_INODE, "continue epoch "LPU64" for "DFID"\n",
cancel = 1;
}
o->mot_ioepoch_count++;
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
/* Cancel Size-on-MDS attributes cached on clients for the open case.
* In the truncate case, see mdt_reint_setattr(). */
* Update SOM on-disk attributes.
* If enabling, write update inodes and lustre-ea with the proper IOEpoch,
* mountid and attributes. If disabling, zero IOEpoch id in lustre-ea.
- * Call under ->mot_ioepoch_sem.
+ * Call under ->mot_ioepoch_mutex.
*/
static int mdt_som_attr_set(struct mdt_thread_info *info,
struct mdt_object *obj, __u64 ioepoch, int enable)
{
int rc = 0;
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
CDEBUG(D_INODE, "Eviction. Closing IOepoch "LPU64" on "DFID". "
"Count %d\n", o->mot_ioepoch, PFID(mdt_object_fid(o)),
o->mot_ioepoch_count);
rc = mdt_som_attr_set(info, o, o->mot_ioepoch, MDT_SOM_DISABLE);
mdt_object_som_enable(o, o->mot_ioepoch);
}
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
int rc = MDT_IOEPOCH_CLOSED;
ENTRY;
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
CDEBUG(D_INODE, "Replay. Closing epoch "LPU64" on "DFID". Count %d\n",
o->mot_ioepoch, PFID(mdt_object_fid(o)), o->mot_ioepoch_count);
o->mot_ioepoch_count--;
if (!mdt_ioepoch_opened(o))
mdt_object_som_enable(o, info->mti_ioepoch->ioepoch);
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
la = &info->mti_attr.ma_attr;
achange = (info->mti_ioepoch->flags & MF_SOM_CHANGE);
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
o->mot_ioepoch_count--;
tmp_ma = &info->mti_u.som.attr;
mdt_object_som_enable(o, o->mot_ioepoch);
}
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
/* If recovery is needed, tell the client to perform GETATTR under
* the lock. */
if (ret == MDT_IOEPOCH_GETATTR && recovery) {
RETURN(rc ? : ret);
error_up:
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
return rc;
}
!(info->mti_attr.ma_attr.la_valid & LA_SIZE)))
act = MDT_SOM_DISABLE;
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
/* Mark that the object is in the recovery state if we failed to obtain
* SOM attributes. */
if (act == MDT_SOM_DISABLE)
rc = mdt_som_attr_set(info, o, ioepoch, act);
mdt_object_som_enable(o, ioepoch);
}
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
{
int rc = 0;
ENTRY;
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
rc = o->mot_writecount;
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
{
int rc = 0;
ENTRY;
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
if (o->mot_writecount < 0)
rc = -ETXTBSY;
else
o->mot_writecount++;
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
void mdt_write_put(struct mdt_object *o)
{
ENTRY;
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
o->mot_writecount--;
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
EXIT;
}
{
int rc = 0;
ENTRY;
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
if (o->mot_writecount > 0)
rc = -ETXTBSY;
else
o->mot_writecount--;
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
static void mdt_write_allow(struct mdt_object *o)
{
ENTRY;
- cfs_down(&o->mot_ioepoch_sem);
+ cfs_mutex_lock(&o->mot_ioepoch_mutex);
o->mot_writecount++;
- cfs_up(&o->mot_ioepoch_sem);
+ cfs_mutex_unlock(&o->mot_ioepoch_mutex);
EXIT;
}
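
The writecount helpers above (mdt_write_put() and mdt_write_allow() are named in these hunks; the get/deny bodies appear just before them) implement a small deny/allow protocol under mot_ioepoch_mutex: positive counts are active writers, negative counts are outstanding denials, and each side fails the other with -ETXTBSY. A compact userspace model of the same rules, with hypothetical names:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static int writecount;          /* > 0: writers, < 0: deniers */

static int write_get(void)
{
        int rc = 0;

        pthread_mutex_lock(&m);
        if (writecount < 0)
                rc = -ETXTBSY;  /* writes currently denied */
        else
                writecount++;
        pthread_mutex_unlock(&m);
        return rc;
}

static int write_deny(void)
{
        int rc = 0;

        pthread_mutex_lock(&m);
        if (writecount > 0)
                rc = -ETXTBSY;  /* active writers exist */
        else
                writecount--;
        pthread_mutex_unlock(&m);
        return rc;
}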
/* update lcd in memory only for resent cases */
ted = &req->rq_export->exp_target_data;
LASSERT(ted);
- cfs_mutex_down(&ted->ted_lcd_lock);
+ cfs_mutex_lock(&ted->ted_lcd_lock);
lcd = ted->ted_lcd;
if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE ||
lustre_msg_get_opc(req->rq_reqmsg) == MDS_DONE_WRITING) {
lcd->lcd_last_result = rc;
lcd->lcd_last_data = info->mti_opdata;
}
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
EXIT;
}
ted->ted_lr_idx = cl_idx;
ted->ted_lr_off = lsd->lsd_client_start +
(cl_idx * lsd->lsd_client_size);
- cfs_init_mutex(&ted->ted_lcd_lock);
+ cfs_mutex_init(&ted->ted_lcd_lock);
LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
ted->ted_lr_idx = cl_idx;
ted->ted_lr_off = lsd->lsd_client_start +
(cl_idx * lsd->lsd_client_size);
- cfs_init_mutex(&ted->ted_lcd_lock);
+ cfs_mutex_init(&ted->ted_lcd_lock);
LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
if (rc)
GOTO(stop, rc);
- cfs_mutex_down(&ted->ted_lcd_lock);
+ cfs_mutex_lock(&ted->ted_lcd_lock);
memset(ted->ted_lcd->lcd_uuid, 0, sizeof ted->ted_lcd->lcd_uuid);
rc = mdt_last_rcvd_write(env, mdt, ted->ted_lcd, &off, th);
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
stop:
mdt_trans_stop(env, mdt, th);
ted = &req->rq_export->exp_target_data;
LASSERT(ted);
- cfs_mutex_down(&ted->ted_lcd_lock);
+ cfs_mutex_lock(&ted->ted_lcd_lock);
lcd = ted->ted_lcd;
/* if the export has already been disconnected, we have no last_rcvd slot;
* just update the server data with the latest transno */
if (lcd == NULL) {
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
CWARN("commit transaction for disconnected client %s: rc %d\n",
req->rq_export->exp_client_uuid.uuid, rc);
err = mdt_last_rcvd_header_write(mti->mti_env, mdt, th);
req->rq_export->exp_vbr_failed = 1;
cfs_spin_unlock(&req->rq_export->exp_lock);
}
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
RETURN(req_is_replay(req) ? -EOVERFLOW : 0);
}
lcd->lcd_last_close_transno = mti->mti_transno;
req->rq_export->exp_vbr_failed = 1;
cfs_spin_unlock(&req->rq_export->exp_lock);
}
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
RETURN(req_is_replay(req) ? -EOVERFLOW : 0);
}
lcd->lcd_last_transno = mti->mti_transno;
} else {
err = mdt_last_rcvd_write(mti->mti_env, mdt, lcd, &off, th);
}
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
RETURN(err);
}
RETURN(0);
}
-CFS_DECLARE_MUTEX(llog_process_lock);
+CFS_DEFINE_MUTEX(llog_process_lock);
/** Stop watching for updates on this log.
*/
if (rc)
return rc;
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
seq_show_srpc_rules(seq, fsdb->fsdb_name, &fsdb->fsdb_srpc_gen);
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
return 0;
}
struct mgs_tgt_srpc_conf *srpc_tgt;
int i;
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
seq_printf(seq, "fsname: %s\n", fsdb->fsdb_name);
seq_printf(seq, "flags: %#lx gen: %d\n",
lprocfs_rd_ir_state(seq, fsdb);
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
return 0;
}
/* Internal mgs setup */
mgs_init_fsdb_list(obd);
- cfs_sema_init(&mgs->mgs_sem, 1);
+ cfs_mutex_init(&mgs->mgs_mutex);
mgs->mgs_start_time = cfs_time_current_sec();
/* Setup proc */
}
/*
- * Log writing contention is handled by the fsdb_sem.
+ * Log writing contention is handled by the fsdb_mutex.
*
* It should be alright if someone was reading while we were
* updating the logs - if we revoke at the end they will just update
if (rc)
return rc;
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
if (sptlrpc_rule_set_choose(&fsdb->fsdb_srpc_gen,
LUSTRE_SP_MGC, LUSTRE_SP_MGS,
req->rq_peer.nid,
/* by default allow any flavors */
flvr.sf_rpc = SPTLRPC_FLVR_ANY;
}
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
cfs_spin_lock(&exp->exp_lock);
*/
cfs_list_t mfc_export_list;
/**
- * list of fs clients in the same fsdb, protected by fsdb->fsdb_sem
+ * list of fs clients in the same fsdb, protected by fsdb->fsdb_mutex
*/
cfs_list_t mfc_fsdb_list;
unsigned mfc_ir_capable:1;
struct fs_db {
char fsdb_name[9];
cfs_list_t fsdb_list; /* list of databases */
- cfs_semaphore_t fsdb_sem;
+ cfs_mutex_t fsdb_mutex;
void *fsdb_ost_index_map; /* bitmap of used indices */
void *fsdb_mdt_index_map; /* bitmap of used indices */
int fsdb_mdt_count;
struct sptlrpc_rule_set fsdb_srpc_gen;
struct mgs_tgt_srpc_conf *fsdb_srpc_tgt;
- /* list of fs clients, mgs_fsc. protected by mgs_sem */
+ /* list of fs clients, mgs_fsc. protected by mgs_mutex */
cfs_list_t fsdb_clients;
int fsdb_nonir_clients;
int fsdb_ir_state;
RETURN(rc);
}
-/* fsdb->fsdb_sem is already held in mgs_find_or_make_fsdb*/
+/* fsdb->fsdb_mutex is already held in mgs_find_or_make_fsdb*/
static int mgs_get_fsdb_from_llog(struct obd_device *obd, struct fs_db *fsdb)
{
char *logname;
ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
LASSERT(ctxt != NULL);
name_create(&logname, fsdb->fsdb_name, "-client");
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
rc = llog_create(ctxt, &loghandle, NULL, logname);
if (rc)
rc = rc2;
out_pop:
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
name_destroy(&logname);
llog_ctxt_put(ctxt);
RETURN(NULL);
strcpy(fsdb->fsdb_name, fsname);
- cfs_sema_init(&fsdb->fsdb_sem, 1);
+ cfs_mutex_init(&fsdb->fsdb_mutex);
cfs_set_bit(FSDB_UDESC, &fsdb->fsdb_flags);
if (strcmp(fsname, MGSSELF_NAME) == 0) {
static void mgs_free_fsdb(struct obd_device *obd, struct fs_db *fsdb)
{
- /* wait for anyone with the sem */
+ /* wait for anyone with the mutex */
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
lproc_mgs_del_live(obd, fsdb);
cfs_list_del(&fsdb->fsdb_list);
name_destroy(&fsdb->fsdb_mdtlmv);
name_destroy(&fsdb->fsdb_mdc);
mgs_free_fsdb_srpc(fsdb);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
OBD_FREE_PTR(fsdb);
}
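
Note that this hunk does more than rename: it adds a cfs_mutex_unlock() before OBD_FREE_PTR(). The old code freed the fsdb while still holding the semaphore, which a semaphore tolerates, but freeing a locked mutex is a bug that CONFIG_DEBUG_MUTEXES will flag. Userspace has the same rule; a sketch assuming glibc behaviour, where destroying a held error-checking mutex reports EBUSY:

#include <assert.h>
#include <errno.h>
#include <pthread.h>

int main(void)
{
        pthread_mutexattr_t a;
        pthread_mutex_t m;

        pthread_mutexattr_init(&a);
        pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &a);

        pthread_mutex_lock(&m);
        /* POSIX leaves destroying a locked mutex undefined;
         * glibc reports EBUSY here. */
        assert(pthread_mutex_destroy(&m) == EBUSY);

        pthread_mutex_unlock(&m);
        assert(pthread_mutex_destroy(&m) == 0);
        return 0;
}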
struct mgs_obd *mgs = &obd->u.mgs;
struct fs_db *fsdb;
cfs_list_t *tmp, *tmp2;
- cfs_down(&mgs->mgs_sem);
+ cfs_mutex_lock(&mgs->mgs_mutex);
cfs_list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
mgs_free_fsdb(obd, fsdb);
}
- cfs_up(&mgs->mgs_sem);
+ cfs_mutex_unlock(&mgs->mgs_mutex);
return 0;
}
struct fs_db *fsdb;
int rc = 0;
- cfs_down(&mgs->mgs_sem);
+ cfs_mutex_lock(&mgs->mgs_mutex);
fsdb = mgs_find_fsdb(obd, name);
if (fsdb) {
- cfs_up(&mgs->mgs_sem);
+ cfs_mutex_unlock(&mgs->mgs_mutex);
*dbh = fsdb;
return 0;
}
CDEBUG(D_MGS, "Creating new db\n");
fsdb = mgs_new_fsdb(obd, name);
- cfs_up(&mgs->mgs_sem);
+ cfs_mutex_unlock(&mgs->mgs_mutex);
if (!fsdb)
return -ENOMEM;
RETURN(rc);
}
-/* fsdb->fsdb_sem is already held in mgs_write_log_target*/
+/* fsdb->fsdb_mutex is already held in mgs_write_log_target*/
/* stolen from mgs_get_fsdb_from_llog */
static int mgs_steal_llog_for_mdt_from_client(struct obd_device *obd,
char *client_name,
the failover list. Modify mti->params for rewriting back at
server_register_target(). */
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
rc = mgs_write_log_add_failnid(obd, fsdb, mti);
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
RETURN(rc);
#endif
}
}
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
if (mti->mti_flags &
(LDD_F_VIRGIN | LDD_F_UPGRADE14 | LDD_F_WRITECONF)) {
OBD_FREE(buf, strlen(mti->mti_params) + 1);
out_up:
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
RETURN(rc);
}
RETURN(rc);
}
- cfs_down(&mgs->mgs_sem);
+ cfs_mutex_lock(&mgs->mgs_mutex);
/* Delete the fs db */
fsdb = mgs_find_fsdb(obd, fsname);
OBD_FREE(dirent, sizeof(*dirent));
}
- cfs_up(&mgs->mgs_sem);
+ cfs_mutex_unlock(&mgs->mgs_mutex);
RETURN(rc);
}
mti->mti_flags = rc | LDD_F_PARAM;
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
rc = mgs_write_log_param(obd, fsdb, mti, mti->mti_params);
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
/*
* Revoke lock so everyone updates. Should be alright if
}
}
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
if (canceled_label != NULL) {
OBD_ALLOC_PTR(mti);
cmd, fsname, poolname, ostname, label);
name_destroy(&logname);
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
/* request for update */
mgs_revoke_lock(obd, fsdb, CONFIG_T_CONFIG);
cfs_wait_for_completion(&fsdb->fsdb_notify_comp);
}
-/* caller must have held fsdb_sem */
+/* caller must have held fsdb_mutex */
static inline void ir_state_graduate(struct fs_db *fsdb)
{
struct mgs_obd *mgs = &fsdb->fsdb_obd->u.mgs;
return rc;
/* check ir state */
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
ir_state_graduate(fsdb);
switch (fsdb->fsdb_ir_state) {
case IR_FULL:
default:
LBUG();
}
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
if (notify) {
CDEBUG(D_MGS, "change fsr state of %s from %s to %s\n",
fsdb->fsdb_name, strings[fsdb->fsdb_ir_state], strings[state]);
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
if (state == IR_FULL && fsdb->fsdb_nonir_clients)
state = IR_PARTIAL;
fsdb->fsdb_ir_state = state;
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
return 0;
}
struct timeval tv_max;
struct timeval tv;
- /* mgs_live_seq_show() already holds fsdb_sem. */
+ /* mgs_live_seq_show() already holds fsdb_mutex. */
ir_state_graduate(fsdb);
seq_printf(seq, "\nimperative_recovery_state:\n");
!!(exp->exp_connect_flags & OBD_CONNECT_IMP_RECOV);
rc = -EEXIST;
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
/* search the export list first because it is shorter. */
cfs_spin_lock(&data->med_lock);
rc = 0;
}
cfs_spin_unlock(&data->med_lock);
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
if (new_fsc) {
class_export_put(new_fsc->mfc_export);
LASSERT(fsc->mfc_export == exp);
- cfs_down(&fsdb->fsdb_sem);
+ cfs_mutex_lock(&fsdb->fsdb_mutex);
cfs_list_del_init(&fsc->mfc_fsdb_list);
if (fsc->mfc_ir_capable == 0) {
--fsdb->fsdb_nonir_clients;
fsdb->fsdb_ir_state == IR_PARTIAL)
fsdb->fsdb_ir_state = IR_FULL;
}
- cfs_up(&fsdb->fsdb_sem);
+ cfs_mutex_unlock(&fsdb->fsdb_mutex);
cfs_list_del_init(&fsc->mfc_export_list);
class_export_put(fsc->mfc_export);
OBD_FREE_PTR(fsc);
}
}
-/* must be called with fsdb->fsdb_sem held */
+/* must be called with fsdb->fsdb_mutex held */
void mgs_fsc_cleanup_by_fsdb(struct fs_db *fsdb)
{
struct mgs_fsc *fsc, *tmp;
imp->imp_last_success_conn = 0;
imp->imp_state = LUSTRE_IMP_NEW;
imp->imp_obd = class_incref(obd, "import", imp);
- cfs_sema_init(&imp->imp_sec_mutex, 1);
+ cfs_mutex_init(&imp->imp_sec_mutex);
cfs_waitq_init(&imp->imp_recovery_waitq);
cfs_atomic_set(&imp->imp_refcount, 2);
if (!idarray)
RETURN(-ENOMEM);
- cfs_mutex_down(&obd->obd_olg.olg_cat_processing);
+ cfs_mutex_lock(&obd->obd_olg.olg_cat_processing);
rc = llog_get_cat_list(obd, name, 0, count, idarray);
if (rc)
GOTO(out, rc);
}
out:
- /* release semaphore */
+ /* release mutex */
- cfs_mutex_up(&obd->obd_olg.olg_cat_processing);
+ cfs_mutex_unlock(&obd->obd_olg.olg_cat_processing);
OBD_FREE_LARGE(idarray, size);
RETURN(rc);
ctxt->loc_olg = olg;
ctxt->loc_idx = index;
ctxt->loc_logops = op;
- cfs_sema_init(&ctxt->loc_sem, 1);
+ cfs_mutex_init(&ctxt->loc_mutex);
ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
* Global list of all sites on this node
*/
static CFS_LIST_HEAD(lu_sites);
-static CFS_DECLARE_MUTEX(lu_sites_guard);
+static CFS_DEFINE_MUTEX(lu_sites_guard);
/**
* Global environment used by site shrinker.
*/
void lu_site_fini(struct lu_site *s)
{
- cfs_down(&lu_sites_guard);
+ cfs_mutex_lock(&lu_sites_guard);
cfs_list_del_init(&s->ls_linkage);
- cfs_up(&lu_sites_guard);
+ cfs_mutex_unlock(&lu_sites_guard);
if (s->ls_obj_hash != NULL) {
cfs_hash_putref(s->ls_obj_hash);
int lu_site_init_finish(struct lu_site *s)
{
int result;
- cfs_down(&lu_sites_guard);
+ cfs_mutex_lock(&lu_sites_guard);
result = lu_context_refill(&lu_shrink_env.le_ctx);
if (result == 0)
cfs_list_add(&s->ls_linkage, &lu_sites);
- cfs_up(&lu_sites_guard);
+ cfs_mutex_unlock(&lu_sites_guard);
return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
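
lu_sites_guard is a file-scope static, which is why it uses CFS_DEFINE_MUTEX rather than a runtime cfs_mutex_init() call: statics can be initialized at compile time, while mutexes embedded in allocated objects (like the per-site and per-object locks elsewhere in this patch) must be initialized at runtime. The pthread analogue of the two styles:

#include <pthread.h>

/* Compile-time initialization, the analogue of defining a static
 * mutex with an initializer macro. */
static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER;

/* Run-time initialization, for a mutex inside an allocated object. */
struct site {
        pthread_mutex_t lock;
};

static void site_init(struct site *s)
{
        pthread_mutex_init(&s->lock, NULL);
}

static void sites_register(struct site *s)
{
        pthread_mutex_lock(&guard);
        (void)s;        /* placeholder: link s into the global list */
        pthread_mutex_unlock(&guard);
}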
CDEBUG(D_INODE, "Shrink %d objects\n", remain);
}
- cfs_down(&lu_sites_guard);
+ cfs_mutex_lock(&lu_sites_guard);
cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
if (shrink_param(sc, nr_to_scan) != 0) {
remain = lu_site_purge(&lu_shrink_env, s, remain);
break;
}
cfs_list_splice(&splice, lu_sites.prev);
- cfs_up(&lu_sites_guard);
+ cfs_mutex_unlock(&lu_sites_guard);
cached = (cached / 100) * sysctl_vfs_cache_pressure;
if (shrink_param(sc, nr_to_scan) == 0)
* conservatively. This should not be too bad, because this
* environment is global.
*/
- cfs_down(&lu_sites_guard);
+ cfs_mutex_lock(&lu_sites_guard);
result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
- cfs_up(&lu_sites_guard);
+ cfs_mutex_unlock(&lu_sites_guard);
if (result != 0)
return result;
* Tear shrinker environment down _after_ de-registering
* lu_global_key, because the latter has a value in the former.
*/
- cfs_down(&lu_sites_guard);
+ cfs_mutex_lock(&lu_sites_guard);
lu_env_fini(&lu_shrink_env);
- cfs_up(&lu_sites_guard);
+ cfs_mutex_unlock(&lu_sites_guard);
lu_ref_global_fini();
}
CFS_INIT_LIST_HEAD(&obd->obd_nid_stats);
cfs_spin_lock_init(&obd->obd_nid_lock);
cfs_spin_lock_init(&obd->obd_dev_lock);
- cfs_sema_init(&obd->obd_dev_sem, 1);
+ cfs_mutex_init(&obd->obd_dev_mutex);
cfs_spin_lock_init(&obd->obd_osfs_lock);
/* obd->obd_osfs_age must be set to a value in the distant
* past to guarantee a fresh statfs is fetched on mount. */
/*********** mount lookup *********/
-CFS_DECLARE_MUTEX(lustre_mount_info_lock);
+CFS_DEFINE_MUTEX(lustre_mount_info_lock);
static CFS_LIST_HEAD(server_mount_info_list);
static struct lustre_mount_info *server_find_mount(const char *name)
}
strcpy(name_cp, name);
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
if (server_find_mount(name)) {
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
OBD_FREE(lmi, sizeof(*lmi));
OBD_FREE(name_cp, strlen(name) + 1);
CERROR("Already registered %s\n", name);
lmi->lmi_mnt = mnt;
cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
CDEBUG(D_MOUNT, "reg_mnt %p from %s, vfscount=%d\n",
lmi->lmi_mnt, name, mnt_get_count(lmi->lmi_mnt));
struct lustre_mount_info *lmi;
ENTRY;
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
if (!lmi) {
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
CERROR("%s not registered\n", name);
RETURN(-ENOENT);
}
OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
cfs_list_del(&lmi->lmi_list_chain);
OBD_FREE(lmi, sizeof(*lmi));
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
RETURN(0);
}
struct lustre_sb_info *lsi;
ENTRY;
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(NULL);
struct lustre_mount_info *lmi;
ENTRY;
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
if (!lmi)
CERROR("Can't find mount for %s\n", name);
/* This might be the last one, can't deref after this */
unlock_mntput(mnt);
- cfs_down(&lustre_mount_info_lock);
+ cfs_mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- cfs_up(&lustre_mount_info_lock);
+ cfs_mutex_unlock(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(-ENOENT);
RETURN(rc);
}
-CFS_DECLARE_MUTEX(mgc_start_lock);
+CFS_DEFINE_MUTEX(mgc_start_lock);
/** Set up a mgc obd to process startup logs
*
RETURN(-EINVAL);
}
- cfs_mutex_down(&mgc_start_lock);
+ cfs_mutex_lock(&mgc_start_lock);
len = strlen(LUSTRE_MGC_OBDNAME) + strlen(libcfs_nid2str(nid)) + 1;
OBD_ALLOC(mgcname, len);
to the same mgc.*/
lsi->lsi_mgc = obd;
out_free:
- cfs_mutex_up(&mgc_start_lock);
+ cfs_mutex_unlock(&mgc_start_lock);
if (data)
OBD_FREE_PTR(data);
RETURN(-ENOENT);
lsi->lsi_mgc = NULL;
- cfs_mutex_down(&mgc_start_lock);
+ cfs_mutex_lock(&mgc_start_lock);
LASSERT(cfs_atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
if (!cfs_atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
/* This is not fatal, every client that stops
OBD_FREE(niduuid, len);
/* class_import_put will get rid of the additional connections */
- cfs_mutex_up(&mgc_start_lock);
+ cfs_mutex_unlock(&mgc_start_lock);
RETURN(rc);
}
RETURN(rc);
}
-CFS_DECLARE_MUTEX(server_start_lock);
+CFS_DEFINE_MUTEX(server_start_lock);
/* Stop MDS/OSS if nobody is using them */
static int server_stop_servers(int lddflags, int lsiflags)
int rc = 0;
ENTRY;
- cfs_mutex_down(&server_start_lock);
+ cfs_mutex_lock(&server_start_lock);
/* Either an MDT or an OST or neither */
/* if this was an MDT, and there are no more MDTs, clean up the MDS */
rc = err;
}
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
RETURN(rc);
}
/* If we're an MDT, make sure the global MDS is running */
if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_MDT) {
/* make sure the MDS is started */
- cfs_mutex_down(&server_start_lock);
+ cfs_mutex_lock(&server_start_lock);
obd = class_name2obd(LUSTRE_MDS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_MDS_OBDNAME,
LUSTRE_MDS_OBDNAME"_uuid",
0, 0);
if (rc) {
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
CERROR("failed to start MDS: %d\n", rc);
RETURN(rc);
}
}
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
}
#endif
/* If we're an OST, make sure the global OSS is running */
if (IS_OST(lsi->lsi_ldd)) {
/* make sure OSS is started */
- cfs_mutex_down(&server_start_lock);
+ cfs_mutex_lock(&server_start_lock);
obd = class_name2obd(LUSTRE_OSS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_OSS_OBDNAME,
LUSTRE_OSS_OBDNAME"_uuid",
0, 0);
if (rc) {
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
CERROR("failed to start OSS: %d\n", rc);
RETURN(rc);
}
}
- cfs_mutex_up(&server_start_lock);
+ cfs_mutex_unlock(&server_start_lock);
}
/* Set the mgc fs to our server disk. This allows the MGC to
if (!exp->exp_obd->obd_replayable || oti == NULL)
RETURN(rc);
- cfs_mutex_down(&ted->ted_lcd_lock);
+ cfs_mutex_lock(&ted->ted_lcd_lock);
lcd = ted->ted_lcd;
/* if the export has already been disconnected, we have no last_rcvd slot;
* just update the server data with the latest transno */
if (lcd == NULL) {
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
CWARN("commit transaction for disconnected client %s: rc %d\n",
exp->exp_client_uuid.uuid, rc);
err = filter_update_server_data(exp->exp_obd);
exp->exp_vbr_failed = 1;
cfs_spin_unlock(&exp->exp_lock);
cfs_spin_unlock(&obt->obt_lut->lut_translock);
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
RETURN(-EOVERFLOW);
}
}
CDEBUG(log_pri, "wrote trans "LPU64" for client %s at #%d: err = %d\n",
last_rcvd, lcd->lcd_uuid, ted->ted_lr_idx, err);
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
RETURN(rc);
}
ted->ted_lr_idx = cl_idx;
ted->ted_lr_off = le32_to_cpu(lsd->lsd_client_start) +
cl_idx * le16_to_cpu(lsd->lsd_client_size);
- cfs_init_mutex(&ted->ted_lcd_lock);
+ cfs_mutex_init(&ted->ted_lcd_lock);
LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
CDEBUG(D_INFO, "client at index %d (%llu) with UUID '%s' added\n",
* be in server data or in client data in case of failure */
filter_update_server_data(exp->exp_obd);
- cfs_mutex_down(&ted->ted_lcd_lock);
+ cfs_mutex_lock(&ted->ted_lcd_lock);
memset(ted->ted_lcd->lcd_uuid, 0, sizeof ted->ted_lcd->lcd_uuid);
rc = fsfilt_write_record(exp->exp_obd, obt->obt_rcvd_filp,
ted->ted_lcd,
sizeof(*ted->ted_lcd), &off, 0);
- cfs_mutex_up(&ted->ted_lcd_lock);
+ cfs_mutex_unlock(&ted->ted_lcd_lock);
pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
CDEBUG(rc == 0 ? D_INFO : D_ERROR,
struct filter_obd *filter = &obd->u.filter;
int old_count, group, rc = 0;
- cfs_down(&filter->fo_init_lock);
+ cfs_mutex_lock(&filter->fo_init_lock);
old_count = filter->fo_group_count;
for (group = old_count; group <= last_group; group++) {
rc = filter_read_group_internal(obd, group, create);
if (rc != 0)
break;
}
- cfs_up(&filter->fo_init_lock);
+ cfs_mutex_unlock(&filter->fo_init_lock);
return rc;
}
* therefore, that res->lr_lvb_data cannot increase beyond the
* end of already granted lock. As a result, it is safe to
* check against "stale" reply_lvb->lvb_size value without
- * res->lr_lvb_sem.
+ * res->lr_lvb_mutex.
*/
arg.size = reply_lvb->lvb_size;
arg.victim = &l;
obd->obd_lvfs_ctxt.fs = get_ds();
obd->obd_lvfs_ctxt.cb_ops = filter_lvfs_ops;
- cfs_init_mutex(&filter->fo_init_lock);
+ cfs_mutex_init(&filter->fo_init_lock);
filter->fo_committed_group = 0;
filter->fo_destroys_in_progress = 0;
for (i = 0; i < 32; i++)
- cfs_sema_init(&filter->fo_create_locks[i], 1);
+ cfs_mutex_init(&filter->fo_create_locks[i]);
cfs_spin_lock_init(&filter->fo_objidlock);
CFS_INIT_LIST_HEAD(&filter->fo_export_list);
- cfs_sema_init(&filter->fo_alloc_lock, 1);
+ cfs_mutex_init(&filter->fo_alloc_lock);
init_brw_stats(&filter->fo_filter_stats);
cfs_spin_lock_init(&filter->fo_flags_lock);
filter->fo_read_cache = 1; /* enable read-only cache by default */
* This is safe to do, as llog is already synchronized
* and its import may go away.
*/
- cfs_mutex_down(&ctxt->loc_sem);
+ cfs_mutex_lock(&ctxt->loc_mutex);
if (ctxt->loc_imp) {
class_import_put(ctxt->loc_imp);
ctxt->loc_imp = NULL;
}
- cfs_mutex_up(&ctxt->loc_sem);
+ cfs_mutex_unlock(&ctxt->loc_mutex);
llog_ctxt_put(ctxt);
}
if (filter->fo_lcm) {
- cfs_mutex_down(&ctxt->loc_sem);
+ cfs_mutex_lock(&ctxt->loc_mutex);
llog_recov_thread_fini(filter->fo_lcm, obd->obd_force);
filter->fo_lcm = NULL;
- cfs_mutex_up(&ctxt->loc_sem);
+ cfs_mutex_unlock(&ctxt->loc_mutex);
}
RETURN(filter_olg_fini(&obd->obd_olg));
}
int skip_orphan;
ENTRY;
- LASSERT(down_trylock(&filter->fo_create_locks[oa->o_seq]) != 0);
+ LASSERT_MUTEX_LOCKED(&filter->fo_create_locks[oa->o_seq]);
memset(&doa, 0, sizeof(doa));
}
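
The assertion change here is behavioural, not just cosmetic: down_trylock() returns 0 when it succeeds, so LASSERT(down_trylock(sem) != 0) would actually acquire the semaphore in exactly the case where the assertion is about to fail, whereas the mutex form can rely on mutex_is_locked(), a non-destructive state check. A portable userspace probe still has to use trylock, which is why the failure path must release what it accidentally took; a hypothetical helper:

#include <assert.h>
#include <pthread.h>

/* Assert that *somebody* holds m.  trylock is the only portable
 * probe, so if it succeeds the lock was free: drop it again before
 * failing the assertion. */
static void assert_mutex_locked(pthread_mutex_t *m)
{
        if (pthread_mutex_trylock(m) == 0) {
                pthread_mutex_unlock(m);
                assert(!"mutex was expected to be held");
        }
        /* nonzero (EBUSY): held, as expected */
}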
/* This causes inflight precreates to abort and drop lock */
cfs_set_bit(group, &filter->fo_destroys_in_progress);
- cfs_down(&filter->fo_create_locks[group]);
+ cfs_mutex_lock(&filter->fo_create_locks[group]);
if (!cfs_test_bit(group, &filter->fo_destroys_in_progress)) {
CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
exp->exp_obd->obd_name, group);
- cfs_up(&filter->fo_create_locks[group]);
+ cfs_mutex_unlock(&filter->fo_create_locks[group]);
RETURN(0);
}
diff = oa->o_id - last;
cfs_clear_bit(group, &filter->fo_destroys_in_progress);
}
} else {
- cfs_down(&filter->fo_create_locks[group]);
+ cfs_mutex_lock(&filter->fo_create_locks[group]);
if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
CERROR("%s: dropping old precreate request\n",
obd->obd_name);
/* else diff == 0 */
GOTO(out, rc = 0);
out:
- cfs_up(&filter->fo_create_locks[group]);
+ cfs_mutex_unlock(&filter->fo_create_locks[group]);
return rc;
}
filter = &obd->u.filter;
- LASSERT(down_trylock(&filter->fo_create_locks[group]) != 0);
+ LASSERT_MUTEX_LOCKED(&filter->fo_create_locks[group]);
OBD_FAIL_TIMEOUT(OBD_FAIL_TGT_DELAY_PRECREATE, obd_timeout / 2);
rc = -EINVAL;
} else {
diff = 1;
- cfs_down(&filter->fo_create_locks[oa->o_seq]);
+ cfs_mutex_lock(&filter->fo_create_locks[oa->o_seq]);
rc = filter_precreate(obd, oa, oa->o_seq, &diff);
- cfs_up(&filter->fo_create_locks[oa->o_seq]);
+ cfs_mutex_unlock(&filter->fo_create_locks[oa->o_seq]);
}
} else {
rc = filter_handle_precreate(exp, oa, oa->o_seq, oti);
struct inode *inode = dchild->d_inode;
int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
int rc, rc2, create;
- cfs_semaphore_t *sem;
+ cfs_mutex_t *mutex;
ENTRY;
LASSERTF(iobuf->dr_npages <= iobuf->dr_max_pages, "%d,%d\n",
if (iobuf->dr_npages == 0)
RETURN(0);
create = 0;
- sem = NULL;
+ mutex = NULL;
} else {
LASSERTF(rw == OBD_BRW_WRITE, "%x\n", rw);
LASSERT(iobuf->dr_npages > 0);
create = 1;
- sem = &obd->u.filter.fo_alloc_lock;
+ mutex = &obd->u.filter.fo_alloc_lock;
lquota_enforce(filter_quota_interface_ref, obd,
iobuf->dr_ignore_quota);
} else {
rc = fsfilt_map_inode_pages(obd, inode, iobuf->dr_pages,
iobuf->dr_npages, iobuf->dr_blocks,
- obdfilter_created_scratchpad, create, sem);
+ obdfilter_created_scratchpad, create, mutex);
}
if (rw == OBD_BRW_WRITE) {
return 0;
}
-/* Called with res->lr_lvb_sem held */
+/* Called with res->lr_lvb_mutex held */
static int filter_lvbo_init(struct ldlm_resource *res)
{
struct ost_lvb *lvb = NULL;
ENTRY;
LASSERT(res);
- LASSERT_SEM_LOCKED(&res->lr_lvb_sem);
+ LASSERT_MUTEX_LOCKED(&res->lr_lvb_mutex);
if (res->lr_lvb_data)
RETURN(0);
LASSERT(olg == &obd->obd_olg);
- cfs_mutex_down(&olg->olg_cat_processing);
+ cfs_mutex_lock(&olg->olg_cat_processing);
rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
if (rc) {
CERROR("rc: %d\n", rc);
}
out:
- cfs_mutex_up(&olg->olg_cat_processing);
+ cfs_mutex_unlock(&olg->olg_cat_processing);
return rc;
}
ptlrpc_add_rqs_to_pool);
CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
- cfs_sema_init(&cli->cl_grant_sem, 1);
ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
}
lprocfs_ost_init_vars(&lvars);
lprocfs_obd_setup(obd, lvars.obd_vars);
- cfs_sema_init(&ost->ost_health_sem, 1);
+ cfs_mutex_init(&ost->ost_health_mutex);
if (oss_num_threads) {
/* If oss_num_threads is set, it is the min and the max. */
/* there is no recovery for the OST OBD; all recovery is controlled
* by the obdfilter OBD */
LASSERT(obd->obd_recovering == 0);
- cfs_down(&ost->ost_health_sem);
+ cfs_mutex_lock(&ost->ost_health_mutex);
ptlrpc_unregister_service(ost->ost_service);
ptlrpc_unregister_service(ost->ost_create_service);
ptlrpc_unregister_service(ost->ost_io_service);
ost->ost_service = NULL;
ost->ost_create_service = NULL;
- cfs_up(&ost->ost_health_sem);
+ cfs_mutex_unlock(&ost->ost_health_mutex);
lprocfs_obd_cleanup(obd);
struct ost_obd *ost = &obd->u.ost;
int rc = 0;
- cfs_down(&ost->ost_health_sem);
+ cfs_mutex_lock(&ost->ost_health_mutex);
rc |= ptlrpc_service_health_check(ost->ost_service);
rc |= ptlrpc_service_health_check(ost->ost_create_service);
rc |= ptlrpc_service_health_check(ost->ost_io_service);
- cfs_up(&ost->ost_health_sem);
+ cfs_mutex_unlock(&ost->ost_health_mutex);
/*
* health_check to return 0 on healthy
#include <libcfs/list.h>
#define LLOG_CLIENT_ENTRY(ctxt, imp) do { \
- cfs_mutex_down(&ctxt->loc_sem); \
+ cfs_mutex_lock(&ctxt->loc_mutex); \
if (ctxt->loc_imp) { \
imp = class_import_get(ctxt->loc_imp); \
} else { \
"but I'll try again next time. Not fatal.\n", \
ctxt->loc_idx); \
imp = NULL; \
- cfs_mutex_up(&ctxt->loc_sem); \
+ cfs_mutex_unlock(&ctxt->loc_mutex); \
return (-EINVAL); \
} \
- cfs_mutex_up(&ctxt->loc_sem); \
+ cfs_mutex_unlock(&ctxt->loc_mutex); \
} while(0)
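
LLOG_CLIENT_ENTRY pins ctxt->loc_imp by taking a counted reference while loc_mutex is held, because the import pointer can be swapped or cleared concurrently (see llog_receptor_accept() and llog_initiator_connect() below); after the unlock only the private reference is used. A minimal pthread model with a hypothetical refcounted object:

#include <pthread.h>
#include <stdlib.h>

struct imp {
        int refs;                       /* protected by ctx_mutex */
};

static pthread_mutex_t ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct imp *cur;                 /* may change at any time */

/* Take a private reference under the mutex; never touch 'cur'
 * again after unlocking. */
static struct imp *imp_get(void)
{
        struct imp *i;

        pthread_mutex_lock(&ctx_mutex);
        i = cur;
        if (i != NULL)
                i->refs++;
        pthread_mutex_unlock(&ctx_mutex);
        return i;
}

static void imp_put(struct imp *i)
{
        int last;

        pthread_mutex_lock(&ctx_mutex);
        last = (--i->refs == 0);
        pthread_mutex_unlock(&ctx_mutex);
        if (last)
                free(i);
}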
#define LLOG_CLIENT_EXIT(ctxt, imp) do { \
- cfs_mutex_down(&ctxt->loc_sem); \
+ cfs_mutex_lock(&ctxt->loc_mutex); \
if (ctxt->loc_imp != imp) \
CWARN("loc_imp has changed from %p to %p\n", \
ctxt->loc_imp, imp); \
class_import_put(imp); \
- cfs_mutex_up(&ctxt->loc_sem); \
+ cfs_mutex_unlock(&ctxt->loc_mutex); \
} while(0)
/* This is a callback from the llog_* functions.
ENTRY;
LASSERT(ctxt);
- cfs_mutex_down(&ctxt->loc_sem);
+ cfs_mutex_lock(&ctxt->loc_mutex);
if (ctxt->loc_imp != imp) {
if (ctxt->loc_imp) {
CWARN("changing the import %p - %p\n",
}
ctxt->loc_imp = class_import_get(imp);
}
- cfs_mutex_up(&ctxt->loc_sem);
+ cfs_mutex_unlock(&ctxt->loc_mutex);
RETURN(0);
}
EXPORT_SYMBOL(llog_receptor_accept);
new_imp = ctxt->loc_obd->u.cli.cl_import;
LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp,
"%p - %p\n", ctxt->loc_imp, new_imp);
- cfs_mutex_down(&ctxt->loc_sem);
+ cfs_mutex_lock(&ctxt->loc_mutex);
if (ctxt->loc_imp != new_imp) {
if (ctxt->loc_imp)
class_import_put(ctxt->loc_imp);
ctxt->loc_imp = class_import_get(new_imp);
}
- cfs_mutex_up(&ctxt->loc_sem);
+ cfs_mutex_unlock(&ctxt->loc_mutex);
RETURN(0);
}
EXPORT_SYMBOL(llog_initiator_connect);
if (!idarray)
GOTO(release_ctxt, rc = -ENOMEM);
- cfs_mutex_down(&obd->obd_olg.olg_cat_processing);
+ cfs_mutex_lock(&obd->obd_olg.olg_cat_processing);
rc = llog_get_cat_list(obd, name, 0, count, idarray);
if (rc)
GOTO(out_free, rc);
out_pop:
pop_ctxt(&saved, &ctxt->loc_exp->exp_obd->obd_lvfs_ctxt, NULL);
out_free:
- cfs_mutex_up(&obd->obd_olg.olg_cat_processing);
+ cfs_mutex_unlock(&obd->obd_olg.olg_cat_processing);
OBD_FREE_LARGE(idarray, size);
release_ctxt:
llog_ctxt_put(ctxt);
#include <obd_class.h>
#include "ptlrpc_internal.h"
-cfs_semaphore_t pinger_sem;
+cfs_mutex_t pinger_mutex;
static CFS_LIST_HEAD(pinger_imports);
static cfs_list_t timeout_list = CFS_LIST_HEAD_INIT(timeout_list);
struct ptlrpc_request *
cfs_time_t timeout = PING_INTERVAL;
/* The timeout list is sorted in increasing order */
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
int ti_timeout = item->ti_timeout;
if (timeout > ti_timeout)
timeout = ti_timeout;
break;
}
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
cfs_time_current());
struct timeout_item *item;
cfs_list_t *iter;
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
item->ti_cb(item, item->ti_cb_data);
}
cfs_time_seconds(PING_INTERVAL))))
ptlrpc_update_next_ping(imp, 0);
}
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
/* update memory usage info */
obd_update_maxusage();
RETURN(-EALREADY);
ptlrpc_pinger_remove_timeouts();
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
thread_set_flags(pinger_thread, SVC_STOPPING);
cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
l_wait_event(pinger_thread->t_ctl_waitq,
thread_is_stopped(pinger_thread), &lwi);
if (!cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-EALREADY);
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
CDEBUG(D_HA, "adding pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
/* if we add to pinger we want recovery on this import */
class_import_get(imp);
ptlrpc_pinger_wake_up();
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
RETURN(0);
}
if (cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-ENOENT);
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
cfs_list_del_init(&imp->imp_pinger_chain);
CDEBUG(D_HA, "removing pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
/* if we remove from pinger we don't want recovery on this import */
imp->imp_obd->obd_no_recov = 1;
class_import_put(imp);
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
RETURN(0);
}
{
struct timeout_item *item, *tmp;
- LASSERT_SEM_LOCKED(&pinger_sem);
+ LASSERT_MUTEX_LOCKED(&pinger_mutex);
cfs_list_for_each_entry(item, &timeout_list, ti_chain)
if (item->ti_event == event)
{
struct timeout_item *ti;
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
ti = ptlrpc_pinger_register_timeout(time, event, cb, data);
if (!ti) {
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
return (-EINVAL);
}
cfs_list_add(obd_list, &ti->ti_obd_list);
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
return 0;
}
if (cfs_list_empty(obd_list))
return 0;
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
cfs_list_del_init(obd_list);
/**
* If there are no obd devices attached to the timeout event
cfs_list_del(&ti->ti_chain);
OBD_FREE_PTR(ti);
}
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
return 0;
}
{
struct timeout_item *item, *tmp;
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
cfs_list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
LASSERT(cfs_list_empty(&item->ti_obd_list));
cfs_list_del(&item->ti_chain);
OBD_FREE_PTR(item);
}
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
return 0;
}
set = pd->pd_set;
/* add rpcs into set */
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
cfs_list_for_each(iter, &pinger_imports) {
struct obd_import *imp = cfs_list_entry(iter, struct obd_import,
imp_pinger_chain);
}
}
pd->pd_this_ping = curtime;
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
/* Might be empty, that's OK. */
if (cfs_atomic_read(&set->set_remaining) == 0)
}
/* Expire all the requests that didn't come back. */
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
cfs_list_for_each(iter, &set->set_requests) {
req = cfs_list_entry(iter, struct ptlrpc_request,
rq_set_chain);
cfs_spin_unlock(&imp->imp_lock);
cfs_atomic_dec(&set->set_remaining);
}
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
ptlrpc_set_destroy(set);
pd->pd_set = NULL;
void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
{
#ifdef ENABLE_PINGER
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
ptlrpc_update_next_ping(imp, 0);
if (pinger_args.pd_set == NULL &&
cfs_time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
imp->imp_next_ping, cfs_time_current());
pinger_args.pd_next_ping = imp->imp_next_ping;
}
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
#endif
}
void ptlrpc_pinger_commit_expected(struct obd_import *imp)
{
#ifdef ENABLE_PINGER
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
ptlrpc_update_next_ping(imp, 1);
if (pinger_args.pd_set == NULL &&
cfs_time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
imp->imp_next_ping, cfs_time_current());
pinger_args.pd_next_ping = imp->imp_next_ping;
}
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
#endif
}
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
ptlrpc_pinger_sending_on_import(imp);
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
cfs_list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
class_import_get(imp);
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
RETURN(0);
}
if (cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-ENOENT);
- cfs_mutex_down(&pinger_sem);
+ cfs_mutex_lock(&pinger_mutex);
cfs_list_del_init(&imp->imp_pinger_chain);
CDEBUG(D_HA, "removing pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
class_import_put(imp);
- cfs_mutex_up(&pinger_sem);
+ cfs_mutex_unlock(&pinger_mutex);
RETURN(0);
}
extern cfs_spinlock_t ptlrpc_rs_debug_lock;
#endif
extern cfs_spinlock_t ptlrpc_all_services_lock;
-extern cfs_semaphore_t pinger_sem;
-extern cfs_semaphore_t ptlrpcd_sem;
+extern cfs_mutex_t pinger_mutex;
+extern cfs_mutex_t ptlrpcd_mutex;
__init int ptlrpc_init(void)
{
cfs_spin_lock_init(&ptlrpc_rs_debug_lock);
#endif
cfs_spin_lock_init(&ptlrpc_all_services_lock);
- cfs_init_mutex(&pinger_sem);
- cfs_init_mutex(&ptlrpcd_sem);
+ cfs_mutex_init(&pinger_mutex);
+ cfs_mutex_init(&ptlrpcd_mutex);
ptlrpc_init_xid();
rc = req_layout_init();
#endif
static struct ptlrpcd *ptlrpcds;
-cfs_semaphore_t ptlrpcd_sem;
+cfs_mutex_t ptlrpcd_mutex;
static int ptlrpcd_users = 0;
void ptlrpcd_wake(struct ptlrpc_request *req)
int rc = 0;
ENTRY;
- cfs_mutex_down(&ptlrpcd_sem);
+ cfs_mutex_lock(&ptlrpcd_mutex);
if (++ptlrpcd_users == 1)
rc = ptlrpcd_init();
- cfs_mutex_up(&ptlrpcd_sem);
+ cfs_mutex_unlock(&ptlrpcd_mutex);
RETURN(rc);
}
void ptlrpcd_decref(void)
{
- cfs_mutex_down(&ptlrpcd_sem);
+ cfs_mutex_lock(&ptlrpcd_mutex);
if (--ptlrpcd_users == 0)
ptlrpcd_fini();
- cfs_mutex_up(&ptlrpcd_sem);
+ cfs_mutex_unlock(&ptlrpcd_mutex);
}
/** @} ptlrpcd */
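
ptlrpcd_addref()/ptlrpcd_decref() above are the classic refcounted-singleton idiom: the users count and the init/fini calls are all serialized by one mutex, so the shared threads start exactly once and stop with the last user. The same shape in portable C, with hypothetical svc_init()/svc_fini():

#include <pthread.h>

static pthread_mutex_t users_mutex = PTHREAD_MUTEX_INITIALIZER;
static int users;

static int  svc_init(void) { return 0; }    /* start shared threads */
static void svc_fini(void) { }              /* stop them */

static int svc_addref(void)
{
        int rc = 0;

        pthread_mutex_lock(&users_mutex);
        if (++users == 1)
                rc = svc_init();    /* first user starts the service */
        pthread_mutex_unlock(&users_mutex);
        return rc;                  /* rollback on failure omitted */
}

static void svc_decref(void)
{
        pthread_mutex_lock(&users_mutex);
        if (--users == 0)
                svc_fini();         /* last user stops it */
        pthread_mutex_unlock(&users_mutex);
}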
llcd_print(llcd, __FUNCTION__, __LINE__);
LBUG();
}
- LASSERT_SEM_LOCKED(&ctxt->loc_sem);
+ LASSERT_MUTEX_LOCKED(&ctxt->loc_mutex);
if (llcd->llcd_cookiebytes == 0)
GOTO(exit, rc = 0);
llcd_attach(struct llog_ctxt *ctxt, struct llog_canceld_ctxt *llcd)
{
LASSERT(ctxt != NULL && llcd != NULL);
- LASSERT_SEM_LOCKED(&ctxt->loc_sem);
+ LASSERT_MUTEX_LOCKED(&ctxt->loc_mutex);
LASSERT(ctxt->loc_llcd == NULL);
llcd->llcd_ctxt = llog_ctxt_get(ctxt);
ctxt->loc_llcd = llcd;
struct llog_canceld_ctxt *llcd;
LASSERT(ctxt != NULL);
- LASSERT_SEM_LOCKED(&ctxt->loc_sem);
+ LASSERT_MUTEX_LOCKED(&ctxt->loc_mutex);
llcd = ctxt->loc_llcd;
if (!llcd)
/*
* Start recovery in separate thread.
*/
- cfs_mutex_down(&ctxt->loc_sem);
+ cfs_mutex_lock(&ctxt->loc_mutex);
ctxt->loc_gen = *gen;
rc = llog_recov_thread_replay(ctxt, ctxt->llog_proc_cb, logid);
- cfs_mutex_up(&ctxt->loc_sem);
+ cfs_mutex_unlock(&ctxt->loc_mutex);
RETURN(rc);
}
LASSERT(ctxt != NULL);
- cfs_mutex_down(&ctxt->loc_sem);
+ cfs_mutex_lock(&ctxt->loc_mutex);
if (!ctxt->loc_lcm) {
CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt);
GOTO(out, rc = -ENODEV);
out:
if (rc)
llcd_put(ctxt);
- cfs_mutex_up(&ctxt->loc_sem);
+ cfs_mutex_unlock(&ctxt->loc_mutex);
return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);
/*
* Flush any remaining llcd.
*/
- cfs_mutex_down(&ctxt->loc_sem);
+ cfs_mutex_lock(&ctxt->loc_mutex);
if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
/*
* This is ost->mds connection, we can't be sure that mds
*/
CDEBUG(D_RPCTRACE, "Kill cached llcd\n");
llcd_put(ctxt);
- cfs_mutex_up(&ctxt->loc_sem);
+ cfs_mutex_unlock(&ctxt->loc_mutex);
} else {
/*
* This is either llog_sync() from generic llog code or a sync of
* llcds to the target, waiting for completion.
*/
CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
- cfs_mutex_up(&ctxt->loc_sem);
+ cfs_mutex_unlock(&ctxt->loc_mutex);
rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
}
RETURN(rc);
static
struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
{
- static CFS_DECLARE_MUTEX(load_mutex);
+ static CFS_DEFINE_MUTEX(load_mutex);
static cfs_atomic_t loaded = CFS_ATOMIC_INIT(0);
struct ptlrpc_sec_policy *policy;
__u16 number = SPTLRPC_FLVR_POLICY(flavor);
break;
/* try to load gss module, once */
- cfs_mutex_down(&load_mutex);
+ cfs_mutex_lock(&load_mutex);
if (cfs_atomic_read(&loaded) == 0) {
if (cfs_request_module("ptlrpc_gss") == 0)
CDEBUG(D_SEC,
cfs_atomic_set(&loaded, 1);
}
- cfs_mutex_up(&load_mutex);
+ cfs_mutex_unlock(&load_mutex);
}
return policy;
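
The load_mutex/loaded pair above is the usual "attempt an expensive step at most once" idiom: re-check the flag under the static mutex, then set it whether or not the attempt succeeded so nobody retries. A pthread sketch with a hypothetical try_load():

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t load_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int loaded;

static void try_load(void) { /* e.g. load a module */ }

static void load_once(void)
{
        if (atomic_load(&loaded))
                return;                 /* fast path, no lock */

        pthread_mutex_lock(&load_mutex);
        if (atomic_load(&loaded) == 0) {
                try_load();
                /* set even on failure so we only ever try once */
                atomic_store(&loaded, 1);
        }
        pthread_mutex_unlock(&load_mutex);
}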
sptlrpc_flavor2name(&sf, str, sizeof(str)));
}
- cfs_mutex_down(&imp->imp_sec_mutex);
+ cfs_mutex_lock(&imp->imp_sec_mutex);
newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
if (newsec) {
rc = -EPERM;
}
- cfs_mutex_up(&imp->imp_sec_mutex);
+ cfs_mutex_unlock(&imp->imp_sec_mutex);
out:
sptlrpc_sec_put(sec);
RETURN(rc);
static int enc_pools_add_pages(int npages)
{
- static CFS_DECLARE_MUTEX(sem_add_pages);
+ static CFS_DEFINE_MUTEX(add_pages_mutex);
cfs_page_t ***pools;
int npools, alloced = 0;
int i, j, rc = -ENOMEM;
if (npages < PTLRPC_MAX_BRW_PAGES)
npages = PTLRPC_MAX_BRW_PAGES;
- cfs_down(&sem_add_pages);
+ cfs_mutex_lock(&add_pages_mutex);
if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
CERROR("Failed to allocate %d enc pages\n", npages);
}
- cfs_up(&sem_add_pages);
+ cfs_mutex_unlock(&add_pages_mutex);
return rc;
}
#ifdef HAVE_QUOTA_SUPPORT
-/* lock ordering: mds->mds_qonoff_sem > dquot->dq_sem > lqs->lqs_lock */
+/* lock ordering: mds->mds_qonoff_sem > dquot->dq_mutex > lqs->lqs_lock */
static cfs_list_t lustre_dquot_hash[NR_DQHASH];
static cfs_rwlock_t dquot_hash_lock = CFS_RW_LOCK_UNLOCKED;
RETURN(NULL);
CFS_INIT_LIST_HEAD(&dquot->dq_hash);
- cfs_init_mutex_locked(&dquot->dq_sem);
+ cfs_mutex_init(&dquot->dq_mutex);
+ cfs_mutex_lock(&dquot->dq_mutex);
cfs_atomic_set(&dquot->dq_refcnt, 1);
dquot->dq_info = lqi;
dquot->dq_id = id;
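
This hunk is the one place in the patch where the conversion is not one-for-one: cfs_init_mutex_locked() created a semaphore that was already held, and mutexes have no such initializer, so the replacement initializes and immediately locks. The dquot is published locked, every lookup path must unlock it exactly once (hence the added unlock on the found-an-existing-dquot path just below), and, unlike a semaphore, a mutex must be released by the same task that locked it. A pthread sketch of the create-locked pattern:

#include <pthread.h>
#include <stdlib.h>

struct dq {
        pthread_mutex_t lock;
        int             filled;
};

/* There is no "init locked" for mutexes: initialize, then lock,
 * before the object becomes visible to anyone else.  The creating
 * thread must also be the one that later unlocks. */
static struct dq *dq_alloc_locked(void)
{
        struct dq *d = calloc(1, sizeof(*d));

        if (d == NULL)
                return NULL;
        pthread_mutex_init(&d->lock, NULL);
        pthread_mutex_lock(&d->lock);
        return d;
}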
if ((dquot = find_dquot(hashent, lqi, id, type)) != NULL) {
cfs_atomic_inc(&dquot->dq_refcnt);
cfs_read_unlock(&dquot_hash_lock);
+ cfs_mutex_unlock(&empty->dq_mutex);
free_dq = 1;
} else {
int rc;
dquot = empty;
rc = fsfilt_dquot(obd, dquot, QFILE_RD_DQUOT);
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
if (rc) {
CERROR("can't read dquot from admin quotafile! "
"(rc:%d)\n", rc);
if (!oqaq)
GOTO(out, rc = -ENOMEM);
- cfs_down(&dquot->dq_sem);
+ cfs_mutex_lock(&dquot->dq_mutex);
init_oqaq(oqaq, qctxt, id, type);
rc = dquot_create_oqaq(qctxt, dquot, ost_num, mdt_num,
else
qid[USRQUOTA] = dquot->dq_id;
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
rc = qctxt_adjust_qunit(obd, qctxt, qid, is_blk, 0, NULL);
if (rc == -EDQUOT || rc == -EBUSY) {
RETURN(rc);
out_sem:
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
goto out;
}
DQUOT_DEBUG(dquot, "get dquot in dqacq_handler\n");
QINFO_DEBUG(dquot->dq_info, "get dquot in dqadq_handler\n");
- cfs_down(&dquot->dq_sem);
+ cfs_mutex_lock(&dquot->dq_mutex);
if (QDATA_IS_BLK(qdata)) {
grace = info->qi_info[QDATA_IS_GRP(qdata)].dqi_bgrace;
rc = fsfilt_dquot(obd, dquot, QFILE_WR_DQUOT);
EXIT;
out:
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
cfs_up_write(&mds->mds_qonoff_sem);
lustre_dqput(dquot);
if (rc != -EDQUOT)
if (!dquot || !oqaq)
RETURN(-EINVAL);
- LASSERT_SEM_LOCKED(&dquot->dq_sem);
+ LASSERT_MUTEX_LOCKED(&dquot->dq_mutex);
LASSERT(oqaq->qaq_iunit_sz);
LASSERT(oqaq->qaq_bunit_sz);
if (rc)
GOTO(out, rc);
- cfs_down(&dquot->dq_sem);
+ cfs_mutex_lock(&dquot->dq_mutex);
cfs_spin_lock(&lqs->lqs_lock);
if (LQS_IS_SETQUOTA(lqs) || LQS_IS_RECOVERY(lqs)) {
cfs_spin_unlock(&lqs->lqs_lock);
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
GOTO(skip, rc = -EBUSY);
}
LQS_SET_SETQUOTA(lqs);
rc = fsfilt_dquot(obd, dquot, QFILE_WR_DQUOT);
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
if (rc) {
CERROR("set limit failed! (rc:%d)\n", rc);
orig_set = ihardlimit || isoftlimit;
now_set = dqblk->dqb_ihardlimit || dqblk->dqb_isoftlimit;
if (dqblk->dqb_valid & QIF_ILIMITS && orig_set != now_set) {
- cfs_down(&dquot->dq_sem);
+ cfs_mutex_lock(&dquot->dq_mutex);
dquot->dq_dqb.dqb_curinodes = 0;
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
rc = mds_init_slave_ilimits(obd, oqctl, orig_set);
if (rc) {
CERROR("init slave ilimits failed! (rc:%d)\n", rc);
orig_set = bhardlimit || bsoftlimit;
now_set = dqblk->dqb_bhardlimit || dqblk->dqb_bsoftlimit;
if (dqblk->dqb_valid & QIF_BLIMITS && orig_set != now_set) {
- cfs_down(&dquot->dq_sem);
+ cfs_mutex_lock(&dquot->dq_mutex);
dquot->dq_dqb.dqb_curspace = 0;
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
rc = mds_init_slave_blimits(obd, oqctl, orig_set);
if (rc) {
CERROR("init slave blimits failed! (rc:%d)\n", rc);
revoke_out:
cfs_down_write(&mds->mds_qonoff_sem);
- cfs_down(&dquot->dq_sem);
+ cfs_mutex_lock(&dquot->dq_mutex);
if (rc) {
/* cancel previous setting */
dquot->dq_dqb.dqb_ihardlimit = ihardlimit;
dquot->dq_dqb.dqb_itime = itime;
}
rc2 = fsfilt_dquot(obd, dquot, QFILE_WR_DQUOT);
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
update_fail:
cfs_spin_lock(&lqs->lqs_lock);
LQS_CLEAR_SETQUOTA(lqs);
if (IS_ERR(dquot))
GOTO(out, rc = PTR_ERR(dquot));
- cfs_down(&dquot->dq_sem);
+ cfs_mutex_lock(&dquot->dq_mutex);
dqblk->dqb_ihardlimit = dquot->dq_dqb.dqb_ihardlimit;
dqblk->dqb_isoftlimit = dquot->dq_dqb.dqb_isoftlimit;
dqblk->dqb_bhardlimit = dquot->dq_dqb.dqb_bhardlimit;
dqblk->dqb_btime = dquot->dq_dqb.dqb_btime;
dqblk->dqb_itime = dquot->dq_dqb.dqb_itime;
dqblk->dqb_valid |= QIF_LIMITS | QIF_TIMES;
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
lustre_dqput(dquot);
cfs_up_read(&mds->mds_qonoff_sem);
if (rc)
GOTO(skip, rc);
- cfs_down(&dquot->dq_sem);
+ cfs_mutex_lock(&dquot->dq_mutex);
/* don't recover the dquot if it has no limits, if quota is being
* set, or if another recovery is already going on */
if (!(dquot->dq_dqb.dqb_bhardlimit || dquot->dq_dqb.dqb_bsoftlimit) ||
LQS_IS_SETQUOTA(lqs) || LQS_IS_RECOVERY(lqs)) {
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
GOTO(skip1, rc = 0);
}
cfs_spin_lock(&lqs->lqs_lock);
LQS_SET_RECOVERY(lqs);
cfs_spin_unlock(&lqs->lqs_lock);
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
/* release mds_qonoff_sem during obd_quotactl ops here */
cfs_up_write(&mds->mds_qonoff_sem);
total_limits += qctl->qc_dqblk.dqb_bhardlimit;
/* amend the usage of the administrative quotafile */
- cfs_down(&dquot->dq_sem);
+ cfs_mutex_lock(&dquot->dq_mutex);
dquot->dq_dqb.dqb_curspace = total_limits << QUOTABLOCK_BITS;
if (rc)
CERROR("write dquot failed! (rc:%d)\n", rc);
- cfs_up(&dquot->dq_sem);
+ cfs_mutex_unlock(&dquot->dq_mutex);
EXIT;
out:
cfs_spin_lock(&lqs->lqs_lock);
if (unlikely(!mds->mds_quota || obd->obd_stopping))
RETURN(rc);
- cfs_mutex_down(&obd->obd_dev_sem);
+ cfs_mutex_lock(&obd->obd_dev_mutex);
if (mds->mds_lov_desc.ld_active_tgt_count != mds->mds_lov_objid_count) {
CWARN("Only %u/%u OSTs are active, abort quota recovery\n",
mds->mds_lov_desc.ld_active_tgt_count,
mds->mds_lov_objid_count);
- cfs_mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_unlock(&obd->obd_dev_mutex);
RETURN(rc);
}
- cfs_mutex_up(&obd->obd_dev_sem);
+ cfs_mutex_unlock(&obd->obd_dev_mutex);
data.obd = obd;
cfs_init_completion(&data.comp);