struct obd_device;
struct mdc_rpc_lock {
- cfs_mutex_t rpcl_mutex;
+ struct mutex rpcl_mutex;
struct lookup_intent *rpcl_it;
int rpcl_fakes;
};
/**
 * Initialize an RPC lock: mutex ready, no holder recorded, no fake
 * requests outstanding.
 *
 * \param lck  lock to initialize; must not be in use.
 *
 * NOTE(review): resolved leftover diff markers (cfs_mutex_init ->
 * mutex_init). Also explicitly zero rpcl_fakes: the original relied on
 * the struct's storage being pre-zeroed by the allocator -- initializing
 * it here makes the function correct regardless of how the struct was
 * obtained.
 */
static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
{
	mutex_init(&lck->rpcl_mutex);
	lck->rpcl_it = NULL;
	lck->rpcl_fakes = 0;
}
/* NOTE(review): fragment -- the enclosing function's signature (apparently
 * mdc_get_rpc_lock) and the opener of the comment below were elided by the
 * diff this text came from; only the leftover '-'/'+' markers are resolved
 * here, no logic is changed. */
 * Only when all fake requests are finished can normal requests
 * be sent, to ensure they are recoverable again. */
again:
	mutex_lock(&lck->rpcl_mutex);
	/* Fault injection: pretend a request is in flight without sending
	 * one; record the sentinel and bump the fake count, then release
	 * the mutex so other fakes can pile up. */
	if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM)) {
		lck->rpcl_it = MDC_FAKE_RPCL_IT;
		lck->rpcl_fakes++;
		mutex_unlock(&lck->rpcl_mutex);
		return;
	}
/* NOTE(review): context lines elided between the branches above and the
 * comment tail below (its opener is not visible in this fragment). */
 * in this extremely rare case, just have low overhead in
 * the common case when it isn't true. */
	/* Fakes outstanding: drop the mutex, nap briefly, retry -- normal
	 * requests must wait until all fakes have drained. */
	while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
		mutex_unlock(&lck->rpcl_mutex);
		cfs_schedule_timeout(cfs_time_seconds(1) / 4);
		goto again;
	}
goto out;
if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
- cfs_mutex_lock(&lck->rpcl_mutex);
+ mutex_lock(&lck->rpcl_mutex);
LASSERTF(lck->rpcl_fakes > 0, "%d\n", lck->rpcl_fakes);
lck->rpcl_fakes--;
lck->rpcl_it = NULL;
}
- cfs_mutex_unlock(&lck->rpcl_mutex);
+ mutex_unlock(&lck->rpcl_mutex);
out:
EXIT;
}