}
}
-static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
+void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source)
{
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
@@ ... @@
}
EXIT;
}
+EXPORT_SYMBOL(cl_lock_hold_release);
/**
* Waits until lock state is changed.
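With the static qualifier dropped and EXPORT_SYMBOL() added, cl_lock_hold_release() becomes callable from code outside cl_lock.c. The LINVRNT(cl_lock_is_mutexed(lock)) check means any such caller must hold the lock mutex across the call, much as cl_lock_release() does internally. A minimal sketch of an external caller under that assumption (the function name and scope string below are hypothetical, not part of the patch):

    static void example_drop_hold(const struct lu_env *env, struct cl_lock *lock)
    {
            /* cl_lock_hold_release() asserts cl_lock_is_mutexed(lock),
             * so take the lock mutex around the call. */
            cl_lock_mutex_get(env, lock);
            cl_lock_hold_release(env, lock, "example", lock /* source tag */);
            cl_lock_mutex_put(env, lock);
    }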
@@ ... @@
case CLS_QUEUING:
/* kick layers. */
result = cl_enqueue_kick(env, lock, io, flags);
- if (result == 0)
+ /* In the AGL case, cl_lock::cll_state may already
+ * have become CLS_HELD. */
+ if (result == 0 && lock->cll_state == CLS_QUEUING)
cl_lock_state_set(env, lock, CLS_ENQUEUED);
break;
case CLS_INTRANSIT:
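The added cll_state re-check guards against work done inside cl_enqueue_kick(): for an AGL enqueue the layers can complete the lock asynchronously, so by the time the call returns the state may already have advanced past CLS_QUEUING. Without the check, cl_lock_state_set() would move a CLS_HELD lock backwards to CLS_ENQUEUED. A sketch of the interleaving being prevented (illustrative timeline only, assuming the AGL completion runs in another context):

    /*
     *   cl_enqueue_try()                     AGL completion (elsewhere)
     *   ----------------                     --------------------------
     *   result = cl_enqueue_kick(...);
     *                                        lock advances to CLS_HELD
     *   cl_lock_state_set(..., CLS_ENQUEUED);   <-- backward transition
     */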
@@ ... @@
do {
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD ||
- lock->cll_state == CLS_INTRANSIT);
+ LASSERTF(lock->cll_state == CLS_QUEUING ||
+ lock->cll_state == CLS_ENQUEUED ||
+ lock->cll_state == CLS_HELD ||
+ lock->cll_state == CLS_INTRANSIT,
+ "lock state: %d\n", lock->cll_state);
LASSERT(lock->cll_users > 0);
LASSERT(lock->cll_holds > 0);
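Because an AGL lock may still be in CLS_QUEUING when a user first waits on it, CLS_QUEUING joins the legal entry states for this loop, and the LASSERT is upgraded to LASSERTF so a violation reports the offending state. The loop body ultimately relies on the state-wait helper documented earlier ("Waits until lock state is changed"). A minimal sketch of that canonical wait pattern, assuming cl_lock_state_wait() keeps its cl_lock.c behavior of dropping the mutex, sleeping, re-taking the mutex, and returning 0 or a negative errno on a signal (illustrative, not patch code):

    while (lock->cll_state != CLS_HELD && lock->cll_state < CLS_FREEING) {
            int rc;

            /* Drops the lock mutex, sleeps until some thread signals a
             * state change, then re-acquires the mutex before returning. */
            rc = cl_lock_state_wait(env, lock);
            if (rc != 0)
                    break;  /* interrupted by a signal */
    }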
@@ ... @@
again:
cl_lock_mutex_get(env, lock);
if (lock->cll_state < CLS_FREEING) {
- LASSERT(lock->cll_holds == 0);
LASSERT(lock->cll_users <= 1);
if (unlikely(lock->cll_users == 1)) {
struct l_wait_info lwi = { 0 };