void cl_lock_release (const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source);
void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
struct cl_lock *lock);
__u64 l_flags;
__u32 l_readers;
__u32 l_writers;
- /*
- * Set for locks that were removed from class hash table and will be
- * destroyed when last reference to them is released. Set by
- * ldlm_lock_destroy_internal().
- *
- * Protected by lock and resource locks.
- */
- __u8 l_destroyed;
- /**
- * flag whether this is a server namespace lock
- */
- __u8 l_ns_srv;
/**
* If the lock is granted, a process sleeps on this waitq to learn when
* it's no longer in use. If the lock is not granted, a process sleeps
struct ldlm_extent l_req_extent;
+ unsigned int l_failed:1,
+ /*
+ * Set for locks that were removed from class hash table and will be
+ * destroyed when last reference to them is released. Set by
+ * ldlm_lock_destroy_internal().
+ *
+ * Protected by lock and resource locks.
+ */
+ l_destroyed:1,
+ /**
+ * flag whether this is a server namespace lock.
+ */
+ l_ns_srv:1;
+
/*
* Client-side-only members.
*/
- int l_fail_value;
/**
* Temporary storage for an LVB received during an enqueue operation.
*/
int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_fail_match_locked(struct ldlm_lock *lock, int rc);
-void ldlm_lock_fail_match(struct ldlm_lock *lock, int rc);
+void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
+void ldlm_lock_fail_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, int flags,
if (!unref &&
(lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
- lock->l_fail_value != 0))
+ lock->l_failed))
continue;
if ((flags & LDLM_FL_LOCAL_ONLY) &&
return NULL;
}
-void ldlm_lock_fail_match_locked(struct ldlm_lock *lock, int rc)
+void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
{
- if (lock->l_fail_value == 0) {
- lock->l_fail_value = rc;
- cfs_waitq_signal(&lock->l_waitq);
+ if (!lock->l_failed) {
+ lock->l_failed = 1;
+ cfs_waitq_broadcast(&lock->l_waitq);
}
}
EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
-void ldlm_lock_fail_match(struct ldlm_lock *lock, int rc)
+void ldlm_lock_fail_match(struct ldlm_lock *lock)
{
lock_res_and_lock(lock);
- ldlm_lock_fail_match_locked(lock, rc);
+ ldlm_lock_fail_match_locked(lock);
unlock_res_and_lock(lock);
}
EXPORT_SYMBOL(ldlm_lock_fail_match);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
lock->l_flags |= LDLM_FL_LVB_READY;
- cfs_waitq_signal(&lock->l_waitq);
+ cfs_waitq_broadcast(&lock->l_waitq);
}
void ldlm_lock_allow_match(struct ldlm_lock *lock)
/* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
l_wait_event(lock->l_waitq,
lock->l_flags & LDLM_FL_LVB_READY ||
- lock->l_fail_value != 0,
+ lock->l_failed,
&lwi);
if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
if (flags & LDLM_FL_TEST_LOCK)
if (lock != NULL) {
lock_res_and_lock(lock);
if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
- lock->l_fail_value != 0)
+ lock->l_failed)
GOTO(out, mode);
if (lock->l_flags & LDLM_FL_CBPENDING &&
lock->l_req_mode = mode;
lock->l_ast_data = data;
lock->l_pid = cfs_curproc_pid();
- lock->l_ns_srv = ns_is_server(ns);
+ lock->l_ns_srv = !!ns_is_server(ns);
if (cbs) {
lock->l_blocking_ast = cbs->lcs_blocking;
lock->l_completion_ast = cbs->lcs_completion;
cl_lock_get_trust(lock);
cfs_spin_unlock(&head->coh_lock_guard);
lu_ref_add(&lock->cll_reference, "prune", cfs_current());
+
+again:
cl_lock_mutex_get(env, lock);
if (lock->cll_state < CLS_FREEING) {
LASSERT(lock->cll_holds == 0);
- LASSERT(lock->cll_users == 0);
+ LASSERT(lock->cll_users <= 1);
+ if (unlikely(lock->cll_users == 1)) {
+ struct l_wait_info lwi = { 0 };
+
+ cl_lock_mutex_put(env, lock);
+ l_wait_event(lock->cll_wq,
+ lock->cll_users == 0,
+ &lwi);
+ goto again;
+ }
+
if (cancel)
cl_lock_cancel(env, lock);
cl_lock_delete(env, lock);
}
EXPORT_SYMBOL(cl_lock_user_add);
-int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
+void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
ENTRY;
cl_lock_used_mod(env, lock, -1);
- RETURN(lock->cll_users == 0);
+ if (lock->cll_users == 0)
+ cfs_waitq_broadcast(&lock->cll_wq);
+ EXIT;
}
EXPORT_SYMBOL(cl_lock_user_del);
dlmlock->l_ast_data = NULL;
olck->ols_handle.cookie = 0ULL;
cfs_spin_unlock(&osc_ast_guard);
- ldlm_lock_fail_match_locked(dlmlock, rc);
+ ldlm_lock_fail_match_locked(dlmlock);
unlock_res_and_lock(dlmlock);
LDLM_LOCK_PUT(dlmlock);
}
} else {
- if (olck->ols_glimpse)
+ if (olck->ols_glimpse) {
olck->ols_glimpse = 0;
+ olck->ols_agl = 0;
+ }
osc_lock_upcall0(env, olck);
}
if (lock != NULL) {
if (rc != ELDLM_OK)
- ldlm_lock_fail_match(lock, rc);
+ ldlm_lock_fail_match(lock);
LDLM_LOCK_PUT(lock);
}