ldlm_flock_interrupted_wait(void *data)
{
struct ldlm_lock *lock;
- struct lustre_handle lockh;
- int rc;
ENTRY;
lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
/* client side - set flag to prevent lock from being put on lru list */
lock->l_flags |= LDLM_FL_CBPENDING;
- ldlm_lock_decref_internal(lock, lock->l_req_mode);
- ldlm_lock2handle(lock, &lockh);
- rc = ldlm_cli_cancel(&lockh);
- if (rc != ELDLM_OK)
- CERROR("ldlm_cli_cancel: %d\n", rc);
-
EXIT;
}
* references being held, so that it can go away. No point in
* holding the lock even if app still believes it has it, since
* server already dropped it anyway. Only for granted locks too. */
- lock_res_and_lock(lock);
if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
(LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
- unlock_res_and_lock(lock);
if (lock->l_req_mode == lock->l_granted_mode &&
lock->l_granted_mode != LCK_NL &&
NULL == data)
ldlm_lock_decref_internal(lock, lock->l_req_mode);
+ /* Need to wake up the waiter if we were evicted */
+ cfs_waitq_signal(&lock->l_waitq);
RETURN(0);
}
- unlock_res_and_lock(lock);
LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
granted:
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
- lock_res_and_lock(lock);
- if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
+ if (lock->l_destroyed) {
LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- unlock_res_and_lock(lock);
+ RETURN(0);
+ }
+
+ if (lock->l_flags & LDLM_FL_FAILED) {
+ LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
RETURN(-EIO);
}
+
if (rc) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
rc);
- unlock_res_and_lock(lock);
RETURN(rc);
}
list_del_init(&lock->l_flock_waitq);
spin_unlock(&ldlm_flock_waitq_lock);
+ lock_res_and_lock(lock);
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
list_del_init(&lock->l_res_link);
}
static void failed_lock_cleanup(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- struct lustre_handle *lockh, int mode)
+ struct ldlm_lock *lock, int mode)
{
int need_cancel = 0;
* bl_ast and -EINVAL reply is sent to server anyways.
* bug 17645 */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
- LDLM_FL_ATOMIC_CB;
+ LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
need_cancel = 1;
}
unlock_res_and_lock(lock);
- if (need_cancel) {
+ if (need_cancel)
LDLM_DEBUG(lock,
"setting FL_LOCAL_ONLY | LDLM_FL_FAILED | "
- "LDLM_FL_ATOMIC_CB");
- ldlm_lock_decref_and_cancel(lockh, mode);
- } else {
+ "LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
+ else
LDLM_DEBUG(lock, "lock was granted or failed in race");
- ldlm_lock_decref(lockh, mode);
- }
+
+ ldlm_lock_decref_internal(lock, mode);
/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
* from llite/file.c/ll_file_flock(). */
+ /* This code accounts for the fact that we have no blocking handler on
+ * a client for flock locks. As such, this is the place where we must
+ * completely kill failed locks (both interrupted ones and those that
+ * were waiting to be granted when the server evicted us). */
if (lock->l_resource->lr_type == LDLM_FLOCK) {
- ldlm_lock_destroy(lock);
+ lock_res_and_lock(lock);
+ ldlm_resource_unlink_lock(lock);
+ ldlm_lock_destroy_nolock(lock);
+ unlock_res_and_lock(lock);
}
}
int err = lock->l_completion_ast(lock, *flags, NULL);
if (!rc)
rc = err;
- if (rc && type != LDLM_FLOCK) /* bug 9425, bug 10250 */
+ if (rc)
cleanup_phase = 1;
}
}
EXIT;
cleanup:
if (cleanup_phase == 1 && rc)
- failed_lock_cleanup(ns, lock, lockh, mode);
+ failed_lock_cleanup(ns, lock, mode);
/* Put lock 2 times, the second reference is held by ldlm_cli_enqueue */
LDLM_LOCK_PUT(lock);
LDLM_LOCK_PUT(lock);
if (reqp == NULL || *reqp == NULL) {
req = ldlm_prep_enqueue_req(exp, 2, size, NULL, 0);
if (req == NULL) {
- failed_lock_cleanup(ns, lock, lockh, einfo->ei_mode);
+ failed_lock_cleanup(ns, lock, einfo->ei_mode);
LDLM_LOCK_PUT(lock);
RETURN(-ENOMEM);
}