From d8adb5057d9fd878cc80f881202c96d18eb7a359 Mon Sep 17 00:00:00 2001
From: Liang Zhen
Date: Tue, 21 Dec 2010 09:26:08 +0800
Subject: [PATCH] b24336 ldlm_resource::lr_lvb_data is protected by the wrong lock

- ldlm_resource::lr_lvb_data should always be protected by lr_lvb_sem
- clean up some unnecessary lock dancing

i=vitaly.fertman
i=green
---
 lustre/ldlm/ldlm_lock.c  | 29 ++++++++++++++---------------
 lustre/ldlm/ldlm_lockd.c | 12 ++++++------
 2 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index d788767..c9f4536 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -471,8 +471,7 @@ void ldlm_lock2handle(const struct ldlm_lock *lock, struct lustre_handle *lockh)
 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
                                      int flags)
 {
-        struct ldlm_namespace *ns;
-        struct ldlm_lock *lock, *retval = NULL;
+        struct ldlm_lock *lock;
         ENTRY;
 
         LASSERT(handle);
@@ -481,36 +480,36 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
         if (lock == NULL)
                 RETURN(NULL);
 
-        LASSERT(lock->l_resource != NULL);
-        ns = ldlm_lock_to_ns(lock);
-        LASSERT(ns != NULL);
+        /* It's unlikely but possible that someone marked the lock as
+         * destroyed after we did handle2object on it */
+        if (flags == 0 && !lock->l_destroyed) {
+                lu_ref_add(&lock->l_reference, "handle", cfs_current());
+                RETURN(lock);
+        }
 
-        lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
         lock_res_and_lock(lock);
 
-        /* It's unlikely but possible that someone marked the lock as
-         * destroyed after we did handle2object on it */
-        if (lock->l_destroyed) {
+        LASSERT(lock->l_resource != NULL);
+
+        lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
+        if (unlikely(lock->l_destroyed)) {
                 unlock_res_and_lock(lock);
                 CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
                 LDLM_LOCK_PUT(lock);
-                GOTO(out, retval);
+                RETURN(NULL);
         }
 
         if (flags && (lock->l_flags & flags)) {
                 unlock_res_and_lock(lock);
                 LDLM_LOCK_PUT(lock);
-                GOTO(out, retval);
+                RETURN(NULL);
         }
 
         if (flags)
                 lock->l_flags |= flags;
 
         unlock_res_and_lock(lock);
-        retval = lock;
-        EXIT;
- out:
-        return retval;
+        RETURN(lock);
 }
 
 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index a3f96fc..b750b1c 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -851,10 +851,10 @@ int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
         if (lock->l_resource->lr_lvb_len) {
                 void *lvb = req_capsule_client_get(&req->rq_pill,
                                                    &RMF_DLM_LVB);
-                lock_res_and_lock(lock);
+                cfs_down(&lock->l_resource->lr_lvb_sem);
                 memcpy(lvb, lock->l_resource->lr_lvb_data,
                        lock->l_resource->lr_lvb_len);
-                unlock_res_and_lock(lock);
+                cfs_up(&lock->l_resource->lr_lvb_sem);
         }
 
         LDLM_DEBUG(lock, "server preparing completion AST (after %lds wait)",
@@ -1137,13 +1137,11 @@ existing_lock:
                  * local_lock_enqueue by the policy function. */
                 cookie = req;
         } else {
-                lock_res_and_lock(lock);
                 if (lock->l_resource->lr_lvb_len) {
                         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB,
                                              RCL_SERVER,
                                              lock->l_resource->lr_lvb_len);
                 }
-                unlock_res_and_lock(lock);
 
                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
                         GOTO(out, rc = -ENOMEM);
@@ -1244,7 +1242,6 @@ existing_lock:
         LDLM_DEBUG(lock, "server-side enqueue handler, sending reply"
                    "(err=%d, rc=%d)", err, rc);
 
-        lock_res_and_lock(lock);
         if (rc == 0) {
                 if (lock->l_resource->lr_lvb_len > 0) {
                         void *lvb;
@@ -1254,14 +1251,17 @@ existing_lock:
                         LASSERTF(lvb != NULL, "req %p, lock %p\n",
                                  req, lock);
 
+                        cfs_down(&lock->l_resource->lr_lvb_sem);
                         memcpy(lvb, lock->l_resource->lr_lvb_data,
                                lock->l_resource->lr_lvb_len);
+                        cfs_up(&lock->l_resource->lr_lvb_sem);
                 }
         } else {
+                lock_res_and_lock(lock);
                 ldlm_resource_unlink_lock(lock);
                 ldlm_lock_destroy_nolock(lock);
+                unlock_res_and_lock(lock);
         }
-        unlock_res_and_lock(lock);
 
         if (!err && dlm_req->lock_desc.l_resource.lr_type != LDLM_FLOCK)
                 ldlm_reprocess_all(lock->l_resource);
-- 
1.8.3.1
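
The rule this patch enforces is that the variable-size LVB buffer hanging off a
resource (lr_lvb_data/lr_lvb_len) is copied under its own semaphore, lr_lvb_sem,
while the resource lock is taken only where the lock/resource lists are actually
modified. Below is a minimal standalone sketch of that pattern in plain C with
POSIX semaphores; it is not Lustre code, the struct and field names are invented
for illustration, and sem_wait()/sem_post() stand in for the cfs_down()/cfs_up()
calls on lr_lvb_sem seen in the patch.

/* Build with: cc -pthread lvb_sketch.c */
#include <semaphore.h>
#include <string.h>
#include <stdio.h>

struct resource {
        sem_t  lvb_sem;        /* protects lvb_data/lvb_len, like lr_lvb_sem */
        char   lvb_data[64];   /* stands in for lr_lvb_data */
        size_t lvb_len;        /* stands in for lr_lvb_len */
};

/* Copy the LVB out under lvb_sem only, mirroring the patched code paths. */
static void copy_lvb(struct resource *res, void *out)
{
        sem_wait(&res->lvb_sem);        /* cfs_down(&...->lr_lvb_sem) */
        memcpy(out, res->lvb_data, res->lvb_len);
        sem_post(&res->lvb_sem);        /* cfs_up(&...->lr_lvb_sem) */
}

int main(void)
{
        struct resource res = { .lvb_len = 6 };
        char out[64];

        sem_init(&res.lvb_sem, 0, 1);   /* binary semaphore, used as a mutex */
        memcpy(res.lvb_data, "hello", 6);

        copy_lvb(&res, out);
        printf("%s\n", out);

        sem_destroy(&res.lvb_sem);
        return 0;
}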