From 35cced307fe49aed1726fd1fe37b0b168c3bb002 Mon Sep 17 00:00:00 2001
From: yury
Date: Tue, 13 Sep 2005 18:42:37 +0000
Subject: [PATCH] - possible fix to #49 with good comment

---
 lustre/ldlm/ldlm_resource.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c
index e3c4011..24b8554 100644
--- a/lustre/ldlm/ldlm_resource.c
+++ b/lustre/ldlm/ldlm_resource.c
@@ -381,6 +381,7 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
                         spin_lock(&ns->ns_hash_lock);
                         tmp = tmp->next;
 
+#if 0
                         /* XXX what a mess: don't force cleanup if we're
                          * local_only (which is only used by recovery). In that
                          * case, we probably still have outstanding lock refs
@@ -394,6 +395,18 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, int flags)
                                 atomic_set(&res->lr_refcount, 1);
                                 ldlm_resource_putref_locked(res);
                         }
+#endif
+                        /* XXX: the former code caused problems in case of a
+                         * race between ldlm_namespace_cleanup() and lockd()
+                         * when the client gets a blocking AST while the lock
+                         * is being destroyed by the server. This is the 1_4
+                         * branch solution; let's see how it behaves. */
+                        if (!ldlm_resource_putref_locked(res)) {
+                                CERROR("Namespace %s resource refcount nonzero "
+                                       "(%d) after lock cleanup\n",
+                                       ns->ns_name, atomic_read(&res->lr_refcount));
+                                ldlm_resource_dump(D_ERROR, res);
+                        }
                 }
                 spin_unlock(&ns->ns_hash_lock);
         }
-- 
1.8.3.1
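
For illustration only, not part of the patch: a minimal standalone sketch of the
refcounting behaviour the new code relies on. All names here (demo_res,
demo_putref, demo_cleanup_one) are hypothetical; the assumption, matching how
the patch tests ldlm_resource_putref_locked(), is that putref returns nonzero
only when the last reference was dropped and the resource was freed.

/*
 * Hypothetical, simplified model (not Lustre code): a refcounted
 * resource whose putref returns nonzero only when the final reference
 * is dropped and the object is freed.  The cleanup helper mirrors the
 * patched behaviour: drop only our own reference and report resources
 * that are still referenced, instead of forcing the refcount to 1 and
 * destroying them as the #if 0'd code did.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_res {
        int refcount;
        const char *name;
};

/* Drop one reference; free the resource and return 1 if it was the last. */
static int demo_putref(struct demo_res *res)
{
        if (--res->refcount == 0) {
                free(res);
                return 1;
        }
        return 0;
}

/* Cleanup path: our reference is dropped unconditionally, but a resource
 * that someone else still references is only reported, never freed. */
static void demo_cleanup_one(struct demo_res *res)
{
        if (!demo_putref(res))
                fprintf(stderr, "resource %s refcount nonzero (%d) "
                        "after cleanup\n", res->name, res->refcount);
}

int main(void)
{
        struct demo_res *busy = malloc(sizeof(*busy));
        struct demo_res *idle = malloc(sizeof(*idle));

        busy->refcount = 2;             /* one ref held elsewhere, one ours */
        busy->name = "busy";
        idle->refcount = 1;             /* only our reference */
        idle->name = "idle";

        demo_cleanup_one(busy);         /* prints a warning, does not free */
        demo_cleanup_one(idle);         /* silently freed */

        demo_putref(busy);              /* other holder drops its ref later */
        return 0;
}

The trade-off is the one the patch comment describes: under the race between
namespace cleanup and a blocking AST, it is safer to leave a still-referenced
resource alive and log it than to force-free it underneath the other holder.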