From 148b4b203b1a88737ee263a136cafd3994a73efe Mon Sep 17 00:00:00 2001 From: green Date: Thu, 3 Apr 2008 03:21:36 +0000 Subject: [PATCH] r=shadow,umka b=14257 Do not include LDLM_FLOCK locks into ldlm_pool sanity calculations. --- lustre/ChangeLog | 7 +++++++ lustre/ldlm/ldlm_pool.c | 10 ++++++++++ 2 files changed, 17 insertions(+) diff --git a/lustre/ChangeLog b/lustre/ChangeLog index 5df195a..3b87f95 100644 --- a/lustre/ChangeLog +++ b/lustre/ChangeLog @@ -848,6 +848,13 @@ Details : On SLES10/PPC, fs.h includes idr.h which requires BITS_PER_LONG to be defined. Add a hack in mkfs_lustre.c to work around this compile issue. +Severity : normal +Bugzilla : 14257 +Description: LASSERT on MDS when client holding flock lock dies +Details : ldlm pool logic depends on number of granted locks equal to + number of released locks which is not true for flock locks, so + just exclude such locks from consideration. + -------------------------------------------------------------------------------- 2007-08-10 Cluster File Systems, Inc. diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c index 541afe1..dcae254 100644 --- a/lustre/ldlm/ldlm_pool.c +++ b/lustre/ldlm/ldlm_pool.c @@ -667,7 +667,15 @@ EXPORT_SYMBOL(ldlm_pool_fini); void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock) { + /* FLOCK locks are special in a sense that they are almost never + * cancelled, instead special kind of lock is used to drop them. + * also there is no LRU for flock locks, so no point in tracking + * them anyway */ + if (lock->l_resource->lr_type == LDLM_FLOCK) + return; + ENTRY; + atomic_inc(&pl->pl_granted); atomic_inc(&pl->pl_grant_rate); atomic_inc(&pl->pl_grant_speed); @@ -686,6 +694,8 @@ EXPORT_SYMBOL(ldlm_pool_add); void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock) { + if (lock->l_resource->lr_type == LDLM_FLOCK) + return; ENTRY; LASSERT(atomic_read(&pl->pl_granted) > 0); atomic_dec(&pl->pl_granted); -- 1.8.3.1