From 8b39cdabac857e8700ffa7f3e00459833d08e728 Mon Sep 17 00:00:00 2001 From: bobijam Date: Mon, 18 Jun 2007 03:33:55 +0000 Subject: [PATCH] Branch b1_6 b=12689 i=green, vitaly Description: replay-single.sh test 52 fails Details : A lock's skiplist needs to be cleaned up when it is being unlinked from its resource list. --- lustre/ChangeLog | 6 ++++++ lustre/include/lustre_dlm.h | 1 + lustre/ldlm/ldlm_lock.c | 13 +++++-------- lustre/ldlm/ldlm_resource.c | 1 + 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/lustre/ChangeLog b/lustre/ChangeLog index 50f6155..5b3f40e 100644 --- a/lustre/ChangeLog +++ b/lustre/ChangeLog @@ -305,6 +305,12 @@ Description: ASSERTION(req->rq_type != LI_POISON) failed Details : imp_lock should be held while iterating over imp_sending_list for prevent destroy request after get timeout in ptlrpc_queue_wait. +Severity : normal +Bugzilla : 12689 +Description: replay-single.sh test 52 fails +Details : A lock's skiplist needs to be cleaned up when it is + being unlinked from its resource list. + -------------------------------------------------------------------------------- 2007-05-03 Cluster File Systems, Inc. 
diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h index 632b7bb..61dfc23 100644 --- a/lustre/include/lustre_dlm.h +++ b/lustre/include/lustre_dlm.h @@ -512,6 +512,7 @@ void ldlm_reprocess_all(struct ldlm_resource *res); void ldlm_reprocess_all_ns(struct ldlm_namespace *ns); void ldlm_lock_dump(int level, struct ldlm_lock *lock, int pos); void ldlm_lock_dump_handle(int level, struct lustre_handle *); +void ldlm_unlink_lock_skiplist(struct ldlm_lock *req); /* resource.c */ struct ldlm_namespace *ldlm_namespace_new(char *name, __u32 local); diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c index b02a08e..7e43e3f 100644 --- a/lustre/ldlm/ldlm_lock.c +++ b/lustre/ldlm/ldlm_lock.c @@ -1397,9 +1397,13 @@ void ldlm_cancel_callback(struct ldlm_lock *lock) } } -static void ldlm_unlink_lock_skiplist(struct ldlm_lock *req) +void ldlm_unlink_lock_skiplist(struct ldlm_lock *req) { struct ldlm_lock *lock; + + if (req->l_resource->lr_type != LDLM_PLAIN && + req->l_resource->lr_type != LDLM_IBITS) + return; if (LDLM_SL_HEAD(&req->l_sl_mode)) { lock = list_entry(req->l_res_link.next, struct ldlm_lock, @@ -1472,11 +1476,6 @@ void ldlm_lock_cancel(struct ldlm_lock *lock) /* Yes, second time, just in case it was added again while we were running with no res lock in ldlm_cancel_callback */ ldlm_del_waiting_lock(lock); - if (!(LDLM_SL_EMPTY(&lock->l_sl_mode) && - LDLM_SL_EMPTY(&lock->l_sl_policy)) && - (lock->l_resource->lr_type == LDLM_PLAIN || - lock->l_resource->lr_type == LDLM_IBITS)) - ldlm_unlink_lock_skiplist(lock); ldlm_resource_unlink_lock(lock); ldlm_lock_destroy_nolock(lock); unlock_res_and_lock(lock); @@ -1574,8 +1573,6 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode, else if (lock->l_res_link.next != &res->lr_granted) mark_lock = list_entry(lock->l_res_link.next, struct ldlm_lock, l_res_link); - if (join != LDLM_JOIN_NONE) - ldlm_unlink_lock_skiplist(lock); } ldlm_resource_unlink_lock(lock); diff --git 
a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c index b209f13..8f5425e 100644 --- a/lustre/ldlm/ldlm_resource.c +++ b/lustre/ldlm/ldlm_resource.c @@ -749,6 +749,7 @@ void ldlm_resource_insert_lock_after(struct ldlm_lock *original, void ldlm_resource_unlink_lock(struct ldlm_lock *lock) { check_res_locked(lock->l_resource); + ldlm_unlink_lock_skiplist(lock); list_del_init(&lock->l_res_link); } -- 1.8.3.1