From bca497216066b1c8320ef6ccdf4aa2406071f975 Mon Sep 17 00:00:00 2001
From: vitaly
Date: Thu, 10 Apr 2008 11:55:07 +0000
Subject: [PATCH] Branch b1_6

b=14661
i=green
i=shadow

-- remove a BL_AST lock from the lru as soon as we mark the lock as such;
-- do not place a BL_AST lock into the lru even if the lock is matched and
   put later;
=> no BL_AST locks in the lru at all;
-- CANCELING locks are not numerous, but to avoid having another thread
   traverse them again, drop them from the unused list as soon as we come
   across them.
---
 lustre/ldlm/ldlm_lock.c    |  3 ++-
 lustre/ldlm/ldlm_lockd.c   |  6 ++++++
 lustre/ldlm/ldlm_request.c | 23 ++++++++++++++---------
 3 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 8fc306a..2bddda6 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -639,7 +639,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
                 ldlm_handle_bl_callback(ns, NULL, lock);
         } else if (ns_is_client(ns) &&
                    !lock->l_readers && !lock->l_writers &&
-                   !(lock->l_flags & LDLM_FL_NO_LRU)) {
+                   !(lock->l_flags & LDLM_FL_NO_LRU) &&
+                   !(lock->l_flags & LDLM_FL_BL_AST)) {
                 /* If this is a client-side namespace and this was the last
                  * reference, put it on the LRU. */
                 ldlm_lock_add_to_lru(lock);
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index 2764595..850f540 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -1361,6 +1361,9 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
         }
 
         if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
+                /* BL_AST locks are not needed in lru.
+                 * let ldlm_cancel_lru() be fast. */
+                ldlm_lock_remove_from_lru(lock);
                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                 LDLM_DEBUG(lock, "completion AST includes blocking AST");
         }
@@ -1634,6 +1637,9 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
                         ldlm_callback_reply(req, -EINVAL);
                         RETURN(0);
                 }
+                /* BL_AST locks are not needed in lru.
+                 * let ldlm_cancel_lru() be fast. */
+                ldlm_lock_remove_from_lru(lock);
                 lock->l_flags |= LDLM_FL_BL_AST;
         }
         unlock_res_and_lock(lock);
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index 3ee57d1..aa3d1ae 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -1312,7 +1312,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                          int count, int max, int cancel_flags, int flags)
 {
         ldlm_cancel_lru_policy_t pf;
-        struct ldlm_lock *lock;
+        struct ldlm_lock *lock, *next;
         int added = 0, unused;
         ENTRY;
 
@@ -1330,12 +1330,16 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 if (max && added >= max)
                         break;
 
-                list_for_each_entry(lock, &ns->ns_unused_list, l_lru) {
-                        /* Somebody is already doing CANCEL or there is a
-                         * blocking request will send cancel. */
-                        if (!(lock->l_flags & LDLM_FL_CANCELING) &&
-                            !(lock->l_flags & LDLM_FL_BL_AST))
+                list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru){
+                        /* No locks which got blocking requests. */
+                        LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+
+                        /* Somebody is already doing CANCEL. No need in this
+                         * lock in lru, do not traverse it again. */
+                        if (!(lock->l_flags & LDLM_FL_CANCELING))
                                 break;
+
+                        ldlm_lock_remove_from_lru_nolock(lock);
                 }
                 if (&lock->l_lru == &ns->ns_unused_list)
                         break;
@@ -1362,12 +1366,12 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
                 lock_res_and_lock(lock);
                 /* Check flags again under the lock. */
                 if ((lock->l_flags & LDLM_FL_CANCELING) ||
-                    (lock->l_flags & LDLM_FL_BL_AST) ||
                     (ldlm_lock_remove_from_lru(lock) == 0)) {
                         /* other thread is removing lock from lru or
                          * somebody is already doing CANCEL or
                          * there is a blocking request which will send
-                         * cancel by itseft. */
+                         * cancel by itseft or the lock is matched
+                         * is already not unused. */
                         unlock_res_and_lock(lock);
                         LDLM_LOCK_PUT(lock);
                         spin_lock(&ns->ns_unused_lock);
@@ -1688,7 +1692,8 @@ int ldlm_cli_join_lru(struct ldlm_namespace *ns,
                 if (list_empty(&lock->l_lru) &&
                     !lock->l_readers && !lock->l_writers &&
                     !(lock->l_flags & LDLM_FL_LOCAL) &&
-                    !(lock->l_flags & LDLM_FL_CBPENDING)) {
+                    !(lock->l_flags & LDLM_FL_CBPENDING) &&
+                    !(lock->l_flags & LDLM_FL_BL_AST)) {
                         ldlm_lock_add_to_lru(lock);
                         lock->l_flags &= ~LDLM_FL_NO_LRU;
                         LDLM_DEBUG(lock, "join lock to lru");
-- 
1.8.3.1
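
Editor's note, not part of the patch: the common idea behind the ldlm_lock.c and
ldlm_cli_join_lru() hunks is simply "never let a lock with a pending blocking AST
back onto the LRU". The stand-alone sketch below illustrates the shape of that
check; the flag values, the toy_lock struct and the lru_eligible() helper are
invented for the example and are not the real Lustre definitions.

/* Illustrative sketch only -- flags, struct and helper are placeholders,
 * not Lustre code. */
#include <stdbool.h>
#include <stdio.h>

#define LDLM_FL_NO_LRU 0x01  /* lock must never be cached on the LRU      */
#define LDLM_FL_BL_AST 0x02  /* a blocking AST was received for this lock */

struct toy_lock {
        int readers;
        int writers;
        int flags;
};

/* Same shape as the patched condition in ldlm_lock_decref_internal():
 * an unused lock goes to the LRU only if no blocking AST is pending,
 * so ldlm_cancel_lru() never has to step over BL_AST locks. */
static bool lru_eligible(const struct toy_lock *lock)
{
        return lock->readers == 0 && lock->writers == 0 &&
               !(lock->flags & LDLM_FL_NO_LRU) &&
               !(lock->flags & LDLM_FL_BL_AST);
}

int main(void)
{
        struct toy_lock idle   = { 0, 0, 0 };
        struct toy_lock bl_ast = { 0, 0, LDLM_FL_BL_AST };

        printf("idle lock   -> LRU: %s\n", lru_eligible(&idle)   ? "yes" : "no");
        printf("BL_AST lock -> LRU: %s\n", lru_eligible(&bl_ast) ? "yes" : "no");
        return 0;
}

Because the LRU can no longer contain BL_AST locks, the reworked loop in
ldlm_cancel_lru_local() is able to LASSERT() on the flag instead of filtering
such locks out on every scan.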