ldlm_handle_bl_callback(ns, NULL, lock);
} else if (ns_is_client(ns) &&
!lock->l_readers && !lock->l_writers &&
- !(lock->l_flags & LDLM_FL_NO_LRU)) {
+ !(lock->l_flags & LDLM_FL_NO_LRU) &&
+ !(lock->l_flags & LDLM_FL_BL_AST)) {
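+ /* A lock with a blocking AST pending is about to be
+ * cancelled, so it is deliberately kept off the LRU. */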
/* If this is a client-side namespace and this was the last
* reference, put it on the LRU. */
ldlm_lock_add_to_lru(lock);
}
if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
+ /* BL_AST locks are not needed in the LRU;
+ * keeping them out lets ldlm_cancel_lru() stay fast. */
+ ldlm_lock_remove_from_lru(lock);
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
LDLM_DEBUG(lock, "completion AST includes blocking AST");
}
ldlm_callback_reply(req, -EINVAL);
RETURN(0);
}
+ /* BL_AST locks are not needed in the LRU;
+ * keeping them out lets ldlm_cancel_lru() stay fast. */
+ ldlm_lock_remove_from_lru(lock);
lock->l_flags |= LDLM_FL_BL_AST;
}
unlock_res_and_lock(lock);
int count, int max, int cancel_flags, int flags)
{
ldlm_cancel_lru_policy_t pf;
- struct ldlm_lock *lock;
+ struct ldlm_lock *lock, *next;
int added = 0, unused;
ENTRY;
if (max && added >= max)
break;
- list_for_each_entry(lock, &ns->ns_unused_list, l_lru) {
- /* Somebody is already doing CANCEL or there is a
- * blocking request will send cancel. */
- if (!(lock->l_flags & LDLM_FL_CANCELING) &&
- !(lock->l_flags & LDLM_FL_BL_AST))
+ list_for_each_entry_safe(lock, next, &ns->ns_unused_list, l_lru) {
+ /* Locks with a blocking request are never in the LRU:
+ * they are removed as soon as the blocking AST arrives. */
+ LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+
+ /* Somebody is already doing CANCEL: drop the lock from
+ * the LRU so it is not traversed again. */
+ if (!(lock->l_flags & LDLM_FL_CANCELING))
break;
+
+ ldlm_lock_remove_from_lru_nolock(lock);
}
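+ /* The loop above ran off the end of the list: every lock
+ * in the LRU was already being cancelled. */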
if (&lock->l_lru == &ns->ns_unused_list)
break;
lock_res_and_lock(lock);
/* Check flags again under the lock. */
if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (lock->l_flags & LDLM_FL_BL_AST) ||
(ldlm_lock_remove_from_lru(lock) == 0)) {
/* other thread is removing lock from lru or
* somebody is already doing CANCEL or
* there is a blocking request which will send
- * cancel by itseft. */
+ * cancel by itself, or the lock has been matched
+ * and is no longer unused. */
unlock_res_and_lock(lock);
LDLM_LOCK_PUT(lock);
spin_lock(&ns->ns_unused_lock);
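+ /* Re-add the lock to the LRU only if it is fully unused;
+ * in particular, never after a blocking AST has arrived. */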
if (list_empty(&lock->l_lru) &&
!lock->l_readers && !lock->l_writers &&
!(lock->l_flags & LDLM_FL_LOCAL) &&
- !(lock->l_flags & LDLM_FL_CBPENDING)) {
+ !(lock->l_flags & LDLM_FL_CBPENDING) &&
+ !(lock->l_flags & LDLM_FL_BL_AST)) {
ldlm_lock_add_to_lru(lock);
lock->l_flags &= ~LDLM_FL_NO_LRU;
LDLM_DEBUG(lock, "join lock to lru");