- LDLM_LOCK_GET(lock);
- spin_unlock(&ns->ns_unused_lock);
- lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
-
-                /* Pass the lock through the policy filter and see if it
-                 * should stay in the LRU.
-                 *
-                 * Even for the shrinker policy we stop scanning if
-                 * we find a lock that should stay in the cache.
-                 * We should take lock age into account anyway, as
-                 * a new lock is a valuable resource even if it has
-                 * a small weight.
-                 *
-                 * That is, for the shrinker policy we drop only old
-                 * locks, but additionally choose them by their weight.
-                 * Big extent locks will stay in the cache (a sketch of
-                 * such a policy follows this function). */
- if (pf(ns, lock, unused, added, count) ==
- LDLM_POLICY_KEEP_LOCK) {
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_unused_lock);
- break;
- }
-
- lock_res_and_lock(lock);
- /* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
-                        /* Another thread is removing the lock from
-                         * the LRU, or somebody is already doing
-                         * CANCEL, or there is a blocking request
-                         * which will send the cancel by itself, or
-                         * the lock has been matched and is no longer
-                         * unused. */
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
- spin_lock(&ns->ns_unused_lock);
- continue;
- }
- LASSERT(!lock->l_readers && !lock->l_writers);
-
-                /* If we have chosen to cancel this lock voluntarily, we
-                 * had better send a cancel notification to the server,
-                 * so that it frees the appropriate state. This might
-                 * lead to a race where, while we are cancelling here,
-                 * the server is also silently cancelling this lock. */
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
-
-                /* Setting the CBPENDING flag is a little misleading,
-                 * but prevents an important race; namely, once
-                 * CBPENDING is set, the lock can accumulate no more
-                 * readers/writers. Since readers and writers are
-                 * already zero here, ldlm_lock_decref() won't see
-                 * this flag and call l_blocking_ast. */
- lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
-
-                /* We can't re-add to l_lru as it confuses the
-                 * refcounting in ldlm_lock_remove_from_lru() if an AST
-                 * arrives after we drop ns_lock below. We use l_bl_ast
-                 * and can't use l_pending_chain, as it is used on both
-                 * the server and the client, even though bug 5666 says
-                 * it is used only on the server. */
- LASSERT(list_empty(&lock->l_bl_ast));
- list_add(&lock->l_bl_ast, cancels);
- unlock_res_and_lock(lock);
- lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
- spin_lock(&ns->ns_unused_lock);
- added++;
- unused--;
- }
- spin_unlock(&ns->ns_unused_lock);
- RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
-}
-
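For context, pf above is an ldlm_cancel_lru_policy_t callback selected by
ldlm_cancel_lru_policy(). Below is a minimal sketch of the age-based policy
the comment describes; the ldlm_policy_res_t return type, the
LDLM_POLICY_CANCEL_LOCK value, the cfs_time_*() helpers, and the
l_last_used/ns_max_age fields are assumptions inferred from the surrounding
code, not part of this patch.

static ldlm_policy_res_t
ldlm_cancel_aged_policy_sketch(struct ldlm_namespace *ns,
                               struct ldlm_lock *lock,
                               int unused, int added, int count)
{
        /* Keep locks that landed on the LRU more recently than the
         * namespace's maximum allowed age; anything older may be
         * cancelled.  Returning KEEP_LOCK stops the caller's scan. */
        if (cfs_time_before(cfs_time_current(),
                            cfs_time_add(lock->l_last_used,
                                         ns->ns_max_age)))
                return LDLM_POLICY_KEEP_LOCK;

        return LDLM_POLICY_CANCEL_LOCK;
}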
-/* Returns the number of locks that could be canceled the next time
- * ldlm_cancel_lru() is called. Used by the lock pool shrinker. */
-int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
- int count, int max, int flags)
-{
- struct list_head disp = CFS_LIST_HEAD_INIT(disp);
- ldlm_cancel_lru_policy_t pf;
- struct ldlm_lock *lock;
- int added = 0, unused;
- int loop_stop = 0;
- ENTRY;
-
- pf = ldlm_cancel_lru_policy(ns, flags);
- LASSERT(pf != NULL);
- spin_lock(&ns->ns_unused_lock);
- unused = ns->ns_nr_unused;
- list_splice_init(&ns->ns_unused_list, &disp);
- while (!list_empty(&disp)) {
- lock = list_entry(disp.next, struct ldlm_lock, l_lru);
- list_move_tail(&lock->l_lru, &ns->ns_unused_list);
-
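The loop above uses a splice-and-rotate scan: the whole unused list is
spliced onto the local disp list, and each visited lock is immediately moved
back to the tail of ns_unused_list, so the walk is bounded by the entries
present when the scan started while LRU order is preserved. A stripped-down
sketch of the pattern, assuming the Linux list.h helpers; scan_lru and the
visited counter are illustrative names, not part of the patch:

static int scan_lru(struct list_head *lru)
{
        struct list_head disp = LIST_HEAD_INIT(disp);
        int visited = 0;

        /* Take ownership of every entry currently on the list. */
        list_splice_init(lru, &disp);

        while (!list_empty(&disp)) {
                struct list_head *entry = disp.next;

                /* Return the entry to the tail, preserving order. */
                list_move_tail(entry, lru);
                visited++;
        }
        return visited;
}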