/* XXX until we will have compound requests and can cut cancels from generic rpc
* we need send cancels with LDLM_FL_BL_AST flag as separate rpc */
-static int ldlm_cancel_list(cfs_list_t *cancels, int count,
- ldlm_cancel_flags_t flags)
+int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+ ldlm_cancel_flags_t flags)
{
CFS_LIST_HEAD(head);
struct ldlm_lock *lock, *next;
*
* flags & LDLM_CANCEL_AGED - cancel alocks according to "aged policy".
*/
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
- int count, int max, ldlm_cancel_flags_t cancel_flags,
- int flags)
+static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
+ int count, int max, int flags)
{
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
unused--;
}
cfs_spin_unlock(&ns->ns_unused_lock);
- RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
+ RETURN(added);
+}
+
+/* Back-compat wrapper keeping the old ldlm_cancel_lru_local() entry point:
+ * gather up to @max unused LRU locks from @ns onto @cancels via
+ * ldlm_prepare_lru_list() (@count already-collected locks, @flags select the
+ * LRU policy), then cancel the gathered locks locally with
+ * ldlm_cli_cancel_list_local() under @cancel_flags.  If preparation yields
+ * no locks (or an error, added <= 0), that value is returned unchanged and
+ * no cancellation is attempted. */
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
+ int count, int max, ldlm_cancel_flags_t cancel_flags,
+ int flags)
+{
+ int added;
+ added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
+ if (added <= 0)
+ return added;
+ return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
+}
/* when called with LDLM_ASYNC the blocking callback will be handled
#ifndef __KERNEL__
mode = LDLM_SYNC; /* force to be sync in user space */
#endif
- count = ldlm_cancel_lru_local(ns, &cancels, nr, 0, 0, flags);
-
+ /* Just prepare the list of locks, do not actually cancel them yet.
+ * Locks are cancelled later in a separate thread. */
+ count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, mode);
if (rc == 0)
RETURN(count);
}
unlock_res(res);
- RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
+ RETURN(ldlm_cli_cancel_list_local(cancels, count, cancel_flags));
}
/* If @req is NULL, send CANCEL request to server with handles of locks