ldlm_policy_data_t *policy,
ldlm_mode_t mode, int lock_flags,
ldlm_cancel_flags_t cancel_flags, void *opaque);
-int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- ldlm_cancel_flags_t flags);
int ldlm_cli_cancel_list(struct list_head *head, int count,
struct ptlrpc_request *req, int off);
if (blwi->blwi_count) {
/* The special case when we cancel locks in lru
* asynchronously, we pass the list of locks here.
- * Thus locks are marked LDLM_FL_CANCELING, but NOT
- * canceled locally yet. */
- ldlm_cli_cancel_list_local(&blwi->blwi_head,
- blwi->blwi_count, 0);
+		 * Thus the locks are marked LDLM_FL_CANCELING, and already
+		 * canceled locally. */
ldlm_cli_cancel_list(&blwi->blwi_head,
blwi->blwi_count, NULL, 0);
} else {
EXPORT_SYMBOL(ldlm_namespace_foreach_res);
EXPORT_SYMBOL(ldlm_resource_iterate);
EXPORT_SYMBOL(ldlm_cancel_resource_local);
-EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
EXPORT_SYMBOL(ldlm_cli_cancel_list);
/* ldlm_lockd.c */
/* XXX until we have compound requests and can cut cancels from a generic rpc,
 * we need to send cancels with the LDLM_FL_BL_AST flag as a separate rpc */
-int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- ldlm_cancel_flags_t flags)
+static int ldlm_cancel_list(struct list_head *cancels, int count,
+ ldlm_cancel_flags_t flags)
{
CFS_LIST_HEAD(head);
struct ldlm_lock *lock, *next;
* sending any rpcs or waiting for any
* outstanding rpc to complete
*/
-static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
- struct list_head *cancels,
- int count, int max, int flags)
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
+ int count, int max, ldlm_cancel_flags_t cancel_flags,
+ int flags)
{
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
unused--;
}
spin_unlock(&ns->ns_unused_lock);
- RETURN(added);
-}
-
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
- int count, int max, ldlm_cancel_flags_t cancel_flags,
- int flags)
-{
- int added;
- added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
- if (added <= 0)
- return added;
- return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
+ RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
}
/* when called with LDLM_ASYNC the blocking callback will be handled
#ifndef __KERNEL__
mode = LDLM_SYNC; /* force to be sync in user space */
#endif
- /* Just prepare the list of locks, do not actually cancel them yet.
- * Locks are cancelled later in a separate thread. */
- count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
+ count = ldlm_cancel_lru_local(ns, &cancels, nr, 0, 0, flags);
rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, mode);
if (rc == 0)
unlock_res(res);
/* Handle only @count inserted locks. */
- RETURN(ldlm_cli_cancel_list_local(cancels, count, cancel_flags));
+ RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
}
/* If @req is NULL, send CANCEL request to server with handles of locks