From d7d8a4c8b1434ad7038acdee1d980d3ce70ba3c6 Mon Sep 17 00:00:00 2001
From: pravin
Date: Mon, 19 Jul 2010 23:05:47 +0400
Subject: [PATCH] b=22244 a=vitaly i=oleg.drokin i=andreas fix stack overflow. (patch from attachment 29939 and 30120)

When cancelling LRU locks asynchronously, only prepare the list of locks
in the caller's context (new ldlm_prepare_lru_list()) and let the ldlm_bl
thread cancel them locally through the now-exported
ldlm_cli_cancel_list_local() before the batched CANCEL RPCs are sent, so
the deep local-cancel path no longer runs on the caller's stack.
---
 lustre/include/lustre_dlm.h |  2 ++
 lustre/ldlm/ldlm_lockd.c    | 12 ++++++++----
 lustre/ldlm/ldlm_request.c  | 31 +++++++++++++++++++++----------
 3 files changed, 31 insertions(+), 14 deletions(-)

diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index 76f5814..d6d3c73 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -1120,6 +1120,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
                                ldlm_policy_data_t *policy,
                                ldlm_mode_t mode, int lock_flags,
                                ldlm_cancel_flags_t cancel_flags, void *opaque);
+int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+                               ldlm_cancel_flags_t flags);
 int ldlm_cli_cancel_list(cfs_list_t *head, int count,
                          struct ptlrpc_request *req,
                          ldlm_cancel_flags_t flags);
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index b5b2a4b..7d56276 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -2217,12 +2217,15 @@ static int ldlm_bl_thread_main(void *arg)
                 cfs_memory_pressure_set();
 
                 if (blwi->blwi_count) {
+                        int count;
                         /* The special case when we cancel locks in lru
                          * asynchronously, we pass the list of locks here.
-                         * Thus lock is marked LDLM_FL_CANCELING, and already
-                         * canceled locally. */
-                        ldlm_cli_cancel_list(&blwi->blwi_head,
-                                             blwi->blwi_count, NULL, 0);
+                         * Thus locks are marked LDLM_FL_CANCELING, but NOT
+                         * canceled locally yet. */
+                        count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
+                                                           blwi->blwi_count,
+                                                           LCF_BL_AST);
+                        ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
                 } else {
                         ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                                 blwi->blwi_lock);
@@ -2656,6 +2659,7 @@ EXPORT_SYMBOL(ldlm_namespace_foreach);
 EXPORT_SYMBOL(ldlm_namespace_foreach_res);
 EXPORT_SYMBOL(ldlm_resource_iterate);
 EXPORT_SYMBOL(ldlm_cancel_resource_local);
+EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
 EXPORT_SYMBOL(ldlm_cli_cancel_list);
 
 /* ldlm_lockd.c */
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index 8d651d9..13e3a6d 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -1269,8 +1269,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh)
 
 /* XXX until we will have compound requests and can cut cancels from generic rpc
  * we need send cancels with LDLM_FL_BL_AST flag as separate rpc */
-static int ldlm_cancel_list(cfs_list_t *cancels, int count,
-                            ldlm_cancel_flags_t flags)
+int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
+                               ldlm_cancel_flags_t flags)
 {
         CFS_LIST_HEAD(head);
         struct ldlm_lock *lock, *next;
@@ -1473,9 +1473,8 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
  *
  * flags & LDLM_CANCEL_AGED - cancel alocks according to "aged policy".
  */
-int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
-                          int count, int max, ldlm_cancel_flags_t cancel_flags,
-                          int flags)
+static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
+                                 int count, int max, int flags)
 {
         ldlm_cancel_lru_policy_t pf;
         struct ldlm_lock *lock, *next;
@@ -1585,7 +1584,18 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 unused--;
         }
         cfs_spin_unlock(&ns->ns_unused_lock);
-        RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
+        RETURN(added);
+}
+
+int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
+                          int count, int max, ldlm_cancel_flags_t cancel_flags,
+                          int flags)
+{
+        int added;
+        added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
+        if (added <= 0)
+                return added;
+        return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
 }
 
 /* when called with LDLM_ASYNC the blocking callback will be handled
@@ -1602,8 +1612,9 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t mode,
 #ifndef __KERNEL__
         mode = LDLM_SYNC; /* force to be sync in user space */
 #endif
-        count = ldlm_cancel_lru_local(ns, &cancels, nr, 0, 0, flags);
-
+        /* Just prepare the list of locks, do not actually cancel them yet.
+         * Locks are cancelled later in a separate thread. */
+        count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
         rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, mode);
         if (rc == 0)
                 RETURN(count);
@@ -1663,7 +1674,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
         }
         unlock_res(res);
 
-        RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
+        RETURN(ldlm_cli_cancel_list_local(cancels, count, cancel_flags));
 }
 
 /* If @req is NULL, send CANCEL request to server with handles of locks
@@ -1740,7 +1751,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
 
         LDLM_RESOURCE_ADDREF(res);
         count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
-                                           0, flags, opaque);
+                                           0, flags | LCF_BL_AST, opaque);
         rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
         if (rc != ELDLM_OK)
                 CERROR("ldlm_cli_cancel_unused_resource: %d\n", rc);
-- 
1.8.3.1
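
A minimal usage sketch (not part of the patch; the helper name below is
hypothetical) of how the newly exported ldlm_cli_cancel_list_local() pairs
with ldlm_cli_cancel_list(), mirroring the call sequence
ldlm_bl_thread_main() uses after this change for a list of locks that were
only prepared, not yet cancelled locally:

/* Sketch against the patched tree; ldlm_cli_cancel_list_local() and
 * ldlm_cli_cancel_list() are declared in lustre/include/lustre_dlm.h. */
#include <lustre_dlm.h>

/* Hypothetical helper: @cancels holds locks prepared e.g. by
 * ldlm_prepare_lru_list(), i.e. marked LDLM_FL_CANCELING but not yet
 * cancelled locally. */
static int cancel_prepared_locks(cfs_list_t *cancels, int count)
{
        int left;

        /* Cancel the locks locally first; LCF_BL_AST is the flag
         * ldlm_bl_thread_main() passes here, and the returned count is
         * the number of locks still left on the list for the server. */
        left = ldlm_cli_cancel_list_local(cancels, count, LCF_BL_AST);

        /* Pack the remaining handles into batched CANCEL RPC(s). */
        return ldlm_cli_cancel_list(cancels, left, NULL, 0);
}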