From 2cb1701012c4f1f0f1abdc99eb8ce6d4bb9c7846 Mon Sep 17 00:00:00 2001
From: pravin
Date: Thu, 24 Jun 2010 21:44:45 +0530
Subject: [PATCH] b=21128 run sync ldlm_bl_to_thread_list() in separate thread to save stack space.

i=oleg.drokin
i=rahul
i=vitaly
---
 lustre/ldlm/ldlm_internal.h |  2 +-
 lustre/ldlm/ldlm_lockd.c    | 98 ++++++++++++++++++++++++++++++++++-----------
 lustre/ldlm/ldlm_request.c  | 18 ++++-----
 3 files changed, 83 insertions(+), 35 deletions(-)

diff --git a/lustre/ldlm/ldlm_internal.h b/lustre/ldlm/ldlm_internal.h
index cbad123..d6ff17f 100644
--- a/lustre/ldlm/ldlm_internal.h
+++ b/lustre/ldlm/ldlm_internal.h
@@ -138,7 +138,7 @@ void ldlm_cancel_locks_for_export(struct obd_export *export);
 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                            struct ldlm_lock *lock);
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-                           cfs_list_t *cancels, int count);
+                           cfs_list_t *cancels, int count, int mode);
 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
                              struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index cd8623d..ca784e8 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -133,9 +133,21 @@ struct ldlm_bl_work_item {
         struct ldlm_lock        *blwi_lock;
         cfs_list_t              blwi_head;
         int                     blwi_count;
+        cfs_completion_t        blwi_comp;
+        cfs_atomic_t            blwi_ref_count;
 };
 
 #ifdef __KERNEL__
+static inline void ldlm_bl_work_item_get(struct ldlm_bl_work_item *blwi)
+{
+        cfs_atomic_inc(&blwi->blwi_ref_count);
+}
+
+static inline void ldlm_bl_work_item_put(struct ldlm_bl_work_item *blwi)
+{
+        if (cfs_atomic_dec_and_test(&blwi->blwi_ref_count))
+                OBD_FREE(blwi, sizeof(*blwi));
+}
 
 static inline int have_expired_locks(void)
 {
@@ -1622,20 +1634,40 @@ static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
 }
 
 #ifdef __KERNEL__
-static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
-                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
-                             cfs_list_t *cancels, int count)
+static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
 {
         struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
-        struct ldlm_bl_work_item *blwi;
         ENTRY;
 
-        if (cancels && count == 0)
-                RETURN(0);
+        cfs_spin_lock(&blp->blp_lock);
+        if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+                /* add LDLM_FL_DISCARD_DATA requests to the priority list */
+                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+        } else {
+                /* other blocking callbacks are added to the regular list */
+                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
+        }
+        cfs_spin_unlock(&blp->blp_lock);
 
-        OBD_ALLOC(blwi, sizeof(*blwi));
-        if (blwi == NULL)
-                RETURN(-ENOMEM);
+        cfs_waitq_signal(&blp->blp_waitq);
+        if (mode == LDLM_SYNC)
+                /* keep ref count as object is on this stack for SYNC call */
+                cfs_wait_for_completion(&blwi->blwi_comp);
+
+        RETURN(0);
+}
+
+static inline void init_blwi(struct ldlm_bl_work_item *blwi,
+                             struct ldlm_namespace *ns,
+                             struct ldlm_lock_desc *ld,
+                             cfs_list_t *cancels, int count,
+                             struct ldlm_lock *lock)
+{
+        cfs_init_completion(&blwi->blwi_comp);
+        /* set ref count to 1 initially, supposed to be released in
+         * ldlm_bl_thread_main(), if not allocated on the stack */
+        cfs_atomic_set(&blwi->blwi_ref_count, 1);
+
         CFS_INIT_LIST_HEAD(&blwi->blwi_head);
         blwi->blwi_ns = ns;
         if (ld != NULL)
@@ -1647,36 +1679,55 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
         } else {
                 blwi->blwi_lock = lock;
         }
-        cfs_spin_lock(&blp->blp_lock);
-        if (lock && lock->l_flags & LDLM_FL_DISCARD_DATA) {
-                /* add LDLM_FL_DISCARD_DATA requests to the priority list */
-                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+}
+
+static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
+                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
+                             cfs_list_t *cancels, int count, int mode)
+{
+        ENTRY;
+
+        if (cancels && count == 0)
+                RETURN(0);
+
+        if (mode == LDLM_SYNC) {
+                /* if it is synchronous call do minimum mem alloc, as it could
+                 * be triggered from kernel shrinker
+                 */
+                struct ldlm_bl_work_item blwi;
+                memset(&blwi, 0, sizeof(blwi));
+                init_blwi(&blwi, ns, ld, cancels, count, lock);
+                /* take extra ref as this obj is on stack */
+                ldlm_bl_work_item_get(&blwi);
+                RETURN(__ldlm_bl_to_thread(&blwi, mode));
         } else {
-                /* other blocking callbacks are added to the regular list */
-                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
-        }
-        cfs_waitq_signal(&blp->blp_waitq);
-        cfs_spin_unlock(&blp->blp_lock);
+                struct ldlm_bl_work_item *blwi;
+                OBD_ALLOC(blwi, sizeof(*blwi));
+                if (blwi == NULL)
+                        RETURN(-ENOMEM);
+                init_blwi(blwi, ns, ld, cancels, count, lock);
 
-        RETURN(0);
+                RETURN(__ldlm_bl_to_thread(blwi, mode));
+        }
 }
+
 #endif
 
 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
                            struct ldlm_lock *lock)
 {
 #ifdef __KERNEL__
-        RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0));
+        RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC));
 #else
         RETURN(-ENOSYS);
 #endif
 }
 
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-                           cfs_list_t *cancels, int count)
+                           cfs_list_t *cancels, int count, int mode)
 {
 #ifdef __KERNEL__
-        RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count));
+        RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count, mode));
 #else
         RETURN(-ENOSYS);
 #endif
@@ -2181,7 +2232,8 @@ static int ldlm_bl_thread_main(void *arg)
                         ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
                                                 blwi->blwi_lock);
                 }
-                OBD_FREE(blwi, sizeof(*blwi));
+                cfs_complete(&blwi->blwi_comp);
+                ldlm_bl_work_item_put(blwi);
         }
 
         cfs_atomic_dec(&blp->blp_busy_threads);
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index ae98536..8d651d9 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -1592,7 +1592,7 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
  * in a thread and this function will return after the thread has been
  * asked to call the callback. when called with LDLM_SYNC the blocking
  * callback will be performed in this function. */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t mode,
                     int flags)
 {
         CFS_LIST_HEAD(cancels);
@@ -1600,19 +1600,15 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
         ENTRY;
 
 #ifndef __KERNEL__
-        sync = LDLM_SYNC; /* force to be sync in user space */
+        mode = LDLM_SYNC; /* force to be sync in user space */
 #endif
         count = ldlm_cancel_lru_local(ns, &cancels, nr, 0, 0, flags);
-        if (sync == LDLM_ASYNC) {
-                rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count);
-                if (rc == 0)
-                        RETURN(count);
-        }
 
-        /* If an error occured in ASYNC mode, or this is SYNC mode,
-         * cancel the list. */
-        ldlm_cli_cancel_list(&cancels, count, NULL, 0);
-        RETURN(count);
+        rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, mode);
+        if (rc == 0)
+                RETURN(count);
+
+        RETURN(0);
 }
 
 /* Find and cancel locally unused locks found on resource, matched to the
-- 
1.8.3.1
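
The heart of the patch is the hand-off pattern: a synchronous caller (e.g. the memory-pressure/shrinker path) builds the work item on its own stack so no allocation is needed, takes an extra reference so the worker's put can never free stack memory, queues the item for the blocking thread, and sleeps on a completion until the work is done. Below is a minimal user-space sketch of that pattern, using pthreads and C11 atomics in place of the libcfs primitives (cfs_completion_t, cfs_atomic_t, cfs_waitq_signal()); every name in the sketch is illustrative, not a Lustre API.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct ldlm_bl_work_item: a refcount plus a completion,
 * here built from a mutex/condvar pair. */
struct work_item {
        atomic_int      ref_count;      /* like blwi_ref_count */
        pthread_mutex_t lock;           /* lock+cond+done act as blwi_comp */
        pthread_cond_t  cond;
        int             done;
};

/* Like ldlm_bl_work_item_put(): free only when the last ref drops.  For
 * the sync (stack-allocated) case the caller holds an extra ref, so the
 * worker's put never reaches zero and never frees stack memory. */
static void work_item_put(struct work_item *w)
{
        if (atomic_fetch_sub(&w->ref_count, 1) == 1)
                free(w);
}

/* Plays the role of ldlm_bl_thread_main(): do the blocking work on the
 * worker's stack, signal the completion, drop the worker's reference. */
static void *worker(void *arg)
{
        struct work_item *w = arg;

        /* ... perform the blocking callback / cancel list here ... */

        pthread_mutex_lock(&w->lock);
        w->done = 1;                       /* cfs_complete(&blwi->blwi_comp) */
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
        work_item_put(w);                  /* ldlm_bl_work_item_put(blwi) */
        return NULL;
}

int main(void)
{
        struct work_item w;                /* sync path: on the caller's stack,
                                            * no allocation under mem pressure */
        pthread_t t;

        atomic_init(&w.ref_count, 1);      /* init_blwi(): worker's reference */
        pthread_mutex_init(&w.lock, NULL);
        pthread_cond_init(&w.cond, NULL);
        w.done = 0;

        atomic_fetch_add(&w.ref_count, 1); /* ldlm_bl_work_item_get(): extra
                                            * ref because the item is on stack */
        if (pthread_create(&t, NULL, worker, &w) != 0)
                return 1;

        /* cfs_wait_for_completion(): sleep instead of running the deep
         * cancel path on this thread's stack */
        pthread_mutex_lock(&w.lock);
        while (!w.done)
                pthread_cond_wait(&w.cond, &w.lock);
        pthread_mutex_unlock(&w.lock);

        pthread_join(t, NULL);             /* worker's put has run; the stack
                                            * item still holds our extra ref
                                            * and simply dies with this frame */
        printf("blocking work finished in worker thread\n");
        return 0;
}
```

In the async path the item is instead heap-allocated (OBD_ALLOC) with a single reference, so the worker's put is what frees it; the completion is signalled either way, but only a sync caller waits on it.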