From 8e365a02e794fd58099b2f771893f5ef3a73c7f7 Mon Sep 17 00:00:00 2001
From: Vitaly Fertman
Date: Mon, 4 Feb 2013 17:27:03 +0400
Subject: [PATCH] LU-1565 ldlm: make blocking threads async wherever possible

There is no need to wait for cancel LRU lock completion in the
client-side pool recalculation, so make it asynchronous.

Also make all ldlm_cli_cancel() calls from blocking callbacks
asynchronous.

Change-Id: Ie510c7361f1025a78c693a11b457baf1652f8c90
Xyratex-bug-id: MRP-690
Reviewed-by: Andrew Perepechko
Reviewed-by: Bruce Korb
Signed-off-by: Vitaly Fertman
Reviewed-on: http://review.whamcloud.com/4181
Reviewed-by: Mike Pershin
Tested-by: Hudson
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---
 lustre/include/lustre_dlm.h |  3 +-
 lustre/ldlm/ldlm_internal.h | 15 +++-----
 lustre/ldlm/ldlm_lock.c     |  2 +-
 lustre/ldlm/ldlm_lockd.c    | 93 ++++++++++++++++++++++++---------------------
 lustre/ldlm/ldlm_pool.c     |  8 ++--
 lustre/ldlm/ldlm_request.c  | 40 ++++++++++---------
 lustre/ldlm/ldlm_resource.c | 12 +++---
 lustre/liblustre/namei.c    |  2 +-
 lustre/liblustre/rw.c       |  2 +-
 lustre/llite/namei.c        |  2 +-
 lustre/mdt/mdt_handler.c    |  2 +-
 lustre/mgc/mgc_request.c    |  2 +-
 lustre/osc/osc_lock.c       |  4 +-
 lustre/quota/qsd_lock.c     |  4 +-
 14 files changed, 99 insertions(+), 92 deletions(-)

diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index 45ea747..df5014a 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -1611,7 +1611,8 @@ int ldlm_server_ast(struct lustre_handle *lockh, struct ldlm_lock_desc *new,
 		    void *data, __u32 data_len);
 int ldlm_cli_convert(struct lustre_handle *, int new_mode, __u32 *flags);
 int ldlm_cli_update_pool(struct ptlrpc_request *req);
-int ldlm_cli_cancel(struct lustre_handle *lockh);
+int ldlm_cli_cancel(struct lustre_handle *lockh,
+		    ldlm_cancel_flags_t cancel_flags);
 int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
 			   ldlm_cancel_flags_t flags, void *opaque);
 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
diff --git a/lustre/ldlm/ldlm_internal.h b/lustre/ldlm/ldlm_internal.h
index a29aae5..55483e9 100644
--- a/lustre/ldlm/ldlm_internal.h
+++ b/lustre/ldlm/ldlm_internal.h
@@ -62,11 +62,6 @@ static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
 }
 
 /* ldlm_request.c */
-typedef enum {
-	LDLM_ASYNC,
-	LDLM_SYNC,
-} ldlm_sync_t;
-
 /* Cancel lru flag, it indicates we cancel aged locks. */
 enum {
 	LDLM_CANCEL_AGED = 1 << 0, /* Cancel aged locks (non lru resize). */
@@ -77,8 +72,8 @@ enum {
 				      * sending nor waiting for any rpcs) */
 };
 
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
-		    int flags);
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
+		    ldlm_cancel_flags_t sync, int flags);
 int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
			   int count, int max, ldlm_cancel_flags_t cancel_flags,
			   int flags);
@@ -144,8 +139,10 @@ void ldlm_cancel_locks_for_export(struct obd_export *export);
 
 /* ldlm_lockd.c */
 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
 			   struct ldlm_lock *lock);
-int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-			   cfs_list_t *cancels, int count, int mode);
+int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
+			   struct ldlm_lock_desc *ld,
+			   cfs_list_t *cancels, int count,
+			   ldlm_cancel_flags_t cancel_flags);
 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
 			     struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index cdd4c6d..abd1e8c 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -943,7 +943,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
 		 * enqueue. */
 		if (!exp_connect_cancelset(lock->l_conn_export) &&
 		    !ns_connect_lru_resize(ns))
-			ldlm_cancel_lru(ns, 0, LDLM_ASYNC, 0);
+			ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
 	} else {
 		LDLM_DEBUG(lock, "do not add lock into lru list");
 		unlock_res_and_lock(lock);
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index 4a75bb0..8d2057f 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -124,7 +124,7 @@ struct ldlm_bl_work_item {
 	cfs_list_t              blwi_head;
 	int                     blwi_count;
 	struct completion       blwi_comp;
-	int                     blwi_mode;
+	ldlm_cancel_flags_t     blwi_flags;
 	int                     blwi_mem_pressure;
 };
 
@@ -1914,7 +1914,8 @@ static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
 }
 
 #ifdef __KERNEL__
-static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
+static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
+			       ldlm_cancel_flags_t cancel_flags)
 {
 	struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
 	ENTRY;
@@ -1932,20 +1933,20 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
 
 	cfs_waitq_signal(&blp->blp_waitq);
 
-	/* can not use blwi->blwi_mode as blwi could be already freed in
-	   LDLM_ASYNC mode */
-	if (mode == LDLM_SYNC)
+	/* can not check blwi->blwi_flags as blwi could be already freed in
+	   LCF_ASYNC mode */
+	if (!(cancel_flags & LCF_ASYNC))
 		wait_for_completion(&blwi->blwi_comp);
 
 	RETURN(0);
 }
 
 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
-                             struct ldlm_namespace *ns,
-                             struct ldlm_lock_desc *ld,
-                             cfs_list_t *cancels, int count,
-                             struct ldlm_lock *lock,
-                             int mode)
+			     struct ldlm_namespace *ns,
+			     struct ldlm_lock_desc *ld,
+			     cfs_list_t *cancels, int count,
+			     struct ldlm_lock *lock,
+			     ldlm_cancel_flags_t cancel_flags)
 {
 	init_completion(&blwi->blwi_comp);
 	CFS_INIT_LIST_HEAD(&blwi->blwi_head);
@@ -1954,7 +1955,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi,
 		blwi->blwi_mem_pressure = 1;
 
 	blwi->blwi_ns = ns;
-	blwi->blwi_mode = mode;
+	blwi->blwi_flags = cancel_flags;
 	if (ld != NULL)
 		blwi->blwi_ld = *ld;
 	if (count) {
@@ -1976,52 +1977,57 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi,
  * call ->l_blocking_ast itself.
  */
 static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
-                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock,
-                             cfs_list_t *cancels, int count, int mode)
+			     struct ldlm_lock_desc *ld,
+			     struct ldlm_lock *lock,
+			     cfs_list_t *cancels, int count,
+			     ldlm_cancel_flags_t cancel_flags)
 {
-        ENTRY;
+	ENTRY;
 
-        if (cancels && count == 0)
-                RETURN(0);
+	if (cancels && count == 0)
+		RETURN(0);
 
-        if (mode == LDLM_SYNC) {
-                /* if it is synchronous call do minimum mem alloc, as it could
-                 * be triggered from kernel shrinker
-                 */
-                struct ldlm_bl_work_item blwi;
-                memset(&blwi, 0, sizeof(blwi));
-                init_blwi(&blwi, ns, ld, cancels, count, lock, LDLM_SYNC);
-                RETURN(__ldlm_bl_to_thread(&blwi, LDLM_SYNC));
-        } else {
-                struct ldlm_bl_work_item *blwi;
-                OBD_ALLOC(blwi, sizeof(*blwi));
-                if (blwi == NULL)
-                        RETURN(-ENOMEM);
-                init_blwi(blwi, ns, ld, cancels, count, lock, LDLM_ASYNC);
+	if (cancel_flags & LCF_ASYNC) {
+		struct ldlm_bl_work_item *blwi;
 
-                RETURN(__ldlm_bl_to_thread(blwi, LDLM_ASYNC));
-        }
+		OBD_ALLOC(blwi, sizeof(*blwi));
+		if (blwi == NULL)
+			RETURN(-ENOMEM);
+		init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
+
+		RETURN(__ldlm_bl_to_thread(blwi, cancel_flags));
+	} else {
+		/* if it is synchronous call do minimum mem alloc, as it could
+		 * be triggered from kernel shrinker
+		 */
+		struct ldlm_bl_work_item blwi;
+
+		memset(&blwi, 0, sizeof(blwi));
+		init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
+		RETURN(__ldlm_bl_to_thread(&blwi, cancel_flags));
+	}
 }
 #endif
 
 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-                           struct ldlm_lock *lock)
+			   struct ldlm_lock *lock)
 {
 #ifdef __KERNEL__
-        RETURN(ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LDLM_ASYNC));
+	return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
 #else
-        RETURN(-ENOSYS);
+	return -ENOSYS;
 #endif
 }
 
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-                           cfs_list_t *cancels, int count, int mode)
+			   cfs_list_t *cancels, int count,
+			   ldlm_cancel_flags_t cancel_flags)
 {
 #ifdef __KERNEL__
-        RETURN(ldlm_bl_to_thread(ns, ld, NULL, cancels, count, mode));
+	return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
 #else
-        RETURN(-ENOSYS);
+	return -ENOSYS;
 #endif
 }
@@ -2215,7 +2221,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
 	 * which the server has already started a blocking callback on. */
 	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
 	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
-		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0]);
+		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
 		if (rc < 0)
 			CERROR("ldlm_cli_cancel: %d\n", rc);
 	}
@@ -2647,7 +2653,8 @@ static int ldlm_bl_thread_main(void *arg)
 			count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
 							   blwi->blwi_count,
 							   LCF_BL_AST);
-			ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL, 0);
+			ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
+					     blwi->blwi_flags);
 		} else {
 			ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
 						blwi->blwi_lock);
@@ -2655,9 +2662,9 @@ static int ldlm_bl_thread_main(void *arg)
 		if (blwi->blwi_mem_pressure)
 			cfs_memory_pressure_clr();
 
-		if (blwi->blwi_mode == LDLM_ASYNC)
-			OBD_FREE(blwi, sizeof(*blwi));
-		else
+		if (blwi->blwi_flags & LCF_ASYNC)
+			OBD_FREE(blwi, sizeof(*blwi));
+		else
 			complete(&blwi->blwi_comp);
 	}
diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c
index 820a50d..d724a2d 100644
--- a/lustre/ldlm/ldlm_pool.c
+++ b/lustre/ldlm/ldlm_pool.c
@@ -518,8 +518,8 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 	 * It may be called when SLV has changed much, this is why we do not
 	 * take into account pl->pl_recalc_time here. */
-	RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LDLM_SYNC,
-	                       LDLM_CANCEL_LRUR));
+	RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC,
+			       LDLM_CANCEL_LRUR));
 }
 
 /**
@@ -551,8 +551,8 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
 	spin_unlock(&ns->ns_lock);
 
 	if (nr) {
-		canceled = ldlm_cancel_lru(ns, nr, LDLM_ASYNC,
-					   LDLM_CANCEL_SHRINK);
+		canceled = ldlm_cancel_lru(ns, nr, LCF_ASYNC,
+					   LDLM_CANCEL_SHRINK);
 	}
 #ifdef __KERNEL__
 	/*
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index 106241b..aa30599 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -332,7 +332,7 @@ int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
 		LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
 		ldlm_lock2handle(lock, &lockh);
-		rc = ldlm_cli_cancel(&lockh);
+		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
 		if (rc < 0)
 			CERROR("ldlm_cli_cancel: %d\n", rc);
 	} else {
@@ -1341,7 +1341,8 @@ EXPORT_SYMBOL(ldlm_cli_update_pool);
  *
  * Lock must not have any readers or writers by this time.
  */
-int ldlm_cli_cancel(struct lustre_handle *lockh)
+int ldlm_cli_cancel(struct lustre_handle *lockh,
+		    ldlm_cancel_flags_t cancel_flags)
 {
 	struct obd_export *exp;
 	int avail, flags, count = 1;
@@ -1382,7 +1383,7 @@ int ldlm_cli_cancel(struct lustre_handle *lockh)
 		count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
 					       LCF_BL_AST, flags);
 	}
-	ldlm_cli_cancel_list(&cancels, count, NULL, 0);
+	ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
 	RETURN(0);
 }
 EXPORT_SYMBOL(ldlm_cli_cancel);
@@ -1774,29 +1775,30 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
 /**
  * Cancel at least \a nr locks from given namespace LRU.
  *
- * When called with LDLM_ASYNC the blocking callback will be handled
+ * When called with LCF_ASYNC the blocking callback will be handled
  * in a thread and this function will return after the thread has been
- * asked to call the callback.  When called with LDLM_SYNC the blocking
+ * asked to call the callback.  When called without LCF_ASYNC the blocking
  * callback will be performed in this function.
  */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t mode,
-                    int flags)
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
+		    ldlm_cancel_flags_t cancel_flags,
+		    int flags)
 {
-        CFS_LIST_HEAD(cancels);
-        int count, rc;
-        ENTRY;
+	CFS_LIST_HEAD(cancels);
+	int count, rc;
+	ENTRY;
 
 #ifndef __KERNEL__
-        mode = LDLM_SYNC; /* force to be sync in user space */
+	cancel_flags &= ~LCF_ASYNC; /* force to be sync in user space */
 #endif
-        /* Just prepare the list of locks, do not actually cancel them yet.
-         * Locks are cancelled later in a separate thread. */
-        count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
-        rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, mode);
-        if (rc == 0)
-                RETURN(count);
-
-        RETURN(0);
+	/* Just prepare the list of locks, do not actually cancel them yet.
+	 * Locks are cancelled later in a separate thread. */
+	count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
+	rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
+	if (rc == 0)
+		RETURN(count);
+
+	RETURN(0);
 }
 
 /**
diff --git a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c
index 2174271..e48124b 100644
--- a/lustre/ldlm/ldlm_resource.c
+++ b/lustre/ldlm/ldlm_resource.c
@@ -203,8 +203,8 @@ static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
 			int canceled, unused = ns->ns_nr_unused;
 
 			/* Try to cancel all @ns_nr_unused locks. */
-			canceled = ldlm_cancel_lru(ns, unused, LDLM_SYNC,
-						   LDLM_CANCEL_PASSED);
+			canceled = ldlm_cancel_lru(ns, unused, 0,
+						   LDLM_CANCEL_PASSED);
 			if (canceled < unused) {
 				CDEBUG(D_DLMTRACE,
 				       "not all requested locks are canceled, "
@@ -215,7 +215,7 @@ static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
 		} else {
 			tmp = ns->ns_max_unused;
 			ns->ns_max_unused = 0;
-			ldlm_cancel_lru(ns, 0, LDLM_SYNC, LDLM_CANCEL_PASSED);
+			ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
 			ns->ns_max_unused = tmp;
 		}
 		return count;
@@ -240,7 +240,7 @@ static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
 			       "changing namespace %s unused locks from %u to %u\n",
 			       ldlm_ns_name(ns), ns->ns_nr_unused,
 			       (unsigned int)tmp);
-			ldlm_cancel_lru(ns, tmp, LDLM_ASYNC, LDLM_CANCEL_PASSED);
+			ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
 
 			if (!lru_resize) {
 				CDEBUG(D_DLMTRACE,
@@ -254,7 +254,7 @@ static int lprocfs_wr_lru_size(struct file *file, const char *buffer,
 			       ldlm_ns_name(ns), ns->ns_max_unused,
 			       (unsigned int)tmp);
 			ns->ns_max_unused = (unsigned int)tmp;
-			ldlm_cancel_lru(ns, 0, LDLM_ASYNC, LDLM_CANCEL_PASSED);
+			ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
 
 			/* Make sure that LRU resize was originally supported before
 			 * turning it on here. */
@@ -765,7 +765,7 @@ static void cleanup_resource(struct ldlm_resource *res, cfs_list_t *q,
 			unlock_res(res);
 
 			ldlm_lock2handle(lock, &lockh);
-			rc = ldlm_cli_cancel(&lockh);
+			rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
 			if (rc)
 				CERROR("ldlm_cli_cancel: %d\n", rc);
 		} else {
diff --git a/lustre/liblustre/namei.c b/lustre/liblustre/namei.c
index a2ce417..209cc92 100644
--- a/lustre/liblustre/namei.c
+++ b/lustre/liblustre/namei.c
@@ -121,7 +121,7 @@ int llu_md_blocking_ast(struct ldlm_lock *lock,
 	switch (flag) {
 	case LDLM_CB_BLOCKING:
 		ldlm_lock2handle(lock, &lockh);
-		rc = ldlm_cli_cancel(&lockh);
+		rc = ldlm_cli_cancel(&lockh, 0);
 		if (rc < 0) {
 			CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
 			RETURN(rc);
diff --git a/lustre/liblustre/rw.c b/lustre/liblustre/rw.c
index 8caa98f..8a6999f 100644
--- a/lustre/liblustre/rw.c
+++ b/lustre/liblustre/rw.c
@@ -99,7 +99,7 @@ int llu_extent_lock_cancel_cb(struct ldlm_lock *lock,
 	switch (flag) {
 	case LDLM_CB_BLOCKING:
 		ldlm_lock2handle(lock, &lockh);
-		rc = ldlm_cli_cancel(&lockh);
+		rc = ldlm_cli_cancel(&lockh, 0);
 		if (rc != ELDLM_OK)
 			CERROR("ldlm_cli_cancel failed: %d\n", rc);
 		break;
diff --git a/lustre/llite/namei.c b/lustre/llite/namei.c
index 1477790..00965bf 100644
--- a/lustre/llite/namei.c
+++ b/lustre/llite/namei.c
@@ -205,7 +205,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 	switch (flag) {
 	case LDLM_CB_BLOCKING:
 		ldlm_lock2handle(lock, &lockh);
-		rc = ldlm_cli_cancel(&lockh);
+		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
 		if (rc < 0) {
 			CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
 			RETURN(rc);
diff --git a/lustre/mdt/mdt_handler.c b/lustre/mdt/mdt_handler.c
index 151680e..fb5de09 100644
--- a/lustre/mdt/mdt_handler.c
+++ b/lustre/mdt/mdt_handler.c
@@ -2487,7 +2487,7 @@ int mdt_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 	switch (flag) {
 	case LDLM_CB_BLOCKING:
 		ldlm_lock2handle(lock, &lockh);
-		rc = ldlm_cli_cancel(&lockh);
+		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
 		if (rc < 0) {
 			CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
 			RETURN(rc);
diff --git a/lustre/mgc/mgc_request.c b/lustre/mgc/mgc_request.c
index 67e4179..9e2357b 100644
--- a/lustre/mgc/mgc_request.c
+++ b/lustre/mgc/mgc_request.c
@@ -815,7 +815,7 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 		/* mgs wants the lock, give it up... */
 		LDLM_DEBUG(lock, "MGC blocking CB");
 		ldlm_lock2handle(lock, &lockh);
-		rc = ldlm_cli_cancel(&lockh);
+		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
 		break;
 	case LDLM_CB_CANCELING:
 		/* We've given up the lock, prepare ourselves to update. */
diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index b09cb0b..76bf64c 100644
--- a/lustre/osc/osc_lock.c
+++ b/lustre/osc/osc_lock.c
@@ -681,7 +681,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
 		lockh = &osc_env_info(env)->oti_handle;
 		ldlm_lock2handle(dlmlock, lockh);
-		result = ldlm_cli_cancel(lockh);
+		result = ldlm_cli_cancel(lockh, LCF_ASYNC);
 	} else
 		result = 0;
 
 	return result;
@@ -1379,7 +1379,7 @@ static void osc_lock_cancel(const struct lu_env *env,
 		dlmlock->l_flags |= LDLM_FL_CBPENDING;
 		unlock_res_and_lock(dlmlock);
 		if (do_cancel)
-			result = ldlm_cli_cancel(&olck->ols_handle);
+			result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
 		if (result < 0)
 			CL_LOCK_DEBUG(D_ERROR, env, lock,
 				      "lock %p cancel failure with error(%d)\n",
diff --git a/lustre/quota/qsd_lock.c b/lustre/quota/qsd_lock.c
index c63d033..f198942 100644
--- a/lustre/quota/qsd_lock.c
+++ b/lustre/quota/qsd_lock.c
@@ -166,7 +166,7 @@ static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
 		LDLM_DEBUG(lock, "blocking AST on global quota lock");
 
 		ldlm_lock2handle(lock, &lockh);
-		rc = ldlm_cli_cancel(&lockh);
+		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
 		break;
 	}
 	case LDLM_CB_CANCELING: {
@@ -290,7 +290,7 @@ static int qsd_id_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *de
 		LDLM_DEBUG(lock, "blocking AST on ID quota lock");
 
 		ldlm_lock2handle(lock, &lockh);
-		rc = ldlm_cli_cancel(&lockh);
+		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
 		break;
 	}
 	case LDLM_CB_CANCELING: {
-- 
1.8.3.1
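
Usage note: after this patch every ldlm_cli_cancel() caller passes an
ldlm_cancel_flags_t.  LCF_ASYNC hands the cancel RPC off to a blocking
thread and returns immediately, which is what the blocking-callback call
sites above now do; passing 0 keeps the old synchronous behaviour.  The
sketch below shows the shape of a client blocking AST under the new
signature.  It is modelled on the ll_md_blocking_ast() hunk above; the
name example_blocking_ast is hypothetical and the code is illustrative,
not part of the patch.

/* Illustrative sketch only -- not part of the patch above. */
static int example_blocking_ast(struct ldlm_lock *lock,
				struct ldlm_lock_desc *desc,
				void *data, int flag)
{
	struct lustre_handle lockh;
	int rc;

	switch (flag) {
	case LDLM_CB_BLOCKING:
		ldlm_lock2handle(lock, &lockh);
		/* LCF_ASYNC: queue the cancel on a blocking thread and
		 * return without waiting for the cancel RPC to finish. */
		rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
		if (rc < 0)
			CERROR("ldlm_cli_cancel: %d\n", rc);
		break;
	case LDLM_CB_CANCELING:
		/* The lock is already being cancelled; nothing to do. */
		break;
	default:
		LBUG();
	}
	return 0;
}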