From: Vitaly Fertman
Date: Tue, 14 Oct 2014 14:07:15 +0000 (+0400)
Subject: LU-5739 ldlm: interval tree search in ldlm_lock_match()
X-Git-Tag: 2.7.55~51
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=dd093dba75da4223ff1efc04483a75d4fc93d18d

LU-5739 ldlm: interval tree search in ldlm_lock_match()

replace the linear search by interval_tree one for granted list
in ldlm_lock_match()

Signed-off-by: Vitaly Fertman
Change-Id: I3932521b279253ac130835d53d489477be7598ef
Xyratex-bug-id: MRP-2089
Reviewed-on: http://review.whamcloud.com/12294
Tested-by: Jenkins
Reviewed-by: Andreas Dilger
Reviewed-by: Jinshan Xiong
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---

diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 302c965..f009bfa 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -1121,88 +1121,167 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
 }
 
 /**
- * Search for a lock with given properties in a queue.
- *
- * \retval a referenced lock or NULL. See the flag descriptions below, in the
- * comment above ldlm_lock_match
- */
-static struct ldlm_lock *search_queue(struct list_head *queue,
-                                      ldlm_mode_t *mode,
-                                      ldlm_policy_data_t *policy,
-                                      struct ldlm_lock *old_lock,
-                                      __u64 flags, int unref)
-{
-        struct ldlm_lock *lock;
-        struct list_head *tmp;
-
-        list_for_each(tmp, queue) {
-                ldlm_mode_t match;
-
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-
-                if (lock == old_lock)
-                        break;
-
-                /* Check if this lock can be matched.
-                 * Used by LU-2919(exclusive open) for open lease lock */
-                if (ldlm_is_excl(lock))
-                        continue;
-
-                /* llite sometimes wants to match locks that will be
-                 * canceled when their users drop, but we allow it to match
-                 * if it passes in CBPENDING and the lock still has users.
-                 * this is generally only going to be used by children
-                 * whose parents already hold a lock so forward progress
-                 * can still happen. */
-                if (ldlm_is_cbpending(lock) &&
-                    !(flags & LDLM_FL_CBPENDING))
-                        continue;
-                if (!unref && ldlm_is_cbpending(lock) &&
-                    lock->l_readers == 0 && lock->l_writers == 0)
-                        continue;
-
-                if (!(lock->l_req_mode & *mode))
-                        continue;
-                match = lock->l_req_mode;
-
-                if (lock->l_resource->lr_type == LDLM_EXTENT &&
-                    (lock->l_policy_data.l_extent.start >
-                     policy->l_extent.start ||
-                     lock->l_policy_data.l_extent.end < policy->l_extent.end))
-                        continue;
-
-                if (unlikely(match == LCK_GROUP) &&
-                    lock->l_resource->lr_type == LDLM_EXTENT &&
-                    policy->l_extent.gid != LDLM_GID_ANY &&
-                    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
-                        continue;
-
-                /* We match if we have existing lock with same or wider set
-                   of bits. */
-                if (lock->l_resource->lr_type == LDLM_IBITS &&
-                    ((lock->l_policy_data.l_inodebits.bits &
-                      policy->l_inodebits.bits) !=
-                      policy->l_inodebits.bits))
-                        continue;
-
-                if (!unref && LDLM_HAVE_MASK(lock, GONE))
-                        continue;
-
-                if ((flags & LDLM_FL_LOCAL_ONLY) &&
-                    !ldlm_is_local(lock))
-                        continue;
-
-                if (flags & LDLM_FL_TEST_LOCK) {
-                        LDLM_LOCK_GET(lock);
-                        ldlm_lock_touch_in_lru(lock);
-                } else {
-                        ldlm_lock_addref_internal_nolock(lock, match);
-                }
-                *mode = match;
-                return lock;
-        }
-
-        return NULL;
-}
+ * Describe the overlap between two locks. itree_overlap_cb data.
+ */
+struct lock_match_data {
+        struct ldlm_lock        *lmd_old;
+        struct ldlm_lock        *lmd_lock;
+        ldlm_mode_t             *lmd_mode;
+        ldlm_policy_data_t      *lmd_policy;
+        __u64                    lmd_flags;
+        int                      lmd_unref;
+};
+
+/**
+ * Check if the given @lock meets the criteria for a match.
+ * A reference on the lock is taken if matched.
+ *
+ * \param lock  test-against this lock
+ * \param data  parameters
+ */
+static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
+{
+        ldlm_policy_data_t *lpol = &lock->l_policy_data;
+        ldlm_mode_t match;
+
+        if (lock == data->lmd_old)
+                return INTERVAL_ITER_STOP;
+
+        /* Check if this lock can be matched.
+         * Used by LU-2919(exclusive open) for open lease lock */
+        if (ldlm_is_excl(lock))
+                return INTERVAL_ITER_CONT;
+
+        /* llite sometimes wants to match locks that will be
+         * canceled when their users drop, but we allow it to match
+         * if it passes in CBPENDING and the lock still has users.
+         * this is generally only going to be used by children
+         * whose parents already hold a lock so forward progress
+         * can still happen. */
+        if (ldlm_is_cbpending(lock) &&
+            !(data->lmd_flags & LDLM_FL_CBPENDING))
+                return INTERVAL_ITER_CONT;
+        if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
+            lock->l_readers == 0 && lock->l_writers == 0)
+                return INTERVAL_ITER_CONT;
+
+        if (!(lock->l_req_mode & *data->lmd_mode))
+                return INTERVAL_ITER_CONT;
+        match = lock->l_req_mode;
+
+        switch (lock->l_resource->lr_type) {
+        case LDLM_EXTENT:
+                if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
+                    lpol->l_extent.end < data->lmd_policy->l_extent.end)
+                        return INTERVAL_ITER_CONT;
+
+                if (unlikely(match == LCK_GROUP) &&
+                    data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
+                    lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
+                        return INTERVAL_ITER_CONT;
+                break;
+        case LDLM_IBITS:
+                /* We match if we have existing lock with same or wider set
+                 * of bits. */
+                if ((lpol->l_inodebits.bits &
+                     data->lmd_policy->l_inodebits.bits) !=
+                    data->lmd_policy->l_inodebits.bits)
+                        return INTERVAL_ITER_CONT;
+                break;
+        default:
+                ;
+        }
+
+        /* We match if we have existing lock with same or wider set
+           of bits. */
+        if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
+                return INTERVAL_ITER_CONT;
+
+        if ((data->lmd_flags & LDLM_FL_LOCAL_ONLY) &&
+            !ldlm_is_local(lock))
+                return INTERVAL_ITER_CONT;
+
+        if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
+                LDLM_LOCK_GET(lock);
+                ldlm_lock_touch_in_lru(lock);
+        } else {
+                ldlm_lock_addref_internal_nolock(lock, match);
+        }
+
+        *data->lmd_mode = match;
+        data->lmd_lock = lock;
+
+        return INTERVAL_ITER_STOP;
+}
+
+static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
+{
+        struct ldlm_interval *node = to_ldlm_interval(in);
+        struct lock_match_data *data = args;
+        struct ldlm_lock *lock;
+        int rc;
+
+        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+                rc = lock_matches(lock, data);
+                if (rc == INTERVAL_ITER_STOP)
+                        return INTERVAL_ITER_STOP;
+        }
+        return INTERVAL_ITER_CONT;
+}
+
+/**
+ * Search for a lock with given parameters in interval trees.
+ *
+ * \param res   search for a lock in this resource
+ * \param data  parameters
+ *
+ * \retval a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_itree(struct ldlm_resource *res,
+                                      struct lock_match_data *data)
+{
+        struct interval_node_extent ext = {
+                .start  = data->lmd_policy->l_extent.start,
+                .end    = data->lmd_policy->l_extent.end
+        };
+        int idx;
+
+        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+                struct ldlm_interval_tree *tree = &res->lr_itree[idx];
+
+                if (tree->lit_root == NULL)
+                        continue;
+
+                if (!(tree->lit_mode & *data->lmd_mode))
+                        continue;
+
+                interval_search(tree->lit_root, &ext,
+                                itree_overlap_cb, data);
+        }
+        return data->lmd_lock;
+}
+
+/**
+ * Search for a lock with given properties in a queue.
+ *
+ * \param queue search for a lock in this queue
+ * \param data  parameters
+ *
+ * \retval a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_queue(struct list_head *queue,
+                                      struct lock_match_data *data)
+{
+        struct ldlm_lock *lock;
+        int rc;
+
+        list_for_each_entry(lock, queue, l_res_link) {
+                rc = lock_matches(lock, data);
+                if (rc == INTERVAL_ITER_STOP)
+                        return data->lmd_lock;
+        }
+        return NULL;
+}
 
 void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
@@ -1279,47 +1358,55 @@ EXPORT_SYMBOL(ldlm_lock_allow_match);
  */
 ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                             const struct ldlm_res_id *res_id, ldlm_type_t type,
-                            ldlm_policy_data_t *policy, ldlm_mode_t mode,
-                            struct lustre_handle *lockh, int unref)
-{
-        struct ldlm_resource *res;
-        struct ldlm_lock *lock, *old_lock = NULL;
-        int rc = 0;
-        ENTRY;
+                            ldlm_policy_data_t *policy, ldlm_mode_t mode,
+                            struct lustre_handle *lockh, int unref)
+{
+        struct lock_match_data data = {
+                .lmd_old        = NULL,
+                .lmd_lock       = NULL,
+                .lmd_mode       = &mode,
+                .lmd_policy     = policy,
+                .lmd_flags      = flags,
+                .lmd_unref      = unref,
+        };
+        struct ldlm_resource *res;
+        struct ldlm_lock *lock;
+        int rc = 0;
+        ENTRY;
 
-        if (ns == NULL) {
-                old_lock = ldlm_handle2lock(lockh);
-                LASSERT(old_lock);
+        if (ns == NULL) {
+                data.lmd_old = ldlm_handle2lock(lockh);
+                LASSERT(data.lmd_old != NULL);
 
-                ns = ldlm_lock_to_ns(old_lock);
-                res_id = &old_lock->l_resource->lr_name;
-                type = old_lock->l_resource->lr_type;
-                mode = old_lock->l_req_mode;
-        }
+                ns = ldlm_lock_to_ns(data.lmd_old);
+                res_id = &data.lmd_old->l_resource->lr_name;
+                type = data.lmd_old->l_resource->lr_type;
+                *data.lmd_mode = data.lmd_old->l_req_mode;
+        }
 
         res = ldlm_resource_get(ns, NULL, res_id, type, 0);
         if (IS_ERR(res)) {
-                LASSERT(old_lock == NULL);
+                LASSERT(data.lmd_old == NULL);
                 RETURN(0);
         }
 
-        LDLM_RESOURCE_ADDREF(res);
-        lock_res(res);
-
-        lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
-                            flags, unref);
-        if (lock != NULL)
-                GOTO(out, rc = 1);
-        if (flags & LDLM_FL_BLOCK_GRANTED)
-                GOTO(out, rc = 0);
-        lock = search_queue(&res->lr_converting, &mode, policy, old_lock,
-                            flags, unref);
-        if (lock != NULL)
-                GOTO(out, rc = 1);
-        lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
-                            flags, unref);
-        if (lock != NULL)
-                GOTO(out, rc = 1);
+        LDLM_RESOURCE_ADDREF(res);
+        lock_res(res);
+
+        if (res->lr_type == LDLM_EXTENT)
+                lock = search_itree(res, &data);
+        else
+                lock = search_queue(&res->lr_granted, &data);
+        if (lock != NULL)
+                GOTO(out, rc = 1);
+        if (flags & LDLM_FL_BLOCK_GRANTED)
+                GOTO(out, rc = 0);
+        lock = search_queue(&res->lr_converting, &data);
+        if (lock != NULL)
+                GOTO(out, rc = 1);
+        lock = search_queue(&res->lr_waiting, &data);
+        if (lock != NULL)
+                GOTO(out, rc = 1);
 
         EXIT;
  out:
@@ -1394,10 +1481,10 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                           (type == LDLM_PLAIN || type == LDLM_IBITS) ?
                                 res_id->name[3] : policy->l_extent.end);
         }
-        if (old_lock)
-                LDLM_LOCK_PUT(old_lock);
+        if (data.lmd_old != NULL)
+                LDLM_LOCK_PUT(data.lmd_old);
 
-        return rc ? mode : 0;
+        return rc ? mode : 0;
 }
 EXPORT_SYMBOL(ldlm_lock_match);
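For illustration, here is a minimal standalone sketch of the idea behind this patch. All names in it (toy_lock, toy_lock_match, the unbalanced interval tree, and so on) are invented for the example and are not Lustre code; the real patch keeps granted extent locks in per-mode ldlm_interval_tree structures (res->lr_itree[]) and queries them through interval_search() with the lock_matches() callback shown above, while non-extent resources still go through the linear search_queue() walk.

/*
 * Illustrative sketch only -- not the Lustre implementation.  Granted
 * locks are kept in one small interval tree per lock mode; a match
 * request queries only the trees whose mode is compatible and prunes
 * subtrees using the max-end augmentation.
 */
#include <stdio.h>

enum toy_mode { TOY_PR = 1, TOY_PW = 2, TOY_EX = 4 };   /* bitmask modes */
#define TOY_MODE_NUM 3
static const enum toy_mode toy_modes[TOY_MODE_NUM] = { TOY_PR, TOY_PW, TOY_EX };

struct toy_lock {
        unsigned long start, end;       /* granted extent, inclusive */
        enum toy_mode mode;
        struct toy_lock *left, *right;  /* interval tree links */
        unsigned long max_end;          /* largest end in this subtree */
};

/* Insert into an (unbalanced) interval tree ordered by extent start. */
static struct toy_lock *toy_insert(struct toy_lock *root, struct toy_lock *lk)
{
        if (root == NULL) {
                lk->max_end = lk->end;
                return lk;
        }
        if (lk->start < root->start)
                root->left = toy_insert(root->left, lk);
        else
                root->right = toy_insert(root->right, lk);
        if (lk->end > root->max_end)
                root->max_end = lk->end;
        return root;
}

/* Find a granted lock whose extent covers [start, end]; subtrees whose
 * max_end is below the requested end cannot match and are skipped. */
static struct toy_lock *toy_tree_match(struct toy_lock *root,
                                       unsigned long start, unsigned long end)
{
        struct toy_lock *lk;

        if (root == NULL || root->max_end < end)
                return NULL;
        if (root->start <= start && root->end >= end)
                return root;
        lk = toy_tree_match(root->left, start, end);
        if (lk != NULL)
                return lk;
        if (root->start <= start)
                return toy_tree_match(root->right, start, end);
        return NULL;
}

/* Add a granted lock to the tree that holds its mode. */
static void toy_grant(struct toy_lock **granted, struct toy_lock *lk)
{
        int i;

        for (i = 0; i < TOY_MODE_NUM; i++)
                if (toy_modes[i] == lk->mode)
                        granted[i] = toy_insert(granted[i], lk);
}

/* Counterpart of the patched ldlm_lock_match() flow: whole trees whose
 * mode is not in the requested mask are skipped before any extent test. */
static struct toy_lock *toy_lock_match(struct toy_lock **granted, int mode_mask,
                                       unsigned long start, unsigned long end)
{
        int i;
        struct toy_lock *lk;

        for (i = 0; i < TOY_MODE_NUM; i++) {
                if (!(toy_modes[i] & mode_mask))
                        continue;
                lk = toy_tree_match(granted[i], start, end);
                if (lk != NULL)
                        return lk;
        }
        return NULL;
}

int main(void)
{
        struct toy_lock *granted[TOY_MODE_NUM] = { NULL, NULL, NULL };
        struct toy_lock locks[] = {
                { .start = 0,    .end = 4095,  .mode = TOY_PR },
                { .start = 4096, .end = 65535, .mode = TOY_PW },
                { .start = 0,    .end = ~0UL,  .mode = TOY_PR },
        };
        struct toy_lock *lk;
        int i;

        for (i = 0; i < (int)(sizeof(locks) / sizeof(locks[0])); i++)
                toy_grant(granted, &locks[i]);

        lk = toy_lock_match(granted, TOY_PR | TOY_PW, 8192, 16383);
        if (lk != NULL)
                printf("matched [%lu, %lu], mode %d\n",
                       lk->start, lk->end, (int)lk->mode);
        return 0;
}

The point of the per-mode trees is the same in both the sketch and the patch: a match request skips every tree whose mode cannot satisfy the requested mode mask and prunes non-overlapping subtrees, instead of testing every granted lock on a flat list as the removed linear search_queue() variant did.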