* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2013, Intel Corporation.
+ * Copyright (c) 2010, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
lock->l_policy_data.l_extent.gid) {
/* If existing lock with matched gid is granted,
we grant new one too. */
- if (lock->l_req_mode == lock->l_granted_mode)
- RETURN(2);
+ if (ldlm_is_granted(lock))
+ RETURN(2);
/* Otherwise we are scanning queue of waiting
* locks and it means current request would
}
}
- if (unlikely(req_mode == LCK_GROUP &&
- (lock->l_req_mode != lock->l_granted_mode))) {
+ if (unlikely(req_mode == LCK_GROUP &&
+ !ldlm_is_granted(lock))) {
scan = 1;
compat = 0;
if (lock->l_req_mode != LCK_GROUP) {
{
time64_t timeout;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PROLONG_PAUSE, 3);
+
if (arg->lpa_export != lock->l_export ||
lock->l_flags & LDLM_FL_DESTROYED)
/* ignore unrelated locks */
enum ldlm_error *err, struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
- struct list_head rpc_list;
int rc, rc2;
int contended_locks = 0;
+ struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
+ NULL : work_list;
ENTRY;
- LASSERT(lock->l_granted_mode != lock->l_req_mode);
- LASSERT(list_empty(&res->lr_converting));
+ LASSERT(!ldlm_is_granted(lock));
LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
!ldlm_is_ast_discard_data(lock));
- INIT_LIST_HEAD(&rpc_list);
check_res_locked(res);
*err = ELDLM_OK;
if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
ldlm_extent_policy(res, lock, flags);
- ldlm_grant_lock(lock, work_list);
+ ldlm_grant_lock(lock, grant_work);
RETURN(LDLM_ITER_CONTINUE);
}
- LASSERT((intention == LDLM_PROCESS_ENQUEUE && work_list == NULL) ||
- (intention == LDLM_PROCESS_RECOVERY && work_list != NULL));
- restart:
contended_locks = 0;
rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
- &rpc_list, &contended_locks);
+ work_list, &contended_locks);
if (rc < 0)
GOTO(out_rpc_list, rc);
rc2 = 0;
if (rc != 2) {
rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
- flags, err, &rpc_list,
+ flags, err, work_list,
&contended_locks);
if (rc2 < 0)
GOTO(out_rpc_list, rc = rc2);
}
- if (rc + rc2 != 2) {
- /* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to force
- * client to wait for the lock endlessly once the lock is
- * enqueued -bzzz */
- rc = ldlm_handle_conflict_lock(lock, flags, &rpc_list,
- LDLM_FL_NO_TIMEOUT);
- if (rc == -ERESTART)
- GOTO(restart, rc);
- *err = rc;
- } else {
+ if (rc + rc2 == 2) {
ldlm_extent_policy(res, lock, flags);
ldlm_resource_unlink_lock(lock);
- ldlm_grant_lock(lock, work_list);
- rc = 0;
+ ldlm_grant_lock(lock, grant_work);
+ } else {
+ /* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to
+ * force client to wait for the lock endlessly once
+ * the lock is enqueued -bzzz */
+ *flags |= LDLM_FL_NO_TIMEOUT;
}
+ rc = LDLM_ITER_CONTINUE;
out_rpc_list:
- if (!list_empty(&rpc_list)) {
- LASSERT(!ldlm_is_ast_discard_data(lock));
- ldlm_discard_bl_list(&rpc_list);
- }
RETURN(rc);
}
#endif /* HAVE_SERVER_SUPPORT */
EXPORT_SYMBOL(ldlm_extent_shift_kms);
struct kmem_cache *ldlm_interval_slab;
-struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
+static struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
struct ldlm_interval *node;
ENTRY;
LASSERT(mode != 0);
LASSERT(is_power_of_2(mode));
- for (index = -1; mode != 0; index++, mode >>= 1)
- /* do nothing */;
+ index = ilog2(mode);
LASSERT(index < LCK_MODE_NUM);
return index;
}
+/**
+ * Allocate the interval-tree node backing a new extent lock.
+ *
+ * Resets \a lock->l_tree_node before calling ldlm_interval_alloc(),
+ * which attaches a freshly allocated interval node to the lock
+ * (later consumed by ldlm_extent_add_lock()).
+ *
+ * \param[in] lock	extent lock being initialized
+ *
+ * \retval 0		on success
+ * \retval -ENOMEM	if the interval node could not be allocated
+ */
+int ldlm_extent_alloc_lock(struct ldlm_lock *lock)
+{
+	lock->l_tree_node = NULL;
+	if (ldlm_interval_alloc(lock) == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
/** Add newly granted lock into interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
struct ldlm_lock *lock)
struct ldlm_extent *extent;
int idx, rc;
- LASSERT(lock->l_granted_mode == lock->l_req_mode);
+ LASSERT(ldlm_is_granted(lock));
node = lock->l_tree_node;
LASSERT(node != NULL);