struct lu_ref lr_reference;
};
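+/* A lock is granted once l_granted_mode has been set to the requested
+ * l_req_mode; until then the two fields differ and the lock is still
+ * queued as waiting. */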
+static inline bool ldlm_is_granted(struct ldlm_lock *lock)
+{
+ return lock->l_req_mode == lock->l_granted_mode;
+}
+
static inline bool ldlm_has_layout(struct ldlm_lock *lock)
{
return lock->l_resource->lr_type == LDLM_IBITS &&
lock->l_policy_data.l_extent.gid) {
/* If existing lock with matched gid is granted,
* we grant new one too. */
- if (lock->l_req_mode == lock->l_granted_mode)
- RETURN(2);
+ if (ldlm_is_granted(lock))
+ RETURN(2);
/* Otherwise we are scanning queue of waiting
* locks and it means current request would
}
}
- if (unlikely(req_mode == LCK_GROUP &&
- (lock->l_req_mode != lock->l_granted_mode))) {
+ if (unlikely(req_mode == LCK_GROUP &&
+ !ldlm_is_granted(lock))) {
scan = 1;
compat = 0;
if (lock->l_req_mode != LCK_GROUP) {
NULL : work_list;
ENTRY;
- LASSERT(lock->l_granted_mode != lock->l_req_mode);
+ LASSERT(!ldlm_is_granted(lock));
LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
!ldlm_is_ast_discard_data(lock));
check_res_locked(res);
struct ldlm_extent *extent;
int idx, rc;
- LASSERT(lock->l_granted_mode == lock->l_req_mode);
+ LASSERT(ldlm_is_granted(lock));
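+ /* only a granted lock may be linked into the resource's interval tree */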
node = lock->l_tree_node;
LASSERT(node != NULL);
ENTRY;
- LASSERT(lock->l_granted_mode != lock->l_req_mode);
+ LASSERT(!ldlm_is_granted(lock));
check_res_locked(res);
if (intention == LDLM_PROCESS_RESCAN) {
int ret = 0;
lock_res_and_lock(lock);
- if ((lock->l_req_mode == lock->l_granted_mode) &&
- !ldlm_is_cp_reqd(lock))
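+ /* the wait is over once the lock is granted with no completion
+ * AST pending, or it has failed or been cancelled */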
+ if (ldlm_is_granted(lock) && !ldlm_is_cp_reqd(lock))
ret = 1;
else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
ret = 1;
{
struct sl_insert_point prev;
- LASSERT(lock->l_req_mode == lock->l_granted_mode);
+ LASSERT(ldlm_is_granted(lock));
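+ /* locate the skiplist insertion point, then link the lock
+ * into the resource's granted list */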
search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
ldlm_granted_list_add_lock(lock, &prev);
*flags |= LDLM_FL_LOCK_CHANGED;
RETURN(0);
} else if (rc != ELDLM_OK &&
- lock->l_req_mode == lock->l_granted_mode) {
+ ldlm_is_granted(lock)) {
LASSERT(*flags & LDLM_FL_RESENT);
/* It may happen that ns_policy returns an error in the
 * resend case; the object may be unlinked or just some
* Take NO_TIMEOUT from the lock as it is inherited through
* LDLM_FL_INHERIT_MASK */
*flags |= LDLM_FL_LOCK_CHANGED;
- if (lock->l_req_mode != lock->l_granted_mode)
+ if (!ldlm_is_granted(lock))
*flags |= LDLM_FL_BLOCK_GRANTED;
*flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
RETURN(ELDLM_OK);
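+ /* a replayed extent lock may need an interval tree node; allocate
+ * it before the resource lock is retaken */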
if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT)
OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
- lock_res_and_lock(lock);
- if (local && lock->l_req_mode == lock->l_granted_mode) {
+ lock_res_and_lock(lock);
+ if (local && ldlm_is_granted(lock)) {
/* The server returned a blocked lock, but it was granted
* before we got a chance to actually enqueue it. We don't
* need to do anything else. */
RETURN(-EAGAIN);
/* lock was granted while resource was unlocked. */
- if (lock->l_granted_mode == lock->l_req_mode) {
+ if (ldlm_is_granted(lock)) {
/* bug 11300: if the lock has been granted,
* break earlier because otherwise, we will go
* to restart and ldlm_resource_unlink will be
ldlm_resource_unlink_lock(lock);
ldlm_lock_destroy_nolock(lock);
- if (lock->l_granted_mode == lock->l_req_mode)
- ldlm_pool_del(&ns->ns_pool, lock);
+ if (ldlm_is_granted(lock))
+ ldlm_pool_del(&ns->ns_pool, lock);
/* Make sure we will not be called again for the same lock, which is
 * possible if lock->l_granted_mode is not zeroed out */
{
spin_lock_bh(&lock->l_export->exp_bl_list_lock);
if (list_empty(&lock->l_exp_list)) {
- if (lock->l_granted_mode != lock->l_req_mode)
+ if (!ldlm_is_granted(lock))
list_add_tail(&lock->l_exp_list,
&lock->l_export->exp_bl_list);
else
RETURN(0);
}
- if (lock->l_granted_mode != lock->l_req_mode) {
+ if (!ldlm_is_granted(lock)) {
/* this blocking AST will be communicated as part of the
* completion AST instead */
ldlm_add_blocked_lock(lock);
req->rq_no_resend = 1;
} else {
- LASSERT(lock->l_granted_mode == lock->l_req_mode);
+ LASSERT(ldlm_is_granted(lock));
ldlm_add_waiting_lock(lock, ldlm_bl_timeout(lock));
unlock_res_and_lock(lock);
bl_lock->l_policy_data.l_inodebits.bits;
}
dlm_rep->lock_flags |= ldlm_flags_to_wire(LDLM_FL_AST_SENT);
- if (lock->l_granted_mode == lock->l_req_mode) {
+ if (ldlm_is_granted(lock)) {
/*
* Only cancel lock if it was granted, because it would
* be destroyed immediately and would never be granted
while (to > 0) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(to);
- if (lock->l_granted_mode == lock->l_req_mode ||
+ if (ldlm_is_granted(lock) ||
ldlm_is_destroyed(lock))
break;
}
}
if (ldlm_is_destroyed(lock) ||
- lock->l_granted_mode == lock->l_req_mode) {
+ ldlm_is_granted(lock)) {
/* bug 11300: the lock has already been granted */
unlock_res_and_lock(lock);
LDLM_DEBUG(lock, "Double grant race happened");
lock_res_and_lock(lock);
- if (lock->l_req_mode != lock->l_granted_mode) {
- unlock_res_and_lock(lock);
- return 0;
- }
+ if (!ldlm_is_granted(lock)) {
+ unlock_res_and_lock(lock);
+ return 0;
+ }
LASSERT(lock->l_resource);
if (lock->l_resource->lr_type != LDLM_IBITS &&
int rc;
ENTRY;
- LASSERT(lock->l_granted_mode != lock->l_req_mode);
+ LASSERT(!ldlm_is_granted(lock));
check_res_locked(res);
*err = ELDLM_OK;
/* Set a flag to prevent us from sending a CANCEL (bug 407) */
lock_res_and_lock(lock);
- /* Check that lock is not granted or failed, we might race. */
- if ((lock->l_req_mode != lock->l_granted_mode) &&
- !ldlm_is_failed(lock)) {
+ /* Check that the lock is not granted or failed; we might race. */
+ if (!ldlm_is_granted(lock) && !ldlm_is_failed(lock)) {
/* Make sure that this lock will not be found by a racing
 * bl_ast and an -EINVAL reply is sent to the server anyway.
 * b=17645 */
* Cannot unlock after the check either, as that still leaves
* a tiny window for completion to get in */
lock_res_and_lock(lock);
- if (lock->l_req_mode != lock->l_granted_mode)
+ if (!ldlm_is_granted(lock))
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
lock->l_lvb_data, lvb_len);
unlock_res_and_lock(lock);
* This happens whenever a lock enqueue is the request that triggers
* recovery.
*/
- if (lock->l_granted_mode == lock->l_req_mode)
+ if (ldlm_is_granted(lock))
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
else if (!list_empty(&lock->l_res_link))
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
break;
}
case LDLM_CB_CANCELING:
+ /* Nothing to do for non-granted locks */
+ if (!ldlm_is_granted(lock))
+ break;
+
if (ldlm_is_converting(lock)) {
/* this is called on an already converted lock, so
 * ibits holds only the remaining bits and cancel_bits
}
/* Only fill layout if layout lock is granted */
- if (!ldlm_has_layout(lock) || lock->l_granted_mode != lock->l_req_mode)
+ if (!ldlm_has_layout(lock) || !ldlm_is_granted(lock))
GOTO(out, rc = 0);
/* XXX get fid by resource id. Why not include fid in ldlm_resource? */
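+ /* an osc_lock in OLS_GRANTED state must be backed by a DLM lock
+ * that exists, is granted and is held */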
if (! ergo(ols->ols_state == OLS_GRANTED,
olock != NULL &&
- olock->l_req_mode == olock->l_granted_mode &&
+ ldlm_is_granted(olock) &&
ols->ols_hold))
return 0;
return 1;
/* Lock must have been granted. */
lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
+ if (ldlm_is_granted(dlmlock)) {
struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
LASSERT(dlmlock != NULL);
lock_res_and_lock(dlmlock);
- LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+ LASSERT(ldlm_is_granted(dlmlock));
/* there is no osc_lock associated with speculative locks */
osc_lock_lvb_update(env, osc, dlmlock, NULL);
LASSERT(flag == LDLM_CB_CANCELING);
lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
+ if (!ldlm_is_granted(dlmlock)) {
dlmlock->l_ast_data = NULL;
unlock_res_and_lock(dlmlock);
RETURN(0);
* Cancel all unused and granted extent locks.
*/
if (lock->l_resource->lr_type == LDLM_EXTENT &&
- lock->l_granted_mode == lock->l_req_mode &&
+ ldlm_is_granted(lock) &&
osc_ldlm_weigh_ast(lock) == 0)
RETURN(1);