LDLM_FL_NO_TIMEOUT |\
LDLM_FL_TEST_LOCK)
+/** Flags returned in the @flags parameter of ldlm_lock_enqueue();
+ * they must be re-constructed on re-send. */
+#define LDLM_FL_SRV_ENQ_MASK (LDLM_FL_LOCK_CHANGED |\
+ LDLM_FL_BLOCKED_MASK |\
+ LDLM_FL_NO_TIMEOUT)
+
/** Test whether any of the flag bits in @_b are set in lock @_l's l_flags. */
#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
if (flags & LDLM_FL_FAILED)
goto granted;
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
if (NULL == data)
/* mds granted the lock in the reply */
goto granted;
}
}
- if (*flags & LDLM_FL_RESENT)
+ if (*flags & LDLM_FL_RESENT) {
+ /* Reconstruct the LDLM_FL_SRV_ENQ_MASK @flags for the reply:
+ * LOCK_CHANGED is always set; BLOCK_GRANTED is set when the lock
+ * has not been granted yet; NO_TIMEOUT is taken from the lock
+ * itself because it is inherited through
+ * LDLM_FL_INHERIT_MASK. */
+ *flags |= LDLM_FL_LOCK_CHANGED;
+ if (lock->l_req_mode != lock->l_granted_mode)
+ *flags |= LDLM_FL_BLOCK_GRANTED;
+ *flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
RETURN(ELDLM_OK);
+ }
/* For a replaying lock, it might be already in granted list. So
* unlinking the lock will cause the interval node to be freed, we
/* The server returned a blocked lock, but it was granted
* before we got a chance to actually enqueue it. We don't
* need to do anything else. */
- *flags &= ~(LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
+ *flags &= ~LDLM_FL_BLOCKED_MASK;
GOTO(out, rc = ELDLM_OK);
}
&lock->l_remote_handle,
&lock->l_exp_hash);
+ /* Inherit the enqueue flags before the operation, because we do not
+ * keep the resource lock on return, and subsequent operations (e.g. the
+ * BL AST) may proceed without them. */
+ lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
+ LDLM_FL_INHERIT_MASK);
existing_lock:
if (flags & LDLM_FL_HAS_INTENT) {
}
dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
ldlm_lock2desc(lock, &dlm_rep->lock_desc);
ldlm_lock2handle(lock, &dlm_rep->lock_handle);
/* Now take into account flags to be inherited from original lock
request both in reply to client and in our own lock flags. */
- dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_FL_INHERIT_MASK;
- lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
- LDLM_FL_INHERIT_MASK);
+ dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
+ lock->l_flags |= flags & LDLM_FL_INHERIT_MASK;
/* Don't move a pending lock onto the export if it has already been
* disconnected due to eviction (bug 5683) or server umount (bug 24324).
RETURN(0);
}
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
wake_up(&lock->l_waitq);
RETURN(ldlm_completion_tail(lock));
}
goto noreproc;
}
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
wake_up(&lock->l_waitq);
RETURN(0);
}
{
ENTRY;
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
struct fs_db *fsdb;
/* l_ast_data is used as a marker to avoid cancel ldlm lock