From 563745cbbd6f574c33d8b6a935b4b81715db6b3d Mon Sep 17 00:00:00 2001
From: Vitaly Fertman
Date: Mon, 29 Sep 2014 19:25:47 -0400
Subject: [PATCH] LU-5496 ldlm: reconstruct proper flags on enqueue resend

Otherwise, a waiting lock may get granted as no BLOCK_GRANTED flag
is returned.

Lustre-commit: 175ce732b881e99d77d3f5c11fd1d0d5f604218a
Lustre-change: http://review.whamcloud.com/11644

Signed-off-by: Vitaly Fertman
Xyratex-bug-id: MRP-1944
Change-Id: I5e938ff0454d5e8694b09f9fff3c1f82d746360d
Reviewed-on: http://review.whamcloud.com/11644
Tested-by: Jenkins
Tested-by: Maloo
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---
 lustre/include/lustre_dlm_flags.h |  6 ++++++
 lustre/ldlm/ldlm_flock.c          |  3 +--
 lustre/ldlm/ldlm_lock.c           | 15 ++++++++++++---
 lustre/ldlm/ldlm_lockd.c          | 11 +++++++----
 lustre/ldlm/ldlm_request.c        |  6 ++----
 lustre/mgs/mgs_handler.c          |  3 +--
 6 files changed, 29 insertions(+), 15 deletions(-)

diff --git a/lustre/include/lustre_dlm_flags.h b/lustre/include/lustre_dlm_flags.h
index 7750d5b..def9cb1 100644
--- a/lustre/include/lustre_dlm_flags.h
+++ b/lustre/include/lustre_dlm_flags.h
@@ -373,6 +373,12 @@
 #define ldlm_set_excl(_l)       LDLM_SET_FLAG((  _l), 1ULL << 55)
 #define ldlm_clear_excl(_l)     LDLM_CLEAR_FLAG((_l), 1ULL << 55)
 
+/** flags returned in @flags parameter on ldlm_lock_enqueue,
+ * to be re-constructed on re-send */
+#define LDLM_FL_SRV_ENQ_MASK (LDLM_FL_LOCK_CHANGED |\
+			      LDLM_FL_BLOCKED_MASK |\
+			      LDLM_FL_NO_TIMEOUT)
+
 /** test for ldlm_lock flag bit set */
 #define LDLM_TEST_FLAG(_l, _b)  (((_l)->l_flags & (_b)) != 0)
 
diff --git a/lustre/ldlm/ldlm_flock.c b/lustre/ldlm/ldlm_flock.c
index f5e63fe..7eca413 100644
--- a/lustre/ldlm/ldlm_flock.c
+++ b/lustre/ldlm/ldlm_flock.c
@@ -695,8 +695,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 
 	LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
 
-	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-		       LDLM_FL_BLOCK_CONV))) {
+	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
 		if (NULL == data)
 			/* mds granted the lock in the reply */
 			goto granted;
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 49c30a7..8e9751e 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -1687,8 +1687,18 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
 		}
 	}
 
-	if (*flags & LDLM_FL_RESENT)
+	if (*flags & LDLM_FL_RESENT) {
+		/* Reconstruct LDLM_FL_SRV_ENQ_MASK @flags for reply.
+		 * Set LOCK_CHANGED always.
+		 * Check if the lock is granted for BLOCK_GRANTED.
+		 * Take NO_TIMEOUT from the lock as it is inherited through
+		 * LDLM_FL_INHERIT_MASK */
+		*flags |= LDLM_FL_LOCK_CHANGED;
+		if (lock->l_req_mode != lock->l_granted_mode)
+			*flags |= LDLM_FL_BLOCK_GRANTED;
+		*flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
 		RETURN(ELDLM_OK);
+	}
 
 	/* For a replaying lock, it might be already in granted list. So
 	 * unlinking the lock will cause the interval node to be freed, we
@@ -1702,8 +1712,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
 		/* The server returned a blocked lock, but it was granted
 		 * before we got a chance to actually enqueue it.  We don't
 		 * need to do anything else. */
-		*flags &= ~(LDLM_FL_BLOCK_GRANTED |
-			    LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
+		*flags &= ~LDLM_FL_BLOCKED_MASK;
 		GOTO(out, ELDLM_OK);
 	}
 
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index a89b2f8..a66d0ac 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -1282,6 +1282,11 @@ int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
 				       &lock->l_remote_handle,
 				       &lock->l_exp_hash);
 
+	/* Inherit the enqueue flags before the operation, because we do not
+	 * keep the res lock on return and next operations (BL AST) may proceed
+	 * without them. */
+	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
+					      LDLM_FL_INHERIT_MASK);
 existing_lock:
 
 	if (flags & LDLM_FL_HAS_INTENT) {
@@ -1319,7 +1324,6 @@ existing_lock:
 	}
 
 	dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-	dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
 
 	ldlm_lock2desc(lock, &dlm_rep->lock_desc);
 	ldlm_lock2handle(lock, &dlm_rep->lock_handle);
@@ -1330,9 +1334,8 @@ existing_lock:
 
 	/* Now take into account flags to be inherited from original lock
 	   request both in reply to client and in our own lock flags. */
-	dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_INHERIT_FLAGS;
-	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
-					      LDLM_INHERIT_FLAGS);
+	dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
+	lock->l_flags |= flags & LDLM_FL_INHERIT_MASK;
 
 	/* Don't move a pending lock onto the export if it has already been
 	 * disconnected due to eviction (bug 5683) or server umount (bug 24324).
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index d59e29d..a67cd22 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -192,8 +192,7 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
 		RETURN(0);
 	}
 
-	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-		       LDLM_FL_BLOCK_CONV))) {
+	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
 		wake_up(&lock->l_waitq);
 		RETURN(ldlm_completion_tail(lock));
 	}
@@ -242,8 +241,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 		goto noreproc;
 	}
 
-	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-		       LDLM_FL_BLOCK_CONV))) {
+	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
 		wake_up(&lock->l_waitq);
 		RETURN(0);
 	}
diff --git a/lustre/mgs/mgs_handler.c b/lustre/mgs/mgs_handler.c
index bffde6a..fe485c7 100644
--- a/lustre/mgs/mgs_handler.c
+++ b/lustre/mgs/mgs_handler.c
@@ -143,8 +143,7 @@ static int mgs_completion_ast_generic(struct ldlm_lock *lock, __u64 flags,
 {
 	ENTRY;
 
-	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-		       LDLM_FL_BLOCK_CONV))) {
+	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
 		struct fs_db *fsdb;
 
 		/* l_ast_data is used as a marker to avoid cancel ldlm lock
-- 
1.8.3.1
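
Illustrative sketch (not part of the patch): the core of the change is that, on a
resent enqueue, the server rebuilds the reply flags from the current lock state
instead of echoing whatever the original reply carried, so a still-waiting lock
keeps its BLOCK_GRANTED bit.  The fragment below mirrors only that decision logic
from ldlm_lock_enqueue(); the flag values and the struct mini_lock type are
simplified stand-ins invented for this example, not the real Lustre definitions.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the real LDLM flags (values are illustrative). */
#define FL_LOCK_CHANGED		(1ULL << 0)
#define FL_BLOCK_GRANTED	(1ULL << 1)
#define FL_NO_TIMEOUT		(1ULL << 2)

/* Hypothetical miniature lock: only the fields the resend path inspects. */
struct mini_lock {
	int	 l_req_mode;		/* requested mode */
	int	 l_granted_mode;	/* currently granted mode */
	uint64_t l_flags;		/* lock flags, may carry FL_NO_TIMEOUT */
};

/* Mirrors the LDLM_FL_RESENT branch added above: rebuild the reply flags
 * from the lock state rather than from the (lost) original reply. */
static void reconstruct_reply_flags(const struct mini_lock *lock,
				    uint64_t *flags)
{
	*flags |= FL_LOCK_CHANGED;			/* always set */
	if (lock->l_req_mode != lock->l_granted_mode)
		*flags |= FL_BLOCK_GRANTED;		/* still waiting */
	*flags |= lock->l_flags & FL_NO_TIMEOUT;	/* inherited flag */
}

int main(void)
{
	struct mini_lock waiting = { .l_req_mode = 1, .l_granted_mode = 0,
				     .l_flags = FL_NO_TIMEOUT };
	uint64_t flags = 0;

	reconstruct_reply_flags(&waiting, &flags);
	printf("reply flags: changed=%d blocked=%d no_timeout=%d\n",
	       !!(flags & FL_LOCK_CHANGED),
	       !!(flags & FL_BLOCK_GRANTED),
	       !!(flags & FL_NO_TIMEOUT));
	return 0;
}

With the reconstructed BLOCK_GRANTED bit present, completion ASTs such as
ldlm_flock_completion_ast() no longer treat a resent, still-blocked lock as
granted, which is the failure mode described in the commit message.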