From 175ce732b881e99d77d3f5c11fd1d0d5f604218a Mon Sep 17 00:00:00 2001
From: Vitaly Fertman
Date: Fri, 29 Aug 2014 02:33:54 +0400
Subject: [PATCH] LU-5496 ldlm: reconstruct proper flags on enqueue resend

otherwise, a waiting lock may get granted as no BLOCK_GRANTED
flag is returned in the reply

Signed-off-by: Vitaly Fertman
Xyratex-bug-id: MRP-1944
Change-Id: I5e938ff0454d5e8694b09f9fff3c1f82d746360d
Reviewed-on: http://review.whamcloud.com/11644
Tested-by: Jenkins
Tested-by: Maloo
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---
 lustre/include/lustre_dlm_flags.h |  6 ++++++
 lustre/ldlm/ldlm_flock.c          |  3 +--
 lustre/ldlm/ldlm_lock.c           | 15 ++++++++++++---
 lustre/ldlm/ldlm_lockd.c          | 11 +++++++----
 lustre/ldlm/ldlm_request.c        |  6 ++----
 lustre/mgs/mgs_handler.c          |  3 +--
 6 files changed, 29 insertions(+), 15 deletions(-)

diff --git a/lustre/include/lustre_dlm_flags.h b/lustre/include/lustre_dlm_flags.h
index ea70ea0..483e8da 100644
--- a/lustre/include/lustre_dlm_flags.h
+++ b/lustre/include/lustre_dlm_flags.h
@@ -372,6 +372,12 @@
 			       LDLM_FL_NO_TIMEOUT	|\
 			       LDLM_FL_TEST_LOCK)
 
+/** flags returned in @flags parameter on ldlm_lock_enqueue,
+ * to be re-constructed on re-send */
+#define LDLM_FL_SRV_ENQ_MASK  (LDLM_FL_LOCK_CHANGED	|\
+			       LDLM_FL_BLOCKED_MASK	|\
+			       LDLM_FL_NO_TIMEOUT)
+
 /** test for ldlm_lock flag bit set */
 #define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
 
diff --git a/lustre/ldlm/ldlm_flock.c b/lustre/ldlm/ldlm_flock.c
index 238d4be..dc98cc0 100644
--- a/lustre/ldlm/ldlm_flock.c
+++ b/lustre/ldlm/ldlm_flock.c
@@ -685,8 +685,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 	if (flags & LDLM_FL_FAILED)
 		goto granted;
 
-	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-		       LDLM_FL_BLOCK_CONV))) {
+	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
 		if (NULL == data)
 			/* mds granted the lock in the reply */
 			goto granted;
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 3e132b4..6b81e74b 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -1653,8 +1653,18 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
 		}
 	}
 
-	if (*flags & LDLM_FL_RESENT)
+	if (*flags & LDLM_FL_RESENT) {
+		/* Reconstruct LDLM_FL_SRV_ENQ_MASK @flags for reply.
+		 * Set LOCK_CHANGED always.
+		 * Check if the lock is granted for BLOCK_GRANTED.
+		 * Take NO_TIMEOUT from the lock as it is inherited through
+		 * LDLM_FL_INHERIT_MASK */
+		*flags |= LDLM_FL_LOCK_CHANGED;
+		if (lock->l_req_mode != lock->l_granted_mode)
+			*flags |= LDLM_FL_BLOCK_GRANTED;
+		*flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
 		RETURN(ELDLM_OK);
+	}
 
 	/* For a replaying lock, it might be already in granted list. So
 	 * unlinking the lock will cause the interval node to be freed, we
@@ -1668,8 +1678,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
 		/* The server returned a blocked lock, but it was granted
 		 * before we got a chance to actually enqueue it. We don't
 		 * need to do anything else. */
-		*flags &= ~(LDLM_FL_BLOCK_GRANTED |
-			    LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
+		*flags &= ~LDLM_FL_BLOCKED_MASK;
 		GOTO(out, rc = ELDLM_OK);
 	}
 
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index ad2f233..5d1bc0d 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -1284,6 +1284,11 @@ int ldlm_handle_enqueue0(struct ldlm_namespace *ns,
 				       &lock->l_remote_handle,
 				       &lock->l_exp_hash);
 
+	/* Inherit the enqueue flags before the operation, because we do not
+	 * keep the res lock on return and next operations (BL AST) may proceed
+	 * without them. */
+	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
+					      LDLM_FL_INHERIT_MASK);
 existing_lock:
 
 	if (flags & LDLM_FL_HAS_INTENT) {
@@ -1321,7 +1326,6 @@ existing_lock:
 	}
 
 	dlm_rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
-	dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
 
 	ldlm_lock2desc(lock, &dlm_rep->lock_desc);
 	ldlm_lock2handle(lock, &dlm_rep->lock_handle);
@@ -1332,9 +1336,8 @@ existing_lock:
 
 	/* Now take into account flags to be inherited from original lock
 	   request both in reply to client and in our own lock flags. */
-	dlm_rep->lock_flags |= dlm_req->lock_flags & LDLM_FL_INHERIT_MASK;
-	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
-					      LDLM_FL_INHERIT_MASK);
+	dlm_rep->lock_flags = ldlm_flags_to_wire(flags);
+	lock->l_flags |= flags & LDLM_FL_INHERIT_MASK;
 
 	/* Don't move a pending lock onto the export if it has already been
 	 * disconnected due to eviction (bug 5683) or server umount (bug 24324).
diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c
index 486ec0c..3c1a0a8 100644
--- a/lustre/ldlm/ldlm_request.c
+++ b/lustre/ldlm/ldlm_request.c
@@ -188,8 +188,7 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
 		RETURN(0);
 	}
 
-	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-		       LDLM_FL_BLOCK_CONV))) {
+	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
 		wake_up(&lock->l_waitq);
 		RETURN(ldlm_completion_tail(lock));
 	}
@@ -238,8 +237,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 		goto noreproc;
 	}
 
-	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-		       LDLM_FL_BLOCK_CONV))) {
+	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
 		wake_up(&lock->l_waitq);
 		RETURN(0);
 	}
diff --git a/lustre/mgs/mgs_handler.c b/lustre/mgs/mgs_handler.c
index 2347b8c..7e196fb 100644
--- a/lustre/mgs/mgs_handler.c
+++ b/lustre/mgs/mgs_handler.c
@@ -145,8 +145,7 @@ static int mgs_completion_ast_generic(struct ldlm_lock *lock, __u64 flags,
 {
 	ENTRY;
 
-	if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-		       LDLM_FL_BLOCK_CONV))) {
+	if (!(flags & LDLM_FL_BLOCKED_MASK)) {
 		struct fs_db *fsdb;
 
 		/* l_ast_data is used as a marker to avoid cancel ldlm lock
-- 
1.8.3.1
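
As background for the ldlm_lock.c hunk: on a resent enqueue the server
returns early and skips the code that would normally populate the reply
flags, so the patch rebuilds them by hand. The stand-alone sketch below
mirrors that logic for illustration only; it is not Lustre code, and the
flag values, struct toy_lock, and reconstruct_resend_flags() are
hypothetical stand-ins.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in flag values, not the real Lustre bit layout. */
#define LDLM_FL_LOCK_CHANGED	0x01ULL
#define LDLM_FL_BLOCK_GRANTED	0x02ULL
#define LDLM_FL_BLOCK_CONV	0x04ULL
#define LDLM_FL_BLOCK_WAIT	0x08ULL
#define LDLM_FL_NO_TIMEOUT	0x10ULL

#define LDLM_FL_BLOCKED_MASK	(LDLM_FL_BLOCK_GRANTED |	\
				 LDLM_FL_BLOCK_CONV |		\
				 LDLM_FL_BLOCK_WAIT)

struct toy_lock {
	int	 l_req_mode;		/* mode the client asked for */
	int	 l_granted_mode;	/* mode actually granted so far */
	uint64_t l_flags;		/* where inherited flags were stored */
};

/* Rebuild the reply flags for a resent enqueue, as the patch does:
 * LOCK_CHANGED is always set; BLOCK_GRANTED is set iff the lock is
 * still waiting; NO_TIMEOUT is taken from the lock, where the earlier
 * LDLM_FL_INHERIT_MASK inheritance stored it. */
static void reconstruct_resend_flags(const struct toy_lock *lock,
				     uint64_t *flags)
{
	*flags |= LDLM_FL_LOCK_CHANGED;
	if (lock->l_req_mode != lock->l_granted_mode)
		*flags |= LDLM_FL_BLOCK_GRANTED;
	*flags |= lock->l_flags & LDLM_FL_NO_TIMEOUT;
}

int main(void)
{
	/* A lock that is still waiting: requested mode not granted yet. */
	struct toy_lock waiting = {
		.l_req_mode = 2,
		.l_granted_mode = 0,
		.l_flags = LDLM_FL_NO_TIMEOUT,
	};
	uint64_t flags = 0;

	reconstruct_resend_flags(&waiting, &flags);

	/* With the fix, the resend reply carries a blocked bit again, so
	 * the client keeps waiting instead of treating the lock as
	 * granted. Prints "blocked: 1". */
	printf("blocked: %d\n", !!(flags & LDLM_FL_BLOCKED_MASK));
	return 0;
}

Collapsing the three LDLM_FL_BLOCK_* tests into the pre-existing
LDLM_FL_BLOCKED_MASK, as the flock.c, ldlm_request.c and mgs_handler.c
hunks do, keeps those call sites from drifting out of sync with the set
of blocked bits.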