LU-2906 ldlm: use accessor macros for l_flags
[fs/lustre-release.git] lustre/ldlm/ldlm_flock.c
index f5e63fe..8e4d453 100644
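
For reference, the ldlm_is_*() / ldlm_set_*() helpers introduced below are thin wrappers around single-bit tests and updates of lock->l_flags, coming from the auto-generated lustre/include/lustre_dlm_flags.h. A minimal sketch of the pattern, with illustrative bit values and a reduced stand-in struct rather than the real definitions:

    /*
     * Sketch of the accessor-macro pattern this patch converts to; the real
     * definitions live in lustre/include/lustre_dlm_flags.h.  Bit values and
     * the reduced struct below are illustrative only.
     */
    #include <stdint.h>

    struct ldlm_lock {                      /* reduced stand-in */
            uint64_t l_flags;               /* 64-bit flag word */
    };

    #define LDLM_FL_CBPENDING       (1ULL <<  4)    /* illustrative bits */
    #define LDLM_FL_FAILED          (1ULL << 24)

    /* generic single-bit helpers over l_flags */
    #define LDLM_TEST_FLAG(_l, _b)  (((_l)->l_flags & (_b)) != 0)
    #define LDLM_SET_FLAG(_l, _b)   ((_l)->l_flags |= (_b))
    #define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))

    /* one is/set/clear triple per flag */
    #define ldlm_is_failed(_l)       LDLM_TEST_FLAG((_l), LDLM_FL_FAILED)
    #define ldlm_set_cbpending(_l)   LDLM_SET_FLAG((_l), LDLM_FL_CBPENDING)
    #define ldlm_clear_cbpending(_l) LDLM_CLEAR_FLAG((_l), LDLM_FL_CBPENDING)

Because each accessor touches exactly one flag, combined updates such as lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING in the hunks below stay as direct assignments.
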
@@ -137,7 +137,7 @@ static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
 static inline void
 ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
 {
-        ENTRY;
+       ENTRY;
 
        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
                   mode, flags);
@@ -145,11 +145,10 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
 
-        cfs_list_del_init(&lock->l_res_link);
-        if (flags == LDLM_FL_WAIT_NOREPROC &&
-            !(lock->l_flags & LDLM_FL_FAILED)) {
-                /* client side - set a flag to prevent sending a CANCEL */
-                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
+       cfs_list_del_init(&lock->l_res_link);
+       if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
+               /* client side - set a flag to prevent sending a CANCEL */
+               lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
 
                 /* when reaching here, it is under lock_res_and_lock(). Thus,
                    need call the nolock version of ldlm_lock_decref_internal*/
@@ -264,7 +263,7 @@ static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
                                "support flock canceliation\n");
        } else {
                LASSERT(lock->l_completion_ast);
-               LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
+               LASSERT(!ldlm_is_ast_sent(lock));
                lock->l_flags |= LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
                        LDLM_FL_FLOCK_DEADLOCK;
                ldlm_flock_blocking_unlink(lock);
@@ -646,7 +645,7 @@ ldlm_flock_interrupted_wait(void *data)
         ldlm_flock_blocking_unlink(lock);
 
        /* client side - set flag to prevent lock from being put on LRU list */
-        lock->l_flags |= LDLM_FL_CBPENDING;
+       ldlm_set_cbpending(lock);
         unlock_res_and_lock(lock);
 
         EXIT;
@@ -734,7 +733,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 granted:
         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
 
-        if (lock->l_flags & LDLM_FL_FAILED) {
+        if (ldlm_is_failed(lock)) {
                 LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
                 RETURN(-EIO);
         }
@@ -747,7 +746,7 @@ granted:
        /* Protect against race where lock could have been just destroyed
         * due to overlap in ldlm_process_flock_lock().
         */
-       if (lock->l_flags & LDLM_FL_DESTROYED) {
+       if (ldlm_is_destroyed(lock)) {
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
                RETURN(0);
@@ -759,7 +758,7 @@ granted:
         /* ldlm_lock_enqueue() has already placed lock on the granted list. */
         cfs_list_del_init(&lock->l_res_link);
 
-       if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
+       if (ldlm_is_flock_deadlock(lock)) {
                LDLM_DEBUG(lock, "client-side enqueue deadlock received");
                rc = -EDEADLK;
        } else if (flags & LDLM_FL_TEST_LOCK) {