LU-12616 obdclass: fix MDS start/stop race
[fs/lustre-release.git] lustre/ldlm/ldlm_flock.c
index 2f6e357..be84993 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -31,7 +27,7 @@
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2010, 2012, Intel Corporation.
+ * Copyright (c) 2010, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 
 #define DEBUG_SUBSYSTEM S_LDLM
 
-#ifdef __KERNEL__
+#include <linux/list.h>
 #include <lustre_dlm.h>
 #include <obd_support.h>
 #include <obd_class.h>
 #include <lustre_lib.h>
-#include <libcfs/list.h>
-#else
-#include <liblustre.h>
-#include <obd_class.h>
-#endif
 
 #include "ldlm_internal.h"
 
@@ -107,13 +98,13 @@ static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
         if (req->l_export == NULL)
                return;
 
-       LASSERT(cfs_hlist_unhashed(&req->l_exp_flock_hash));
+       LASSERT(hlist_unhashed(&req->l_exp_flock_hash));
 
         req->l_policy_data.l_flock.blocking_owner =
                 lock->l_policy_data.l_flock.owner;
         req->l_policy_data.l_flock.blocking_export =
                lock->l_export;
-       req->l_policy_data.l_flock.blocking_refs = 0;
+       atomic_set(&req->l_policy_data.l_flock.blocking_refs, 0);
 
        cfs_hash_add(req->l_export->exp_flock_hash,
                     &req->l_policy_data.l_flock.owner,
@@ -128,28 +119,27 @@ static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
 
        check_res_locked(req->l_resource);
        if (req->l_export->exp_flock_hash != NULL &&
-           !cfs_hlist_unhashed(&req->l_exp_flock_hash))
+           !hlist_unhashed(&req->l_exp_flock_hash))
                cfs_hash_del(req->l_export->exp_flock_hash,
                             &req->l_policy_data.l_flock.owner,
                             &req->l_exp_flock_hash);
 }
 
 static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
 {
-        ENTRY;
+       ENTRY;
 
-       LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
+       LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: %#llx)",
                   mode, flags);
 
        /* Safe to not lock here, since it should be empty anyway */
-       LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
+       LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
 
-        cfs_list_del_init(&lock->l_res_link);
-        if (flags == LDLM_FL_WAIT_NOREPROC &&
-            !(lock->l_flags & LDLM_FL_FAILED)) {
-                /* client side - set a flag to prevent sending a CANCEL */
-                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
+       list_del_init(&lock->l_res_link);
+       if (flags == LDLM_FL_WAIT_NOREPROC) {
+               /* client side - set a flag to prevent sending a CANCEL */
+               lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
 
                 /* when reaching here, it is under lock_res_and_lock(). Thus,
                    we need to call the nolock version of ldlm_lock_decref_internal */
@@ -169,6 +159,31 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
  * one client holds a lock on something and wants a lock on something
  * else and at the same time another client has the opposite situation).
  */
+
+struct ldlm_flock_lookup_cb_data {
+       __u64 *bl_owner;
+       struct ldlm_lock *lock;
+       struct obd_export *exp;
+};
+
+static int ldlm_flock_lookup_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                               struct hlist_node *hnode, void *data)
+{
+       struct ldlm_flock_lookup_cb_data *cb_data = data;
+       struct obd_export *exp = cfs_hash_object(hs, hnode);
+       struct ldlm_lock *lock;
+
+       lock = cfs_hash_lookup(exp->exp_flock_hash, cb_data->bl_owner);
+       if (lock == NULL)
+               return 0;
+
+       /* Stop on the first lock found; the same process can't sleep twice */
+       cb_data->lock = lock;
+       cb_data->exp = class_export_get(exp);
+
+       return 1;
+}
+
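The helper above transfers ownership to its caller: cfs_hash_lookup() takes a reference on the found lock, and class_export_get() pins the export, before the nonzero return stops the cfs_hash_for_each_key() iteration. A sketch of the caller-side contract (this mirrors the loop in ldlm_flock_deadlock() below; it is illustration, not additional code):

struct ldlm_flock_lookup_cb_data cb_data = {
	.bl_owner = &bl_owner, .lock = NULL, .exp = NULL };

/* Walk every export hashed under the peer NID; the scan stops at the
 * first callback returning nonzero, so at most one (exp, lock) pair
 * is pinned on return. */
cfs_hash_for_each_key(bl_exp->exp_obd->obd_nid_hash,
		      &bl_exp->exp_connection->c_peer.nid,
		      ldlm_flock_lookup_cb, &cb_data);
if (cb_data.lock != NULL) {
	/* adopt the references taken in ldlm_flock_lookup_cb() */
	class_export_put(bl_exp);
	bl_exp = cb_data.exp;
	/* ... follow the chain, then cfs_hash_put() the lock ... */
}
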
 static int
 ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
 {
@@ -183,16 +198,26 @@ ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
 
         class_export_get(bl_exp);
        while (1) {
+               struct ldlm_flock_lookup_cb_data cb_data = {
+                                       .bl_owner = &bl_owner,
+                                       .lock = NULL,
+                                       .exp = NULL };
                struct obd_export *bl_exp_new;
                struct ldlm_lock *lock = NULL;
                struct ldlm_flock *flock;
 
-               if (bl_exp->exp_flock_hash != NULL)
-                       lock = cfs_hash_lookup(bl_exp->exp_flock_hash,
-                                              &bl_owner);
+               if (bl_exp->exp_flock_hash != NULL) {
+                       cfs_hash_for_each_key(bl_exp->exp_obd->obd_nid_hash,
+                               &bl_exp->exp_connection->c_peer.nid,
+                               ldlm_flock_lookup_cb, &cb_data);
+                       lock = cb_data.lock;
+               }
                if (lock == NULL)
                        break;
 
+               class_export_put(bl_exp);
+               bl_exp = cb_data.exp;
+
                LASSERT(req != lock);
                flock = &lock->l_policy_data.l_flock;
                LASSERT(flock->owner == bl_owner);
@@ -203,7 +228,12 @@ ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
                cfs_hash_put(bl_exp->exp_flock_hash, &lock->l_exp_flock_hash);
                 bl_exp = bl_exp_new;
 
-                if (bl_owner == req_owner && bl_exp == req_exp) {
+               if (bl_exp->exp_failed)
+                       break;
+
+               if (bl_owner == req_owner &&
+                   (bl_exp->exp_connection->c_peer.nid ==
+                    req_exp->exp_connection->c_peer.nid)) {
                         class_export_put(bl_exp);
                         return 1;
                 }
@@ -214,7 +244,7 @@ ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
 }
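
Conceptually, the loop above walks the distributed waits-for chain: starting from the lock that blocks the request, it repeatedly hops to whatever that lock's owner is itself blocked on, and reports deadlock if the chain returns to the requesting (owner, peer NID) pair. A self-contained sketch of the same cycle check, using hypothetical plain-C stand-ins for the Lustre structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical, simplified stand-in for a blocked flock. */
struct flock_node {
	uint64_t owner;			/* lock owner id */
	uint64_t nid;			/* peer NID of the owning client */
	struct flock_node *blocked_on;	/* lock this owner waits on */
};

/* True if the waits-for chain starting at the blocker reaches the
 * requester again.  The real code additionally manages export
 * references and bails out early on failed exports. */
static bool flock_deadlock(const struct flock_node *req,
			   const struct flock_node *blocker)
{
	const struct flock_node *n;

	for (n = blocker; n != NULL; n = n->blocked_on)
		if (n->owner == req->owner && n->nid == req->nid)
			return true;
	return false;
}

Note that the cycle test now matches on the peer NID rather than on export identity, so a client that reconnected (and thus got a new export) is still recognized as the same actor.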
 
 static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
-                                                cfs_list_t *work_list)
+                                         struct list_head *work_list)
 {
        CDEBUG(D_INFO, "reprocess deadlock req=%p\n", lock);
 
@@ -224,7 +254,7 @@ static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
                                "support flock canceliation\n");
        } else {
                LASSERT(lock->l_completion_ast);
-               LASSERT((lock->l_flags & LDLM_FL_AST_SENT) == 0);
+               LASSERT(!ldlm_is_ast_sent(lock));
                lock->l_flags |= LDLM_FL_AST_SENT | LDLM_FL_CANCEL_ON_BLOCK |
                        LDLM_FL_FLOCK_DEADLOCK;
                ldlm_flock_blocking_unlink(lock);
@@ -240,38 +270,31 @@ static void ldlm_flock_cancel_on_deadlock(struct ldlm_lock *lock,
  * This function looks for any conflicts for \a lock in the granted or
  * waiting queues. The lock is granted if no conflicts are found in
  * either queue.
- *
- * It is also responsible for splitting a lock if a portion of the lock
- * is released.
- *
- * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
- *   - blocking ASTs have already been sent
- *
- * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
- *   - blocking ASTs have not been sent yet, so list of conflicting locks
- *     would be collected and ASTs sent.
  */
 int
-ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
-                       ldlm_error_t *err, cfs_list_t *work_list)
+ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
+                       enum ldlm_process_intention intention,
+                       enum ldlm_error *err, struct list_head *work_list)
 {
-        struct ldlm_resource *res = req->l_resource;
-        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
-        cfs_list_t *tmp;
-        cfs_list_t *ownlocks = NULL;
-        struct ldlm_lock *lock = NULL;
-        struct ldlm_lock *new = req;
-        struct ldlm_lock *new2 = NULL;
-        ldlm_mode_t mode = req->l_req_mode;
-        int local = ns_is_client(ns);
-        int added = (mode == LCK_NL);
-        int overlaps = 0;
-        int splitted = 0;
-        const struct ldlm_callback_suite null_cbs = { NULL };
-        ENTRY;
+       struct ldlm_resource *res = req->l_resource;
+       struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+       struct list_head *tmp;
+       struct list_head *ownlocks = NULL;
+       struct ldlm_lock *lock = NULL;
+       struct ldlm_lock *new = req;
+       struct ldlm_lock *new2 = NULL;
+       enum ldlm_mode mode = req->l_req_mode;
+       int local = ns_is_client(ns);
+       int added = (mode == LCK_NL);
+       int overlaps = 0;
+       int splitted = 0;
+       const struct ldlm_callback_suite null_cbs = { NULL };
+       struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
+                                                       NULL : work_list;
+       ENTRY;
 
-       CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
-              LPU64" end "LPU64"\n", *flags,
+       CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start "
+              "%llu end %llu\n", *flags,
               new->l_policy_data.l_flock.owner,
                new->l_policy_data.l_flock.pid, mode,
                req->l_policy_data.l_flock.start,
@@ -292,8 +315,8 @@ reprocess:
         if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
          /* This loop determines where this process's locks start
           * in the resource lr_granted list. */
-                cfs_list_for_each(tmp, &res->lr_granted) {
-                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+               list_for_each(tmp, &res->lr_granted) {
+                       lock = list_entry(tmp, struct ldlm_lock,
                                               l_res_link);
                         if (ldlm_same_flock_owner(lock, req)) {
                                 ownlocks = tmp;
@@ -306,8 +329,8 @@ reprocess:
 
                 /* This loop determines if there are existing locks
                  * that conflict with the new lock request. */
-                cfs_list_for_each(tmp, &res->lr_granted) {
-                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+               list_for_each(tmp, &res->lr_granted) {
+                       lock = list_entry(tmp, struct ldlm_lock,
                                               l_res_link);
 
                         if (ldlm_same_flock_owner(lock, req)) {
@@ -323,11 +346,11 @@ reprocess:
                         if (!ldlm_flocks_overlap(lock, req))
                                 continue;
 
-                       if (!first_enq) {
+                       if (intention != LDLM_PROCESS_ENQUEUE) {
                                reprocess_failed = 1;
                                if (ldlm_flock_deadlock(req, lock)) {
                                        ldlm_flock_cancel_on_deadlock(req,
-                                                       work_list);
+                                                       grant_work);
                                        RETURN(LDLM_ITER_CONTINUE);
                                }
                                continue;
@@ -389,7 +412,7 @@ reprocess:
                 ownlocks = &res->lr_granted;
 
         list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
-                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);
+               lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
 
                 if (!ldlm_same_flock_owner(lock, new))
                         break;
@@ -477,20 +500,20 @@ reprocess:
                 /* XXX - if ldlm_lock_create() can sleep we should
                  * release the lr_lock, allocate the new lock,
                  * and restart processing this lock. */
-                if (!new2) {
-                        unlock_res_and_lock(req);
+               if (new2 == NULL) {
+                       unlock_res_and_lock(req);
                        new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
                                                lock->l_granted_mode, &null_cbs,
                                                NULL, 0, LVB_T_NONE);
-                        lock_res_and_lock(req);
-                        if (!new2) {
-                                ldlm_flock_destroy(req, lock->l_granted_mode,
-                                                   *flags);
-                                *err = -ENOLCK;
-                                RETURN(LDLM_ITER_STOP);
-                        }
-                        goto reprocess;
-                }
+                       lock_res_and_lock(req);
+                       if (IS_ERR(new2)) {
+                               ldlm_flock_destroy(req, lock->l_granted_mode,
+                                                  *flags);
+                               *err = PTR_ERR(new2);
+                               RETURN(LDLM_ITER_STOP);
+                       }
+                       goto reprocess;
+               }
 
                 splitted = 1;
 
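The allocation-failure path above switches from a NULL check to the kernel's error-pointer convention: ldlm_lock_create() now encodes the failing errno with ERR_PTR(), and the caller propagates it via PTR_ERR() instead of returning a hard-coded -ENOLCK. The idiom in minimal, generic form (not Lustre-specific):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

/* Constructor returns a valid pointer or an errno wrapped in ERR_PTR(). */
static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	return f ? f : ERR_PTR(-ENOMEM);
}

static int foo_user(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* propagate the real errno */
	kfree(f);
	return 0;
}
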
@@ -509,7 +532,7 @@ reprocess:
                 if (lock->l_export != NULL) {
                         new2->l_export = class_export_lock_get(lock->l_export, new2);
                         if (new2->l_export->exp_lock_hash &&
-                            cfs_hlist_unhashed(&new2->l_exp_hash))
+                           hlist_unhashed(&new2->l_exp_hash))
                                 cfs_hash_add(new2->l_export->exp_lock_hash,
                                              &new2->l_remote_handle,
                                              &new2->l_exp_hash);
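For context, the new2 lock exists for the range-split case: when a request punches a hole in the middle of an existing granted extent, a single lock cannot describe the two surviving pieces, so new2 is cloned for the low fragment while the original lock keeps the high one. The interval arithmetic as a standalone sketch (flock_split() is a hypothetical helper, assuming strict containment, the only case that needs a second lock):

#include <assert.h>
#include <stdint.h>

/* Split granted [gs, ge] around request [rs, re], gs < rs && re < ge. */
static void flock_split(uint64_t gs, uint64_t ge,
			uint64_t rs, uint64_t re,
			uint64_t lo[2], uint64_t hi[2])
{
	assert(gs < rs && re < ge);
	lo[0] = gs;	lo[1] = rs - 1;	/* low fragment -> new2 */
	hi[0] = re + 1;	hi[1] = ge;	/* high fragment -> original */
}
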
@@ -533,14 +556,14 @@ reprocess:
 
         /* Add req to the granted queue before calling ldlm_reprocess_all(). */
         if (!added) {
-                cfs_list_del_init(&req->l_res_link);
+               list_del_init(&req->l_res_link);
                 /* insert new lock before ownlocks in list. */
                 ldlm_resource_add_lock(res, ownlocks, req);
         }
 
         if (*flags != LDLM_FL_WAIT_NOREPROC) {
 #ifdef HAVE_SERVER_SUPPORT
-                if (first_enq) {
+               if (intention == LDLM_PROCESS_ENQUEUE) {
                         /* If this is an unlock, reprocess the waitq and
                          * send completion ASTs for locks that can now be
                          * granted. The only problem with doing this
@@ -548,25 +571,28 @@ reprocess:
                          * newly granted locks will be sent before the unlock
                          * completion is sent. It shouldn't be an issue. Also
                          * note that ldlm_process_flock_lock() will recurse,
-                         * but only once because first_enq will be false from
-                         * ldlm_reprocess_queue. */
-                        if ((mode == LCK_NL) && overlaps) {
-                                CFS_LIST_HEAD(rpc_list);
+                        * but only once because 'intention' won't be
+                        * LDLM_PROCESS_ENQUEUE from ldlm_reprocess_queue. */
+                       if ((mode == LCK_NL) && overlaps) {
+                               struct list_head rpc_list;
                                 int rc;
+
+                               INIT_LIST_HEAD(&rpc_list);
 restart:
-                                ldlm_reprocess_queue(res, &res->lr_waiting,
-                                                     &rpc_list);
+                               ldlm_reprocess_queue(res, &res->lr_waiting,
+                                                    &rpc_list,
+                                                    LDLM_PROCESS_RESCAN, NULL);
 
                                 unlock_res_and_lock(req);
                                 rc = ldlm_run_ast_work(ns, &rpc_list,
                                                        LDLM_WORK_CP_AST);
                                 lock_res_and_lock(req);
-                                if (rc == -ERESTART)
-                                        GOTO(restart, -ERESTART);
+                               if (rc == -ERESTART)
+                                       GOTO(restart, rc);
                        }
                 } else {
                         LASSERT(req->l_completion_ast);
-                        ldlm_add_ast_work_item(req, NULL, work_list);
+                       ldlm_add_ast_work_item(req, NULL, grant_work);
                 }
 #else /* !HAVE_SERVER_SUPPORT */
                /* The only possible case for client-side calls flock
@@ -606,7 +632,7 @@ ldlm_flock_interrupted_wait(void *data)
         ldlm_flock_blocking_unlink(lock);
 
        /* client side - set flag to prevent lock from being put on LRU list */
-        lock->l_flags |= LDLM_FL_CBPENDING;
+       ldlm_set_cbpending(lock);
         unlock_res_and_lock(lock);
 
         EXIT;
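Raw l_flags manipulation is being replaced throughout by named accessors (ldlm_set_cbpending() here, ldlm_is_ast_sent() and ldlm_is_destroyed() elsewhere in this patch). A sketch of the accessor style, assuming wrappers along the lines of those generated in lustre_dlm_flags.h (the bit value shown is an assumption):

#define LDLM_FL_CBPENDING	 0x0000000000000010ULL	/* assumed bit */

#define ldlm_is_cbpending(_l)	 (((_l)->l_flags & LDLM_FL_CBPENDING) != 0)
#define ldlm_set_cbpending(_l)	 ((_l)->l_flags |= LDLM_FL_CBPENDING)
#define ldlm_clear_cbpending(_l) ((_l)->l_flags &= ~LDLM_FL_CBPENDING)
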
@@ -625,45 +651,38 @@ ldlm_flock_interrupted_wait(void *data)
 int
 ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 {
-       struct file_lock                *getlk = lock->l_ast_data;
-        struct obd_device              *obd;
-        struct obd_import              *imp = NULL;
-        struct ldlm_flock_wait_data     fwd;
-        struct l_wait_info              lwi;
-        ldlm_error_t                    err;
-        int                             rc = 0;
-        ENTRY;
+       struct file_lock *getlk = lock->l_ast_data;
+       struct obd_device *obd;
+       struct obd_import *imp = NULL;
+       struct ldlm_flock_wait_data fwd;
+       struct l_wait_info lwi;
+       enum ldlm_error err;
+       int rc = 0;
+       ENTRY;
 
-       CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
-               flags, data, getlk);
-
-        /* Import invalidation. We need to actually release the lock
-         * references being held, so that it can go away. No point in
-         * holding the lock even if app still believes it has it, since
-         * server already dropped it anyway. Only for granted locks too. */
-        if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
-            (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
-                if (lock->l_req_mode == lock->l_granted_mode &&
-                    lock->l_granted_mode != LCK_NL &&
-                    NULL == data)
-                        ldlm_lock_decref_internal(lock, lock->l_req_mode);
-
-                /* Need to wake up the waiter if we were evicted */
-                cfs_waitq_signal(&lock->l_waitq);
-                RETURN(0);
-        }
+       OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
+       if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
+               lock_res_and_lock(lock);
+               lock->l_flags |= LDLM_FL_FAIL_LOC;
+               unlock_res_and_lock(lock);
+               OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT3, 4);
+       }
+       CDEBUG(D_DLMTRACE, "flags: %#llx data: %p getlk: %p\n",
+              flags, data, getlk);
 
-        LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
+       LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
 
-        if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-                       LDLM_FL_BLOCK_CONV))) {
-                if (NULL == data)
-                        /* mds granted the lock in the reply */
-                        goto granted;
-                /* CP AST RPC: lock get granted, wake it up */
-                cfs_waitq_signal(&lock->l_waitq);
-                RETURN(0);
-        }
+       if (flags & LDLM_FL_FAILED)
+               goto granted;
+
+       if (!(flags & LDLM_FL_BLOCKED_MASK)) {
+               if (NULL == data)
+                       /* mds granted the lock in the reply */
+                       goto granted;
+               /* CP AST RPC: lock got granted, wake it up */
+               wake_up(&lock->l_waitq);
+               RETURN(0);
+       }
 
         LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                    "sleeping");
@@ -694,50 +713,95 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 granted:
         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
 
-       if (lock->l_flags & LDLM_FL_DESTROYED) {
+       if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT4)) {
+               lock_res_and_lock(lock);
+               /* DEADLOCK is always set with CBPENDING */
+               lock->l_flags |= LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
+               unlock_res_and_lock(lock);
+               OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT4, 4);
+       }
+       if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT5)) {
+               lock_res_and_lock(lock);
+               /* DEADLOCK is always set with CBPENDING */
+               lock->l_flags |= LDLM_FL_FAIL_LOC |
+                                LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
+               unlock_res_and_lock(lock);
+               OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT5, 4);
+       }
+
+       lock_res_and_lock(lock);
+
+       /* Protect against race where lock could have been just destroyed
+        * due to overlap in ldlm_process_flock_lock().
+        */
+       if (ldlm_is_destroyed(lock)) {
+               unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
-               RETURN(0);
+
+               /* An error is still to be returned, to propagate it up to
+        * the ldlm_cli_enqueue_fini() caller. */
+               RETURN(-EIO);
        }
 
-        if (lock->l_flags & LDLM_FL_FAILED) {
-                LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
-                RETURN(-EIO);
-        }
+       /* ldlm_lock_enqueue() has already placed lock on the granted list. */
+       ldlm_resource_unlink_lock(lock);
+
+       /* Import invalidation. We need to actually release the lock
+        * references being held, so that it can go away. No point in
+        * holding the lock even if app still believes it has it, since
+        * server already dropped it anyway. Only for granted locks too. */
+       /* Do the same for DEADLOCK'ed locks. */
+       if (ldlm_is_failed(lock) || ldlm_is_flock_deadlock(lock)) {
+               int mode;
+
+               if (flags & LDLM_FL_TEST_LOCK)
+                       LASSERT(ldlm_is_test_lock(lock));
+
+               if (ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
+                       mode = getlk->fl_type;
+               else
+                       mode = lock->l_granted_mode;
+
+               if (ldlm_is_flock_deadlock(lock)) {
+                       LDLM_DEBUG(lock, "client-side enqueue deadlock "
+                                  "received");
+                       rc = -EDEADLK;
+               }
+               ldlm_flock_destroy(lock, mode, LDLM_FL_WAIT_NOREPROC);
+               unlock_res_and_lock(lock);
 
-        LDLM_DEBUG(lock, "client-side enqueue granted");
+               /* Need to wake up the waiter if we were evicted */
+               wake_up(&lock->l_waitq);
 
-       lock_res_and_lock(lock);
+               /* An error is still to be returned, to propagate it up to
+        * the ldlm_cli_enqueue_fini() caller. */
+               RETURN(rc ? : -EIO);
+       }
 
-       /* take lock off the deadlock detection hash list. */
-        ldlm_flock_blocking_unlink(lock);
+       LDLM_DEBUG(lock, "client-side enqueue granted");
 
-        /* ldlm_lock_enqueue() has already placed lock on the granted list. */
-        cfs_list_del_init(&lock->l_res_link);
-
-       if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
-               LDLM_DEBUG(lock, "client-side enqueue deadlock received");
-               rc = -EDEADLK;
-       } else if (flags & LDLM_FL_TEST_LOCK) {
-                /* fcntl(F_GETLK) request */
-                /* The old mode was saved in getlk->fl_type so that if the mode
-                 * in the lock changes we can decref the appropriate refcount.*/
-               ldlm_flock_destroy(lock, flock_type(getlk),
-                                  LDLM_FL_WAIT_NOREPROC);
+       if (flags & LDLM_FL_TEST_LOCK) {
+               /*
+                * fcntl(F_GETLK) request
+                * The old mode was saved in getlk->fl_type so that if the mode
+                * in the lock changes we can decref the appropriate refcount.
+                */
+               LASSERT(ldlm_is_test_lock(lock));
+               ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
                switch (lock->l_granted_mode) {
                case LCK_PR:
-                       flock_set_type(getlk, F_RDLCK);
+                       getlk->fl_type = F_RDLCK;
                        break;
                case LCK_PW:
-                       flock_set_type(getlk, F_WRLCK);
+                       getlk->fl_type = F_WRLCK;
                        break;
                default:
-                       flock_set_type(getlk, F_UNLCK);
+                       getlk->fl_type = F_UNLCK;
                }
-               flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
-               flock_set_start(getlk,
-                               (loff_t)lock->l_policy_data.l_flock.start);
-               flock_set_end(getlk,
-                             (loff_t)lock->l_policy_data.l_flock.end);
+               getlk->fl_pid = (pid_t)lock->l_policy_data.l_flock.pid;
+               getlk->fl_start = (loff_t)lock->l_policy_data.l_flock.start;
+               getlk->fl_end = (loff_t)lock->l_policy_data.l_flock.end;
        } else {
                __u64 noreproc = LDLM_FL_WAIT_NOREPROC;
 
@@ -765,104 +829,89 @@ int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
         RETURN(0);
 }
 
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
-                                       ldlm_policy_data_t *lpolicy)
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+                                    union ldlm_policy_data *lpolicy)
 {
-        memset(lpolicy, 0, sizeof(*lpolicy));
-        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
-        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
-        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
-        /* Compat code, old clients had no idea about owner field and
-         * relied solely on pid for ownership. Introduced in LU-104, 2.1,
-         * April 2011 */
-        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
+       lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
+       lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
+       lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
+       lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
 }
 
-
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
-                                       ldlm_policy_data_t *lpolicy)
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+                                    union ldlm_wire_policy_data *wpolicy)
 {
-        memset(lpolicy, 0, sizeof(*lpolicy));
-        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
-        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
-        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
-        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
-}
-
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-                                     ldlm_wire_policy_data_t *wpolicy)
-{
-        memset(wpolicy, 0, sizeof(*wpolicy));
-        wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
-        wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
-        wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
-        wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
+       memset(wpolicy, 0, sizeof(*wpolicy));
+       wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
+       wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
+       wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
+       wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
 }
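
One property worth keeping in mind about the converter pair: ldlm_flock_policy_local_to_wire() followed by ldlm_flock_policy_wire_to_local() is the identity on all four flock fields, now that the pre-2.1 pid-as-owner compat path is gone. A self-contained check with simplified stand-in structs (the real unions carry more members):

#include <assert.h>
#include <stdint.h>

struct flock_local { uint64_t start, end, owner; uint32_t pid; };
struct flock_wire  { uint64_t lfw_start, lfw_end, lfw_owner; uint32_t lfw_pid; };

int main(void)
{
	struct flock_local l = { 0, 4095, 0xabcd, 1234 }, l2;
	struct flock_wire w;

	/* local_to_wire */
	w.lfw_start = l.start;  w.lfw_end = l.end;
	w.lfw_pid   = l.pid;    w.lfw_owner = l.owner;
	/* wire_to_local */
	l2.start = w.lfw_start; l2.end = w.lfw_end;
	l2.pid   = w.lfw_pid;   l2.owner = w.lfw_owner;

	assert(l2.start == l.start && l2.end == l.end &&
	       l2.pid == l.pid && l2.owner == l.owner);
	return 0;
}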
 
 /*
  * Export handle<->flock hash operations.
  */
 static unsigned
-ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+ldlm_export_flock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_u64_hash(*(__u64 *)key, mask);
 }
 
 static void *
-ldlm_export_flock_key(cfs_hlist_node_t *hnode)
+ldlm_export_flock_key(struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
 
-       lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
        return &lock->l_policy_data.l_flock.owner;
 }
 
 static int
-ldlm_export_flock_keycmp(const void *key, cfs_hlist_node_t *hnode)
+ldlm_export_flock_keycmp(const void *key, struct hlist_node *hnode)
 {
        return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
 }
 
 static void *
-ldlm_export_flock_object(cfs_hlist_node_t *hnode)
+ldlm_export_flock_object(struct hlist_node *hnode)
 {
-       return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       return hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
 }
 
 static void
-ldlm_export_flock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_flock_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;
 
-       lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
        LDLM_LOCK_GET(lock);
 
        flock = &lock->l_policy_data.l_flock;
        LASSERT(flock->blocking_export != NULL);
        class_export_get(flock->blocking_export);
-       flock->blocking_refs++;
+       atomic_inc(&flock->blocking_refs);
 }
 
 static void
-ldlm_export_flock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+ldlm_export_flock_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct ldlm_lock *lock;
        struct ldlm_flock *flock;
 
-       lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
-       LDLM_LOCK_RELEASE(lock);
+       lock = hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
 
        flock = &lock->l_policy_data.l_flock;
        LASSERT(flock->blocking_export != NULL);
        class_export_put(flock->blocking_export);
-       if (--flock->blocking_refs == 0) {
+       if (atomic_dec_and_test(&flock->blocking_refs)) {
                flock->blocking_owner = 0;
                flock->blocking_export = NULL;
        }
+       LDLM_LOCK_RELEASE(lock);
 }
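
Two correctness points in the get/put hooks above: blocking_refs is now an atomic_t, so concurrent puts cannot race on the decrement, and LDLM_LOCK_RELEASE() moves after the last access to *flock, since dropping the lock reference first could free the memory the put is still reading. The bare pattern as a generic kernel-C sketch:

#include <linux/atomic.h>
#include <linux/slab.h>

struct obj {
	atomic_t refs;
	/* payload ... */
};

static void obj_get(struct obj *o)
{
	atomic_inc(&o->refs);
}

static void obj_put(struct obj *o)
{
	/* atomic_dec_and_test() returns true only for the final put,
	 * so exactly one caller performs the teardown. */
	if (atomic_dec_and_test(&o->refs))
		kfree(o);
}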
 
-static cfs_hash_ops_t ldlm_export_flock_ops = {
+static struct cfs_hash_ops ldlm_export_flock_ops = {
        .hs_hash        = ldlm_export_flock_hash,
        .hs_key         = ldlm_export_flock_key,
        .hs_keycmp      = ldlm_export_flock_keycmp,
@@ -890,7 +939,6 @@ int ldlm_init_flock_export(struct obd_export *exp)
 
        RETURN(0);
 }
-EXPORT_SYMBOL(ldlm_init_flock_export);
 
 void ldlm_destroy_flock_export(struct obd_export *exp)
 {
@@ -901,4 +949,3 @@ void ldlm_destroy_flock_export(struct obd_export *exp)
        }
        EXIT;
 }
-EXPORT_SYMBOL(ldlm_destroy_flock_export);