LU-2835 ptlrpc: Fix race during exp_flock_hash creation
[fs/lustre-release.git] / lustre/ldlm/ldlm_flock.c
index 1089022..a765cc7 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
- * Use is subject to license terms.
- *
  * Copyright (c) 2003 Hewlett-Packard Development Company LP.
  * Developed under the sponsorship of the US Government under
  * Subcontract No. B514193
+ *
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2010, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  * Lustre is a trademark of Sun Microsystems, Inc.
  */
 
+/**
+ * This file implements the POSIX lock type for Lustre.
+ * Its policy properties are the start and end of the extent and the PID.
+ *
+ * These locks are only handled through the MDS because POSIX semantics
+ * require, e.g., that a lock may be only partially released and thus
+ * split into two parts, and that two adjacent locks from the same
+ * process may be merged into a single wider lock.
+ *
+ * Lock modes are mapped as follows:
+ * PR and PW for READ and WRITE locks
+ * NL to request the release of a portion of a lock
+ *
+ * These flock locks never time out.
+ */
+
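For illustration, the mode mapping described in the header comment amounts to the sketch below. This helper is hypothetical and for exposition only; the real mapping is done by the client code that builds the enqueue request, not by anything in this file.

    /* hypothetical helper, not part of this patch */
    static ldlm_mode_t flock_type_to_mode(int fl_type)
    {
            switch (fl_type) {
            case F_RDLCK:
                    return LCK_PR;  /* shared read lock */
            case F_WRLCK:
                    return LCK_PW;  /* exclusive write lock */
            default:
                    return LCK_NL;  /* F_UNLCK: release (part of) a lock */
            }
    }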
 #define DEBUG_SUBSYSTEM S_LDLM
 
 #ifdef __KERNEL__
 
 #include "ldlm_internal.h"
 
-#define l_flock_waitq   l_lru
-
-/**
- * Wait queue for Posix lock deadlock detection, added with
- * ldlm_lock::l_flock_waitq.
- */
-static CFS_LIST_HEAD(ldlm_flock_waitq);
-/**
- * Lock protecting access to ldlm_flock_waitq.
- */
-spinlock_t ldlm_flock_waitq_lock = SPIN_LOCK_UNLOCKED;
-
 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                             void *data, int flag);
 
 /**
  * list_for_remaining_safe - iterate over the remaining entries in a list
  *              and safeguard against removal of a list entry.
- * @pos:        the &struct list_head to use as a loop counter. pos MUST
+ * \param pos   the &struct list_head to use as a loop counter. pos MUST
  *              have been initialized prior to using it in this macro.
- * @n:          another &struct list_head to use as temporary storage
- * @head:       the head for your list.
+ * \param n     another &struct list_head to use as temporary storage
+ * \param head  the head for your list.
  */
 #define list_for_remaining_safe(pos, n, head) \
         for (n = pos->next; pos != (head); pos = n, n = pos->next)
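A minimal usage sketch, assuming pos has already been positioned on a valid entry (here the hypothetical first_owned, analogous to how the reprocess path below resumes from the first lock owned by the requesting process):

    cfs_list_t *pos = first_owned;  /* pre-positioned, as required above */
    cfs_list_t *n;

    list_for_remaining_safe(pos, n, &res->lr_granted) {
            /* the entry at pos may be unlinked in the loop body; n already
             * holds the next entry, so the iteration stays valid */
    }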
@@ -82,8 +86,8 @@ int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 static inline int
 ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
 {
-        return((new->l_policy_data.l_flock.pid ==
-                lock->l_policy_data.l_flock.pid) &&
+        return((new->l_policy_data.l_flock.owner ==
+                lock->l_policy_data.l_flock.owner) &&
                (new->l_export == lock->l_export));
 }
 
@@ -96,19 +100,54 @@ ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
                 lock->l_policy_data.l_flock.start));
 }
 
+static inline void ldlm_flock_blocking_link(struct ldlm_lock *req,
+                                           struct ldlm_lock *lock)
+{
+        /* For server only */
+        if (req->l_export == NULL)
+               return;
+
+       LASSERT(cfs_hlist_unhashed(&req->l_exp_flock_hash));
+
+        req->l_policy_data.l_flock.blocking_owner =
+                lock->l_policy_data.l_flock.owner;
+        req->l_policy_data.l_flock.blocking_export =
+               lock->l_export;
+       req->l_policy_data.l_flock.blocking_refs = 0;
+
+       cfs_hash_add(req->l_export->exp_flock_hash,
+                    &req->l_policy_data.l_flock.owner,
+                    &req->l_exp_flock_hash);
+}
+
+static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
+{
+        /* For server only */
+        if (req->l_export == NULL)
+                return;
+
+       check_res_locked(req->l_resource);
+       if (req->l_export->exp_flock_hash != NULL &&
+           !cfs_hlist_unhashed(&req->l_exp_flock_hash))
+               cfs_hash_del(req->l_export->exp_flock_hash,
+                            &req->l_policy_data.l_flock.owner,
+                            &req->l_exp_flock_hash);
+}
+
 static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
 {
         ENTRY;
 
-        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
-                   mode, flags);
+       LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
+                  mode, flags);
 
-        /* Safe to not lock here, since it should be empty anyway */
-        LASSERT(list_empty(&lock->l_flock_waitq));
+       /* Safe to not lock here, since it should be empty anyway */
+       LASSERT(cfs_hlist_unhashed(&lock->l_exp_flock_hash));
 
-        list_del_init(&lock->l_res_link);
-        if (flags == LDLM_FL_WAIT_NOREPROC) {
+        cfs_list_del_init(&lock->l_res_link);
+        if (flags == LDLM_FL_WAIT_NOREPROC &&
+            !(lock->l_flags & LDLM_FL_FAILED)) {
                 /* client side - set a flag to prevent sending a CANCEL */
                 lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
 
@@ -121,45 +160,85 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
         EXIT;
 }
 
+/**
+ * POSIX locks deadlock detection code.
+ *
+ * Given a new lock \a req and an existing lock \a bl_lock it conflicts
+ * with, we need to iterate through all blocked POSIX locks for this
+ * export and see if a deadlock condition arises (i.e. one client holds
+ * a lock on something and wants a lock on something else, while at the
+ * same time another client has the opposite situation).
+ */
 static int
-ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
+ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
 {
-        struct obd_export *req_export = req->l_export;
-        struct obd_export *blocking_export = blocking_lock->l_export;
-        pid_t req_pid = req->l_policy_data.l_flock.pid;
-        pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
-        struct ldlm_lock *lock;
-
-        spin_lock(&ldlm_flock_waitq_lock);
-restart:
-        list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
-                if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
-                    (lock->l_export != blocking_export))
-                        continue;
-
-                blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
-                blocking_export = (struct obd_export *)(long)
-                        lock->l_policy_data.l_flock.blocking_export;
-                if (blocking_pid == req_pid && blocking_export == req_export) {
-                        spin_unlock(&ldlm_flock_waitq_lock);
+        struct obd_export *req_exp = req->l_export;
+        struct obd_export *bl_exp = bl_lock->l_export;
+        __u64 req_owner = req->l_policy_data.l_flock.owner;
+        __u64 bl_owner = bl_lock->l_policy_data.l_flock.owner;
+
+        /* For server only */
+        if (req_exp == NULL)
+                return 0;
+
+        class_export_get(bl_exp);
+       while (1) {
+               struct obd_export *bl_exp_new;
+               struct ldlm_lock *lock = NULL;
+               struct ldlm_flock *flock;
+
+               if (bl_exp->exp_flock_hash != NULL)
+                       lock = cfs_hash_lookup(bl_exp->exp_flock_hash,
+                                              &bl_owner);
+               if (lock == NULL)
+                       break;
+
+               LASSERT(req != lock);
+               flock = &lock->l_policy_data.l_flock;
+               LASSERT(flock->owner == bl_owner);
+                bl_owner = flock->blocking_owner;
+                bl_exp_new = class_export_get(flock->blocking_export);
+                class_export_put(bl_exp);
+
+               cfs_hash_put(bl_exp->exp_flock_hash, &lock->l_exp_flock_hash);
+                bl_exp = bl_exp_new;
+
+                if (bl_owner == req_owner && bl_exp == req_exp) {
+                        class_export_put(bl_exp);
                         return 1;
                 }
-
-                goto restart;
         }
-        spin_unlock(&ldlm_flock_waitq_lock);
+        class_export_put(bl_exp);
 
         return 0;
 }
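A worked example of the walk above, with hypothetical owners and exports: say owner O1 on export E1 is blocked waiting on a lock held by O2 on export E2, and O2 now requests a range held by O1. For O2's request, the loop starts at bl_owner = O1 / bl_exp = E1, finds O1's blocked lock in E1's exp_flock_hash, follows its blocking_owner/blocking_export to O2/E2, sees that this matches req_owner/req_exp, and returns 1, so the enqueue fails with -EDEADLK instead of blocking forever.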
 
+/**
+ * Process a granting attempt for a flock lock.
+ * Must be called with the ns lock held.
+ *
+ * This function looks for any conflicts for \a lock in the granted or
+ * waiting queues. The lock is granted if no conflicts are found in
+ * either queue.
+ *
+ * It is also responsible for splitting a lock if a portion of the lock
+ * is released.
+ *
+ * If \a first_enq is 0 (i.e., called from ldlm_reprocess_queue):
+ *   - blocking ASTs have already been sent
+ *
+ * If \a first_enq is 1 (i.e., called from ldlm_lock_enqueue):
+ *   - blocking ASTs have not been sent yet, so the list of conflicting
+ *     locks is collected and ASTs are sent.
+ */
 int
-ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
-                        ldlm_error_t *err, struct list_head *work_list)
+ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
+                       ldlm_error_t *err, cfs_list_t *work_list)
 {
         struct ldlm_resource *res = req->l_resource;
-        struct ldlm_namespace *ns = res->lr_namespace;
-        struct list_head *tmp;
-        struct list_head *ownlocks = NULL;
+        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+        cfs_list_t *tmp;
+        cfs_list_t *ownlocks = NULL;
         struct ldlm_lock *lock = NULL;
         struct ldlm_lock *new = req;
         struct ldlm_lock *new2 = NULL;
@@ -171,8 +250,10 @@ ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
         const struct ldlm_callback_suite null_cbs = { NULL };
         ENTRY;
 
-        CDEBUG(D_DLMTRACE, "flags %#x pid %u mode %u start "LPU64" end "LPU64
-               "\n", *flags, new->l_policy_data.l_flock.pid, mode,
+       CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
+              LPU64" end "LPU64"\n", *flags,
+              new->l_policy_data.l_flock.owner,
+               new->l_policy_data.l_flock.pid, mode,
                req->l_policy_data.l_flock.start,
                req->l_policy_data.l_flock.end);
 
@@ -191,8 +272,9 @@ reprocess:
         if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                 /* This loop determines where this processes locks start
                  * in the resource lr_granted list. */
-                list_for_each(tmp, &res->lr_granted) {
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                cfs_list_for_each(tmp, &res->lr_granted) {
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
                         if (ldlm_same_flock_owner(lock, req)) {
                                 ownlocks = tmp;
                                 break;
@@ -203,8 +285,9 @@ reprocess:
 
                 /* This loop determines if there are existing locks
                  * that conflict with the new lock request. */
-                list_for_each(tmp, &res->lr_granted) {
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+                cfs_list_for_each(tmp, &res->lr_granted) {
+                        lock = cfs_list_entry(tmp, struct ldlm_lock,
+                                              l_res_link);
 
                         if (ldlm_same_flock_owner(lock, req)) {
                                 if (!ownlocks)
@@ -241,21 +324,16 @@ reprocess:
                                 RETURN(LDLM_ITER_STOP);
                         }
 
-                        if (ldlm_flock_deadlock(req, lock)) {
-                                ldlm_flock_destroy(req, mode, *flags);
-                                *err = -EDEADLK;
-                                RETURN(LDLM_ITER_STOP);
-                        }
-
-                        req->l_policy_data.l_flock.blocking_pid =
-                                lock->l_policy_data.l_flock.pid;
-                        req->l_policy_data.l_flock.blocking_export =
-                                (long)(void *)lock->l_export;
+                       /* add lock to the blocking list before the deadlock
+                        * check to prevent a race */
+                       ldlm_flock_blocking_link(req, lock);
 
-                        LASSERT(list_empty(&req->l_flock_waitq));
-                        spin_lock(&ldlm_flock_waitq_lock);
-                        list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);
-                        spin_unlock(&ldlm_flock_waitq_lock);
+                       if (ldlm_flock_deadlock(req, lock)) {
+                               ldlm_flock_blocking_unlink(req);
+                               ldlm_flock_destroy(req, mode, *flags);
+                               *err = -EDEADLK;
+                               RETURN(LDLM_ITER_STOP);
+                       }
 
                         ldlm_resource_add_lock(res, &res->lr_waiting, req);
                         *flags |= LDLM_FL_BLOCK_GRANTED;
@@ -271,10 +349,8 @@ reprocess:
         }
 
         /* In case we had slept on this lock request take it off of the
-         * deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&req->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        * deadlock detection hash list. */
+        ldlm_flock_blocking_unlink(req);
 
         /* Scan the locks owned by this process that overlap this request.
          * We may have to merge or split existing locks. */
@@ -283,7 +359,7 @@ reprocess:
                 ownlocks = &res->lr_granted;
 
         list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
-                lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);
+                lock = cfs_list_entry(ownlocks, struct ldlm_lock, l_res_link);
 
                 if (!ldlm_same_flock_owner(lock, new))
                         break;
@@ -369,16 +445,17 @@ reprocess:
                  * it must see the original lock data in the reply. */
 
                 /* XXX - if ldlm_lock_new() can sleep we should
-                 * release the ns_lock, allocate the new lock,
+                 * release the lr_lock, allocate the new lock,
                  * and restart processing this lock. */
                 if (!new2) {
                         unlock_res_and_lock(req);
-                         new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
-                                        lock->l_granted_mode, &null_cbs,
-                                        NULL, 0);
+                       new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
+                                               lock->l_granted_mode, &null_cbs,
+                                               NULL, 0, LVB_T_NONE);
                         lock_res_and_lock(req);
                         if (!new2) {
-                                ldlm_flock_destroy(req, lock->l_granted_mode, *flags);
+                                ldlm_flock_destroy(req, lock->l_granted_mode,
+                                                   *flags);
                                 *err = -ENOLCK;
                                 RETURN(LDLM_ITER_STOP);
                         }
@@ -390,6 +467,8 @@ reprocess:
                 new2->l_granted_mode = lock->l_granted_mode;
                 new2->l_policy_data.l_flock.pid =
                         new->l_policy_data.l_flock.pid;
+                new2->l_policy_data.l_flock.owner =
+                        new->l_policy_data.l_flock.owner;
                 new2->l_policy_data.l_flock.start =
                         lock->l_policy_data.l_flock.start;
                 new2->l_policy_data.l_flock.end =
@@ -398,15 +477,16 @@ reprocess:
                         new->l_policy_data.l_flock.end + 1;
                 new2->l_conn_export = lock->l_conn_export;
                 if (lock->l_export != NULL) {
-                        new2->l_export = class_export_get(lock->l_export);
-                        if (new2->l_export->exp_lock_hash && 
-                            hlist_unhashed(&new2->l_exp_hash))
-                                lustre_hash_add(new2->l_export->exp_lock_hash,
-                                                &new2->l_remote_handle,
-                                                &new2->l_exp_hash);
+                        new2->l_export = class_export_lock_get(lock->l_export, new2);
+                        if (new2->l_export->exp_lock_hash &&
+                            cfs_hlist_unhashed(&new2->l_exp_hash))
+                                cfs_hash_add(new2->l_export->exp_lock_hash,
+                                             &new2->l_remote_handle,
+                                             &new2->l_exp_hash);
                 }
                 if (*flags == LDLM_FL_WAIT_NOREPROC)
-                        ldlm_lock_addref_internal_nolock(new2, lock->l_granted_mode);
+                        ldlm_lock_addref_internal_nolock(new2,
+                                                         lock->l_granted_mode);
 
                 /* insert new2 at lock */
                 ldlm_resource_add_lock(res, ownlocks, new2);
@@ -423,12 +503,13 @@ reprocess:
 
         /* Add req to the granted queue before calling ldlm_reprocess_all(). */
         if (!added) {
-                list_del_init(&req->l_res_link);
+                cfs_list_del_init(&req->l_res_link);
                 /* insert new lock before ownlocks in list. */
                 ldlm_resource_add_lock(res, ownlocks, req);
         }
 
         if (*flags != LDLM_FL_WAIT_NOREPROC) {
+#ifdef HAVE_SERVER_SUPPORT
                 if (first_enq) {
                         /* If this is an unlock, reprocess the waitq and
                          * send completions ASTs for locks that can now be
@@ -447,7 +528,7 @@ restart:
                                                      &rpc_list);
 
                                 unlock_res_and_lock(req);
-                                rc = ldlm_run_ast_work(&rpc_list,
+                                rc = ldlm_run_ast_work(ns, &rpc_list,
                                                        LDLM_WORK_CP_AST);
                                 lock_res_and_lock(req);
                                 if (rc == -ERESTART)
@@ -457,12 +538,19 @@ restart:
                         LASSERT(req->l_completion_ast);
                         ldlm_add_ast_work_item(req, NULL, work_list);
                 }
+#else /* !HAVE_SERVER_SUPPORT */
+                /* The only possible case for a client-side call of the
+                 * flock policy function is ldlm_flock_completion_ast,
+                 * which always carries the LDLM_FL_WAIT_NOREPROC flag. */
+                CERROR("Illegal parameter for client-side-only module.\n");
+                LBUG();
+#endif /* HAVE_SERVER_SUPPORT */
         }
 
-        /* In case we're reprocessing the requested lock we can't destroy
-         * it until after calling ldlm_ast_work_item() above so that lawi()
-         * can bump the reference count on req. Otherwise req could be freed
-         * before the completion AST can be sent.  */
+       /* In case we're reprocessing the requested lock we can't destroy
+        * it until after calling ldlm_add_ast_work_item() above, so that it
+        * can bump the reference count on \a req. Otherwise \a req
+        * could be freed before the completion AST can be sent. */
         if (added)
                 ldlm_flock_destroy(req, mode, *flags);
 
@@ -479,161 +567,311 @@ static void
 ldlm_flock_interrupted_wait(void *data)
 {
         struct ldlm_lock *lock;
-        struct lustre_handle lockh;
-        int rc;
         ENTRY;
 
         lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
 
-        /* take lock off the deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&lock->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+       /* take lock off the deadlock detection hash list. */
+       lock_res_and_lock(lock);
+        ldlm_flock_blocking_unlink(lock);
 
-        /* client side - set flag to prevent lock from being put on lru list */
+       /* client side - set flag to prevent lock from being put on LRU list */
         lock->l_flags |= LDLM_FL_CBPENDING;
-
-        ldlm_lock_decref_internal(lock, lock->l_req_mode);
-        ldlm_lock2handle(lock, &lockh);
-        rc = ldlm_cli_cancel(&lockh);
-        if (rc != ELDLM_OK)
-                CERROR("ldlm_cli_cancel: %d\n", rc);
+        unlock_res_and_lock(lock);
 
         EXIT;
 }
 
+/**
+ * Flock completion callback function.
+ *
+ * \param lock [in,out]: the lock to be handled
+ * \param flags    [in]: LDLM flags describing the result of the enqueue
+ * \param data     [in]: ldlm_work_cp_ast_lock() will use ldlm_cb_set_arg
+ *
+ * \retval 0    : success
+ * \retval <0   : failure
+ */
 int
-ldlm_flock_completion_ast(struct ldlm_lock *lock, int flags, void *data)
+ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 {
-        struct ldlm_namespace *ns;
-        cfs_flock_t *getlk = lock->l_ast_data;
-        struct ldlm_flock_wait_data fwd;
-        struct obd_device *obd;
-        struct obd_import *imp = NULL;
-        ldlm_error_t err;
-        int rc = 0;
-        struct l_wait_info lwi;
+       struct file_lock                *getlk = lock->l_ast_data;
+        struct obd_device              *obd;
+        struct obd_import              *imp = NULL;
+        struct ldlm_flock_wait_data     fwd;
+        struct l_wait_info              lwi;
+        ldlm_error_t                    err;
+        int                             rc = 0;
         ENTRY;
 
-        CDEBUG(D_DLMTRACE, "flags: 0x%x data: %p getlk: %p\n",
+       CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
                flags, data, getlk);
 
         /* Import invalidation. We need to actually release the lock
          * references being held, so that it can go away. No point in
          * holding the lock even if app still believes it has it, since
          * server already dropped it anyway. Only for granted locks too. */
-        lock_res_and_lock(lock);
         if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
             (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
-                unlock_res_and_lock(lock);
                 if (lock->l_req_mode == lock->l_granted_mode &&
-                    lock->l_granted_mode != LCK_NL)
+                    lock->l_granted_mode != LCK_NL &&
+                    NULL == data)
                         ldlm_lock_decref_internal(lock, lock->l_req_mode);
+
+                /* Need to wake up the waiter if we were evicted */
+                cfs_waitq_signal(&lock->l_waitq);
                 RETURN(0);
         }
-        unlock_res_and_lock(lock);
 
         LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
 
         if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
-                       LDLM_FL_BLOCK_CONV)))
-                goto  granted;
+                       LDLM_FL_BLOCK_CONV))) {
+                if (NULL == data)
+                        /* the MDS granted the lock in the reply */
+                        goto granted;
+                /* CP AST RPC: the lock was granted, wake it up */
+                cfs_waitq_signal(&lock->l_waitq);
+                RETURN(0);
+        }
 
         LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
                    "sleeping");
-
         fwd.fwd_lock = lock;
         obd = class_exp2obd(lock->l_conn_export);
 
-        /* if this is a local lock, then there is no import */
-        if (obd != NULL)
+        /* if this is a local lock, there is no import */
+        if (NULL != obd)
                 imp = obd->u.cli.cl_import;
 
-        if (imp != NULL) {
-                spin_lock(&imp->imp_lock);
-                fwd.fwd_generation = imp->imp_generation;
-                spin_unlock(&imp->imp_lock);
+        if (NULL != imp) {
+               spin_lock(&imp->imp_lock);
+               fwd.fwd_generation = imp->imp_generation;
+               spin_unlock(&imp->imp_lock);
         }
 
         lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
 
         /* Go to sleep until the lock is granted. */
-        rc = l_wait_event(lock->l_waitq,
-                          ((lock->l_req_mode == lock->l_granted_mode) ||
-                           lock->l_destroyed), &lwi);
+        rc = l_wait_event(lock->l_waitq, is_granted_or_cancelled(lock), &lwi);
 
-        LDLM_DEBUG(lock, "client-side enqueue waking up: rc = %d", rc);
-        RETURN(rc);
+        if (rc) {
+                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
+                           rc);
+                RETURN(rc);
+        }
 
 granted:
         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
-        LDLM_DEBUG(lock, "client-side enqueue granted");
-        ns = lock->l_resource->lr_namespace;
-        lock_res_and_lock(lock);
-
-        /* before flock's complete ast gets here, the flock
-         * can possibly be freed by another thread
-         */
-        if (lock->l_destroyed) {
-                LDLM_DEBUG(lock, "already destroyed by another thread");
-                unlock_res(lock->l_resource);
-                RETURN(0);
+
+       if (lock->l_flags & LDLM_FL_DESTROYED) {
+               LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
+               RETURN(0);
+       }
+
+        if (lock->l_flags & LDLM_FL_FAILED) {
+                LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
+                RETURN(-EIO);
+        }
+
+        if (rc) {
+                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
+                           rc);
+                RETURN(rc);
         }
 
-        /* take lock off the deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&lock->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+        LDLM_DEBUG(lock, "client-side enqueue granted");
+
+       lock_res_and_lock(lock);
+
+       /* take lock off the deadlock detection hash list. */
+        ldlm_flock_blocking_unlink(lock);
 
         /* ldlm_lock_enqueue() has already placed lock on the granted list. */
-        list_del_init(&lock->l_res_link);
+        cfs_list_del_init(&lock->l_res_link);
 
         if (flags & LDLM_FL_TEST_LOCK) {
                 /* fcntl(F_GETLK) request */
                 /* The old mode was saved in getlk->fl_type so that if the mode
-                 * in the lock changes we can decref the approprate refcount. */
-                ldlm_flock_destroy(lock, cfs_flock_type(getlk), LDLM_FL_WAIT_NOREPROC);
-                switch (lock->l_granted_mode) {
-                case LCK_PR:
-                        cfs_flock_set_type(getlk, F_RDLCK);
-                        break;
-                case LCK_PW:
-                        cfs_flock_set_type(getlk, F_WRLCK);
-                        break;
-                default:
-                        cfs_flock_set_type(getlk, F_UNLCK);
-                }
-                cfs_flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
-                cfs_flock_set_start(getlk, (loff_t)lock->l_policy_data.l_flock.start);
-                cfs_flock_set_end(getlk, (loff_t)lock->l_policy_data.l_flock.end);
-        } else {
-                int noreproc = LDLM_FL_WAIT_NOREPROC;
-
-                /* We need to reprocess the lock to do merges or splits
-                 * with existing locks owned by this process. */
-                ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
-                if (flags == 0)
-                        cfs_waitq_signal(&lock->l_waitq);
-        }
-        unlock_res_and_lock(lock);
-        RETURN(0);
+                 * in the lock changes we can decref the appropriate refcount.*/
+               ldlm_flock_destroy(lock, flock_type(getlk),
+                                  LDLM_FL_WAIT_NOREPROC);
+               switch (lock->l_granted_mode) {
+               case LCK_PR:
+                       flock_set_type(getlk, F_RDLCK);
+                       break;
+               case LCK_PW:
+                       flock_set_type(getlk, F_WRLCK);
+                       break;
+               default:
+                       flock_set_type(getlk, F_UNLCK);
+               }
+               flock_set_pid(getlk, (pid_t)lock->l_policy_data.l_flock.pid);
+               flock_set_start(getlk,
+                               (loff_t)lock->l_policy_data.l_flock.start);
+               flock_set_end(getlk,
+                             (loff_t)lock->l_policy_data.l_flock.end);
+       } else {
+               __u64 noreproc = LDLM_FL_WAIT_NOREPROC;
+
+               /* We need to reprocess the lock to do merges or splits
+                * with existing locks owned by this process. */
+               ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
+       }
+       unlock_res_and_lock(lock);
+       RETURN(0);
 }
 EXPORT_SYMBOL(ldlm_flock_completion_ast);
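The F_GETLK branch above relies on a caller-side contract: the original struct file_lock is stashed in l_ast_data and the enqueue carries LDLM_FL_TEST_LOCK. A hedged sketch of that contract (assumed caller-side setup, not part of this patch):

    /* sketch: caller side of an fcntl(F_GETLK) request */
    lock->l_ast_data = getlk;       /* struct file_lock from the fcntl() */
    /* ... enqueue with LDLM_FL_TEST_LOCK set ... */
    /* after ldlm_flock_completion_ast() runs, getlk describes the
     * conflicting lock (type/pid/start/end), or F_UNLCK if none */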
 
 int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                             void *data, int flag)
 {
-        struct ldlm_namespace *ns;
         ENTRY;
 
         LASSERT(lock);
         LASSERT(flag == LDLM_CB_CANCELING);
 
-        ns = lock->l_resource->lr_namespace;
-
-        /* take lock off the deadlock detection waitq. */
-        spin_lock(&ldlm_flock_waitq_lock);
-        list_del_init(&lock->l_flock_waitq);
-        spin_unlock(&ldlm_flock_waitq_lock);
+       /* take lock off the deadlock detection hash list. */
+       lock_res_and_lock(lock);
+        ldlm_flock_blocking_unlink(lock);
+       unlock_res_and_lock(lock);
         RETURN(0);
 }
+
+void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
+                                       ldlm_policy_data_t *lpolicy)
+{
+        memset(lpolicy, 0, sizeof(*lpolicy));
+        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
+        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
+        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
+        /* Compat code: old clients had no idea about the owner field and
+         * relied solely on the pid for ownership. Introduced in LU-104,
+         * 2.1, April 2011. */
+        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
+}
+
+
+void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
+                                       ldlm_policy_data_t *lpolicy)
+{
+        memset(lpolicy, 0, sizeof(*lpolicy));
+        lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
+        lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
+        lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
+        lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
+}
+
+void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
+                                     ldlm_wire_policy_data_t *wpolicy)
+{
+        memset(wpolicy, 0, sizeof(*wpolicy));
+        wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
+        wpolicy->l_flock.lfw_end = lpolicy->l_flock.end;
+        wpolicy->l_flock.lfw_pid = lpolicy->l_flock.pid;
+        wpolicy->l_flock.lfw_owner = lpolicy->l_flock.owner;
+}
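A quick consistency check on these converters: for a 2.1+ peer, ldlm_flock_policy_local_to_wire() followed by ldlm_flock_policy_wire21_to_local() is the identity on {start, end, pid, owner}. A 1.8-era peer never sends a meaningful lfw_owner, so the wire18 variant substitutes lfw_pid; e.g. a lock with pid 1234 arrives with owner 1234, matching the pid-only ownership model noted in the compat comment above.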
+
+/*
+ * Export handle<->flock hash operations.
+ */
+static unsigned
+ldlm_export_flock_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+{
+       return cfs_hash_u64_hash(*(__u64 *)key, mask);
+}
+
+static void *
+ldlm_export_flock_key(cfs_hlist_node_t *hnode)
+{
+       struct ldlm_lock *lock;
+
+       lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       return &lock->l_policy_data.l_flock.owner;
+}
+
+static int
+ldlm_export_flock_keycmp(const void *key, cfs_hlist_node_t *hnode)
+{
+       return !memcmp(ldlm_export_flock_key(hnode), key, sizeof(__u64));
+}
+
+static void *
+ldlm_export_flock_object(cfs_hlist_node_t *hnode)
+{
+       return cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+}
+
+static void
+ldlm_export_flock_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+       struct ldlm_lock *lock;
+       struct ldlm_flock *flock;
+
+       lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       LDLM_LOCK_GET(lock);
+
+       flock = &lock->l_policy_data.l_flock;
+       LASSERT(flock->blocking_export != NULL);
+       class_export_get(flock->blocking_export);
+       flock->blocking_refs++;
+}
+
+static void
+ldlm_export_flock_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+{
+       struct ldlm_lock *lock;
+       struct ldlm_flock *flock;
+
+       lock = cfs_hlist_entry(hnode, struct ldlm_lock, l_exp_flock_hash);
+       LDLM_LOCK_RELEASE(lock);
+
+       flock = &lock->l_policy_data.l_flock;
+       LASSERT(flock->blocking_export != NULL);
+       class_export_put(flock->blocking_export);
+       if (--flock->blocking_refs == 0) {
+               flock->blocking_owner = 0;
+               flock->blocking_export = NULL;
+       }
+}
+
+static cfs_hash_ops_t ldlm_export_flock_ops = {
+       .hs_hash        = ldlm_export_flock_hash,
+       .hs_key         = ldlm_export_flock_key,
+       .hs_keycmp      = ldlm_export_flock_keycmp,
+       .hs_object      = ldlm_export_flock_object,
+       .hs_get         = ldlm_export_flock_get,
+       .hs_put         = ldlm_export_flock_put,
+       .hs_put_locked  = ldlm_export_flock_put,
+};
+
+int ldlm_init_flock_export(struct obd_export *exp)
+{
+       if (strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
+               RETURN(0);
+
+       exp->exp_flock_hash =
+               cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
+                               HASH_EXP_LOCK_CUR_BITS,
+                               HASH_EXP_LOCK_MAX_BITS,
+                               HASH_EXP_LOCK_BKT_BITS, 0,
+                               CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
+                               &ldlm_export_flock_ops,
+                               CFS_HASH_DEFAULT | CFS_HASH_NBLK_CHANGE);
+       if (!exp->exp_flock_hash)
+               RETURN(-ENOMEM);
+
+       RETURN(0);
+}
+EXPORT_SYMBOL(ldlm_init_flock_export);
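As the commit subject suggests, creating the hash here closes a race: exp_flock_hash is now instantiated once at export setup time (and only for MDT exports, per the typ_name check above), rather than lazily from the enqueue path, where two threads observing a NULL exp_flock_hash could both try to create it. A hedged sketch of the expected pairing; the exact call sites are assumed, not shown in this diff:

    /* on export setup */
    rc = ldlm_init_flock_export(exp);
    if (rc != 0)
            GOTO(out, rc);

    /* on export teardown */
    ldlm_destroy_flock_export(exp);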
+
+void ldlm_destroy_flock_export(struct obd_export *exp)
+{
+       ENTRY;
+       if (exp->exp_flock_hash) {
+               cfs_hash_putref(exp->exp_flock_hash);
+               exp->exp_flock_hash = NULL;
+       }
+       EXIT;
+}
+EXPORT_SYMBOL(ldlm_destroy_flock_export);