LU-12616 obclass: fix MDS start/stop race
diff --git a/lustre/ldlm/ldlm_extent.c b/lustre/ldlm/ldlm_extent.c
index ace668a..9e046c9 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2010, 2013, Intel Corporation.
+ * Copyright (c) 2010, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -94,7 +90,7 @@ static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
          * the client requested. Also we need to make sure it's also server
          * page size aligned otherwise a server page can be covered by two
          * write locks. */
-       mask = PAGE_CACHE_SIZE;
+       mask = PAGE_SIZE;
         req_align = (req_end + 1) | req_start;
         if (req_align != 0 && (req_align & (mask - 1)) == 0) {
                 while ((req_align & mask) == 0)
@@ -107,10 +103,10 @@ static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
         new_ex->start = ((new_ex->start - 1) | mask) + 1;
         new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
         LASSERTF(new_ex->start <= req_start,
-                 "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
+                "mask %#llx grant start %llu req start %llu\n",
                  mask, new_ex->start, req_start);
         LASSERTF(new_ex->end >= req_end,
-                 "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
+                "mask %#llx grant end %llu req end %llu\n",
                  mask, new_ex->end, req_end);
 }
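
A stand-alone illustration of the alignment arithmetic above. It assumes, as in the full source (in lines this diff does not show between the two hunks), that mask has been reduced to an alignment-minus-one value before the rounding expressions run; the two statements then shrink the extent inward to aligned boundaries:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* mask is assumed to hold alignment - 1 here (e.g. 4 KiB pages) */
	uint64_t mask = 4096 - 1;
	uint64_t start = 1000, end = 1000000;

	start = ((start - 1) | mask) + 1;	/* round start up to a 4096 multiple */
	end = ((end + 1) & ~mask) - 1;		/* round end down so end + 1 is a multiple */

	printf("start=%llu end=%llu\n",
	       (unsigned long long)start, (unsigned long long)end);
	/* prints start=4096 end=999423, i.e. the extent only ever shrinks */
	return 0;
}
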
 
@@ -134,7 +130,10 @@ static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_interval_tree *tree;
-       struct interval_node_extent limiter = { new_ex->start, new_ex->end };
+       struct interval_node_extent limiter = {
+               .start  = new_ex->start,
+               .end    = new_ex->end,
+       };
        int conflicting = 0;
        int idx;
        ENTRY;
@@ -143,7 +142,10 @@ static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
 
        /* Using interval tree to handle the LDLM extent granted locks. */
         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
-                struct interval_node_extent ext = { req_start, req_end };
+               struct interval_node_extent ext = {
+                       .start  = req_start,
+                       .end    = req_end,
+               };
 
                 tree = &res->lr_itree[idx];
                 if (lockmode_compat(tree->lit_mode, req_mode))
@@ -267,38 +269,49 @@ ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
 static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, __u64 *flags)
 {
-        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
-
-        if (lock->l_export == NULL)
-                /*
-                 * this is local lock taken by server (e.g., as a part of
-                 * OST-side locking, or unlink handling). Expansion doesn't
-                 * make a lot of sense for local locks, because they are
-                 * dropped immediately on operation completion and would only
-                 * conflict with other threads.
-                 */
-                return;
+       struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
+
+       if (lock->l_export == NULL)
+               /*
+                * this is a local lock taken by server (e.g., as a part of
+                * OST-side locking, or unlink handling). Expansion doesn't
+                * make a lot of sense for local locks, because they are
+                * dropped immediately on operation completion and would only
+                * conflict with other threads.
+                */
+               return;
 
-        if (lock->l_policy_data.l_extent.start == 0 &&
-            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
-                /* fast-path whole file locks */
-                return;
+       if (lock->l_policy_data.l_extent.start == 0 &&
+           lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
+               /* fast-path whole file locks */
+               return;
 
-        ldlm_extent_internal_policy_granted(lock, &new_ex);
-        ldlm_extent_internal_policy_waiting(lock, &new_ex);
+       /* Because reprocess_queue zeroes flags and uses it to return
+        * LDLM_FL_LOCK_CHANGED, we must check for the NO_EXPANSION flag
+        * in the lock flags rather than the 'flags' argument */
+       if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
+               ldlm_extent_internal_policy_granted(lock, &new_ex);
+               ldlm_extent_internal_policy_waiting(lock, &new_ex);
+       } else {
+               LDLM_DEBUG(lock, "Not expanding manually requested lock.\n");
+               new_ex.start = lock->l_policy_data.l_extent.start;
+               new_ex.end = lock->l_policy_data.l_extent.end;
+               /* In case the request is not on correct boundaries, we call
+                * fixup. (normally called in ldlm_extent_internal_policy_*) */
+               ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
+       }
 
-        if (new_ex.start != lock->l_policy_data.l_extent.start ||
-            new_ex.end != lock->l_policy_data.l_extent.end) {
-                *flags |= LDLM_FL_LOCK_CHANGED;
-                lock->l_policy_data.l_extent.start = new_ex.start;
-                lock->l_policy_data.l_extent.end = new_ex.end;
-        }
+       if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
+               *flags |= LDLM_FL_LOCK_CHANGED;
+               lock->l_policy_data.l_extent.start = new_ex.start;
+               lock->l_policy_data.l_extent.end = new_ex.end;
+       }
 }
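
The open-coded start/end comparison above is replaced by ldlm_extent_equal(), which is defined outside this file. Based on the removed lines, it presumably compares only the start and end offsets; a minimal user-space sketch of that assumed behaviour:

#include <stdbool.h>
#include <stdint.h>

struct extent { uint64_t start, end; };

/* assumed shape of ldlm_extent_equal(): equal start and equal end */
static inline bool extent_equal(const struct extent *a, const struct extent *b)
{
	return a->start == b->start && a->end == b->end;
}
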
 
 static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
 {
        struct ldlm_resource *res = lock->l_resource;
-       cfs_time_t now = cfs_time_current();
+       time64_t now = ktime_get_seconds();
 
        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
                return 1;
@@ -306,8 +319,9 @@ static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
        CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
        if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
                res->lr_contention_time = now;
-       return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
-               cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
+
+       return now < res->lr_contention_time +
+                    ldlm_res_to_ns(res)->ns_contention_time;
 }
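
The contention check is converted from jiffies-based cfs_time_* helpers to plain time64_t seconds, a pattern repeated elsewhere in this patch. A small sketch of the equivalent comparison (kernel types stubbed out for illustration):

#include <stdbool.h>
#include <stdint.h>

/* The old expression
 *   cfs_time_before(now, cfs_time_add(lr_contention_time,
 *                                     cfs_time_seconds(ns_contention_time)))
 * reduces to an ordinary comparison once everything is kept in seconds. */
bool still_contended(int64_t now, int64_t contention_time, int64_t window_seconds)
{
	return now < contention_time + window_seconds;
}
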
 
 struct ldlm_extent_compat_args {
@@ -419,7 +433,8 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         }
 
                         if (tree->lit_mode == LCK_GROUP) {
-                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+                               if (*flags & (LDLM_FL_BLOCK_NOWAIT |
+                                             LDLM_FL_SPECULATIVE)) {
                                         compat = -EWOULDBLOCK;
                                         goto destroylock;
                                 }
@@ -436,10 +451,24 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                 continue;
                         }
 
-                        if (!work_list) {
-                                rc = interval_is_overlapped(tree->lit_root,&ex);
-                                if (rc)
-                                        RETURN(0);
+                       /* We've found a potentially blocking lock, check
+                        * compatibility.  This handles locks other than GROUP
+                        * locks, which are handled separately above.
+                        *
+                        * Locks with FL_SPECULATIVE are asynchronous requests
+                        * which must never wait behind another lock, so they
+                        * fail if any conflicting lock is found. */
+                       if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
+                               rc = interval_is_overlapped(tree->lit_root,
+                                                           &ex);
+                               if (rc) {
+                                       if (!work_list) {
+                                               RETURN(0);
+                                       } else {
+                                               compat = -EWOULDBLOCK;
+                                               goto destroylock;
+                                       }
+                               }
                         } else {
                                 interval_search(tree->lit_root, &ex,
                                                 ldlm_extent_compat_cb, &data);
@@ -526,8 +555,8 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                     lock->l_policy_data.l_extent.gid) {
                                         /* If existing lock with matched gid is granted,
                                            we grant new one too. */
-                                        if (lock->l_req_mode == lock->l_granted_mode)
-                                                RETURN(2);
+                                       if (ldlm_is_granted(lock))
+                                               RETURN(2);
 
                                         /* Otherwise we are scanning queue of waiting
                                          * locks and it means current request would
@@ -535,7 +564,8 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                          * already blocked.
                                          * If we are in nonblocking mode - return
                                          * immediately */
-                                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+                                       if (*flags & (LDLM_FL_BLOCK_NOWAIT
+                                                     | LDLM_FL_SPECULATIVE)) {
                                                 compat = -EWOULDBLOCK;
                                                 goto destroylock;
                                         }
@@ -554,8 +584,8 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                 }
                         }
 
-                        if (unlikely(req_mode == LCK_GROUP &&
-                                     (lock->l_req_mode != lock->l_granted_mode))) {
+                       if (unlikely(req_mode == LCK_GROUP &&
+                                    !ldlm_is_granted(lock))) {
                                 scan = 1;
                                 compat = 0;
                                 if (lock->l_req_mode != LCK_GROUP) {
@@ -578,10 +608,11 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         }
 
                         if (unlikely(lock->l_req_mode == LCK_GROUP)) {
-                                /* If compared lock is GROUP, then requested is PR/PW/
-                                 * so this is not compatible; extent range does not
-                                 * matter */
-                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+                               /* If compared lock is GROUP, then requested is
+                                * PR/PW so this is not compatible; extent
+                                * range does not matter */
+                               if (*flags & (LDLM_FL_BLOCK_NOWAIT
+                                             | LDLM_FL_SPECULATIVE)) {
                                         compat = -EWOULDBLOCK;
                                         goto destroylock;
                                 } else {
@@ -600,6 +631,11 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         if (!work_list)
                                 RETURN(0);
 
+                       if (*flags & LDLM_FL_SPECULATIVE) {
+                               compat = -EWOULDBLOCK;
+                               goto destroylock;
+                       }
+
                         /* don't count conflicting glimpse locks */
                         if (lock->l_req_mode == LCK_PR &&
                             lock->l_policy_data.l_extent.start == 0 &&
@@ -640,7 +676,9 @@ destroylock:
 void ldlm_lock_prolong_one(struct ldlm_lock *lock,
                           struct ldlm_prolong_args *arg)
 {
-       int timeout;
+       time64_t timeout;
+
+       OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PROLONG_PAUSE, 3);
 
        if (arg->lpa_export != lock->l_export ||
            lock->l_flags & LDLM_FL_DESTROYED)
@@ -658,7 +696,7 @@ void ldlm_lock_prolong_one(struct ldlm_lock *lock,
         */
        timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);
 
-       LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);
+       LDLM_DEBUG(lock, "refreshed to %llds.\n", timeout);
 
        arg->lpa_blocks_cnt++;
 
@@ -705,9 +743,8 @@ void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
        res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
                                &arg->lpa_resid, LDLM_EXTENT, 0);
        if (IS_ERR(res)) {
-               CDEBUG(D_DLMTRACE, "Failed to get resource for resid "LPU64"/"
-                      LPU64"\n", arg->lpa_resid.name[0],
-                      arg->lpa_resid.name[1]);
+               CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
+                      arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
                RETURN_EXIT;
        }
 
@@ -736,34 +773,6 @@ void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
 }
 EXPORT_SYMBOL(ldlm_resource_prolong);
 
-
-/**
- * Discard all AST work items from list.
- *
- * If for whatever reason we do not want to send ASTs to conflicting locks
- * anymore, disassemble the list with this function.
- */
-static void discard_bl_list(struct list_head *bl_list)
-{
-       struct list_head *tmp, *pos;
-        ENTRY;
-
-       list_for_each_safe(pos, tmp, bl_list) {
-                struct ldlm_lock *lock =
-                       list_entry(pos, struct ldlm_lock, l_bl_ast);
-
-               list_del_init(&lock->l_bl_ast);
-               LASSERT(ldlm_is_ast_sent(lock));
-               ldlm_clear_ast_sent(lock);
-                LASSERT(lock->l_bl_ast_run == 0);
-                LASSERT(lock->l_blocking_lock);
-                LDLM_LOCK_RELEASE(lock->l_blocking_lock);
-                lock->l_blocking_lock = NULL;
-                LDLM_LOCK_RELEASE(lock);
-        }
-        EXIT;
-}
-
 /**
  * Process a granting attempt for extent lock.
  * Must be called with ns lock held.
@@ -771,38 +780,30 @@ static void discard_bl_list(struct list_head *bl_list)
  * This function looks for any conflicts for \a lock in the granted or
  * waiting queues. The lock is granted if no conflicts are found in
  * either queue.
- *
- * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
- *   - blocking ASTs have already been sent
- *
- * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
- *   - blocking ASTs have not been sent yet, so list of conflicting locks
- *     would be collected and ASTs sent.
  */
 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
-                            int first_enq, enum ldlm_error *err,
-                            struct list_head *work_list)
+                            enum ldlm_process_intention intention,
+                            enum ldlm_error *err, struct list_head *work_list)
 {
        struct ldlm_resource *res = lock->l_resource;
-       struct list_head rpc_list;
        int rc, rc2;
        int contended_locks = 0;
+       struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
+                                                       NULL : work_list;
        ENTRY;
 
-       LASSERT(lock->l_granted_mode != lock->l_req_mode);
-       LASSERT(list_empty(&res->lr_converting));
+       LASSERT(!ldlm_is_granted(lock));
        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !ldlm_is_ast_discard_data(lock));
-       INIT_LIST_HEAD(&rpc_list);
        check_res_locked(res);
        *err = ELDLM_OK;
 
-        if (!first_enq) {
-                /* Careful observers will note that we don't handle -EWOULDBLOCK
-                 * here, but it's ok for a non-obvious reason -- compat_queue
-                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
-                 * flags should always be zero here, and if that ever stops
-                 * being true, we want to find out. */
+       if (intention == LDLM_PROCESS_RESCAN) {
+               /* Careful observers will note that we don't handle -EWOULDBLOCK
+                * here, but it's ok for a non-obvious reason -- compat_queue
+                * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT |
+                * SPECULATIVE). flags should always be zero here, and if that
+                * ever stops being true, we want to find out. */
                 LASSERT(*flags == 0);
                 rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                               err, NULL, &contended_locks);
@@ -818,128 +819,153 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
 
                 if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
                         ldlm_extent_policy(res, lock, flags);
-                ldlm_grant_lock(lock, work_list);
+               ldlm_grant_lock(lock, grant_work);
                 RETURN(LDLM_ITER_CONTINUE);
         }
 
- restart:
         contended_locks = 0;
         rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
-                                      &rpc_list, &contended_locks);
-        if (rc < 0)
-                GOTO(out, rc); /* lock was destroyed */
-        if (rc == 2)
-                goto grant;
-
-        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
-                                       &rpc_list, &contended_locks);
-        if (rc2 < 0)
-                GOTO(out, rc = rc2); /* lock was destroyed */
-
-        if (rc + rc2 == 2) {
-        grant:
-                ldlm_extent_policy(res, lock, flags);
-                ldlm_resource_unlink_lock(lock);
-                ldlm_grant_lock(lock, NULL);
-        } else {
-                /* If either of the compat_queue()s returned failure, then we
-                 * have ASTs to send and must go onto the waiting list.
-                 *
-                 * bug 2322: we used to unlink and re-add here, which was a
-                 * terrible folly -- if we goto restart, we could get
-                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-               if (list_empty(&lock->l_res_link))
-                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
-                unlock_res(res);
-                rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
-                                       LDLM_WORK_BL_AST);
-
-                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
-                    !ns_is_client(ldlm_res_to_ns(res)))
-                        class_fail_export(lock->l_export);
-
-               lock_res(res);
-               if (rc == -ERESTART) {
-                       /* 15715: The lock was granted and destroyed after
-                        * resource lock was dropped. Interval node was freed
-                        * in ldlm_lock_destroy. Anyway, this always happens
-                        * when a client is being evicted. So it would be
-                        * ok to return an error. -jay */
-                       if (ldlm_is_destroyed(lock)) {
-                               *err = -EAGAIN;
-                               GOTO(out, rc = -EAGAIN);
-                       }
+                                     work_list, &contended_locks);
+       if (rc < 0)
+               GOTO(out_rpc_list, rc);
+
+       rc2 = 0;
+       if (rc != 2) {
+               rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
+                                              flags, err, work_list,
+                                              &contended_locks);
+               if (rc2 < 0)
+                       GOTO(out_rpc_list, rc = rc2);
+       }
 
-                       /* lock was granted while resource was unlocked. */
-                       if (lock->l_granted_mode == lock->l_req_mode) {
-                               /* bug 11300: if the lock has been granted,
-                                * break earlier because otherwise, we will go
-                                * to restart and ldlm_resource_unlink will be
-                                * called and it causes the interval node to be
-                                * freed. Then we will fail at
-                                * ldlm_extent_add_lock() */
-                               *flags &= ~LDLM_FL_BLOCKED_MASK;
-                               GOTO(out, rc = 0);
-                       }
+       if (rc + rc2 == 2) {
+               ldlm_extent_policy(res, lock, flags);
+               ldlm_resource_unlink_lock(lock);
+               ldlm_grant_lock(lock, grant_work);
+       } else {
+               /* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to
+                * force client to wait for the lock endlessly once
+                * the lock is enqueued -bzzz */
+               *flags |= LDLM_FL_NO_TIMEOUT;
+       }
+       rc = LDLM_ITER_CONTINUE;
 
-                       GOTO(restart, rc);
-               }
+out_rpc_list:
+       RETURN(rc);
+}
+#endif /* HAVE_SERVER_SUPPORT */
 
-               /* this way we force client to wait for the lock
-                * endlessly once the lock is enqueued -bzzz */
-               *flags |= LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT;
+struct ldlm_kms_shift_args {
+       __u64   old_kms;
+       __u64   kms;
+       bool    complete;
+};
 
+/* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
+static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
+                                           void *args)
+{
+       struct ldlm_kms_shift_args *arg = args;
+       struct ldlm_interval *node = to_ldlm_interval(n);
+       struct ldlm_lock *tmplock;
+       struct ldlm_lock *lock = NULL;
+
+       ENTRY;
+
+       /* Since all locks in an interval have the same extent, we can just
+        * use the first lock without kms_ignore set. */
+       list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
+               if (ldlm_is_kms_ignore(tmplock))
+                       continue;
+
+               lock = tmplock;
+
+               break;
        }
-       RETURN(0);
-out:
-       if (!list_empty(&rpc_list)) {
-               LASSERT(!ldlm_is_ast_discard_data(lock));
-               discard_bl_list(&rpc_list);
+
+       /* No locks in this interval without kms_ignore set */
+       if (!lock)
+               RETURN(INTERVAL_ITER_CONT);
+
+       /* If we find a lock with a greater or equal kms, we are not the
+        * highest lock (or we share that distinction with another lock), and
+        * don't need to update KMS.  Return old_kms and stop looking. */
+       if (lock->l_policy_data.l_extent.end >= arg->old_kms) {
+               arg->kms = arg->old_kms;
+               arg->complete = true;
+               RETURN(INTERVAL_ITER_STOP);
        }
-       RETURN(rc);
+
+       if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
+               arg->kms = lock->l_policy_data.l_extent.end + 1;
+
+       /* Since interval_iterate_reverse starts with the highest lock and
+        * works down, for PW locks, we only need to check if we should update
+        * the kms, then stop walking the tree.  PR locks are not exclusive, so
+        * the highest start does not imply the highest end and we must
+        * continue. (Only one group lock is allowed per resource, so this is
+        * irrelevant for group locks.) */
+       if (lock->l_granted_mode == LCK_PW)
+               RETURN(INTERVAL_ITER_STOP);
+       else
+               RETURN(INTERVAL_ITER_CONT);
 }
-#endif /* HAVE_SERVER_SUPPORT */
 
 /* When a lock is cancelled by a client, the KMS may undergo change if this
- * is the "highest lock".  This function returns the new KMS value.
+ * is the "highest lock".  This function returns the new KMS value, updating
+ * it only if we were the highest lock.
+ *
  * Caller must hold lr_lock already.
  *
  * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
 {
-        struct ldlm_resource *res = lock->l_resource;
-       struct list_head *tmp;
-        struct ldlm_lock *lck;
-        __u64 kms = 0;
-        ENTRY;
-
-        /* don't let another thread in ldlm_extent_shift_kms race in
-         * just after we finish and take our lock into account in its
-         * calculation of the kms */
+       struct ldlm_resource *res = lock->l_resource;
+       struct ldlm_interval_tree *tree;
+       struct ldlm_kms_shift_args args;
+       int idx = 0;
+
+       ENTRY;
+
+       args.old_kms = old_kms;
+       args.kms = 0;
+       args.complete = false;
+
+       /* don't let another thread in ldlm_extent_shift_kms race in
+        * just after we finish and take our lock into account in its
+        * calculation of the kms */
        ldlm_set_kms_ignore(lock);
 
-       list_for_each(tmp, &res->lr_granted) {
-               lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+       /* We iterate over the lock trees, looking for the largest kms smaller
+        * than the current one. */
+       for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+               tree = &res->lr_itree[idx];
 
-               if (ldlm_is_kms_ignore(lck))
-                        continue;
+               /* If our already known kms is >= than the highest 'end' in
+                * this tree, we don't need to check this tree, because
+                * the kms from a tree can be lower than in_max_high (due to
+                * kms_ignore), but it can never be higher. */
+               if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
+                       continue;
 
-                if (lck->l_policy_data.l_extent.end >= old_kms)
-                        RETURN(old_kms);
+               interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
+                                        &args);
 
-                /* This extent _has_ to be smaller than old_kms (checked above)
-                 * so kms can only ever be smaller or the same as old_kms. */
-                if (lck->l_policy_data.l_extent.end + 1 > kms)
-                        kms = lck->l_policy_data.l_extent.end + 1;
-        }
-        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);
+               /* this tells us we're not the highest lock, so we don't need
+                * to check the remaining trees */
+               if (args.complete)
+                       break;
+       }
+
+       LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
+                args.old_kms);
 
-        RETURN(kms);
+       RETURN(args.kms);
 }
 EXPORT_SYMBOL(ldlm_extent_shift_kms);
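
For reference, a user-space sketch of the KMS recomputation the callback above performs, flattened to a plain array instead of the per-mode interval trees (names and structure here are illustrative only): the new KMS is the largest end + 1 among the remaining locks, and it never grows past old_kms.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ext { uint64_t end; bool kms_ignore; };

static uint64_t shift_kms(const struct ext *locks, int n, uint64_t old_kms)
{
	uint64_t kms = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (locks[i].kms_ignore)	/* e.g. the lock being cancelled */
			continue;
		if (locks[i].end >= old_kms)	/* another lock still covers old_kms */
			return old_kms;
		if (locks[i].end + 1 > kms)
			kms = locks[i].end + 1;
	}
	return kms;
}

int main(void)
{
	struct ext locks[] = {
		{ .end = 4095,  .kms_ignore = false },
		{ .end = 65535, .kms_ignore = true },	/* cancelled, was the highest */
	};

	printf("new kms = %llu\n", (unsigned long long)shift_kms(locks, 2, 65536));
	/* prints 4096: KMS shrinks because the cancelled lock was the highest */
	return 0;
}
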
 
 struct kmem_cache *ldlm_interval_slab;
-struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
+static struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
 {
        struct ldlm_interval *node;
        ENTRY;
@@ -993,13 +1019,20 @@ static inline int ldlm_mode_to_index(enum ldlm_mode mode)
        int index;
 
        LASSERT(mode != 0);
-       LASSERT(IS_PO2(mode));
-       for (index = -1; mode != 0; index++, mode >>= 1)
-               /* do nothing */;
+       LASSERT(is_power_of_2(mode));
+       index = ilog2(mode);
        LASSERT(index < LCK_MODE_NUM);
        return index;
 }
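
The hand-rolled shift loop is replaced with is_power_of_2()/ilog2(). A quick user-space check of the equivalence (mode bit values assumed for illustration; __builtin_ctz stands in for the kernel's ilog2() on a power of two):

#include <stdio.h>

enum { LCK_EX = 1, LCK_PW = 2, LCK_PR = 4, LCK_CW = 8 };	/* illustrative values */

static int mode_to_index(unsigned int mode)
{
	return __builtin_ctz(mode);	/* == ilog2(mode) when mode is a power of two */
}

int main(void)
{
	printf("%d %d %d %d\n", mode_to_index(LCK_EX), mode_to_index(LCK_PW),
	       mode_to_index(LCK_PR), mode_to_index(LCK_CW));	/* 0 1 2 3 */
	return 0;
}
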
 
+int ldlm_extent_alloc_lock(struct ldlm_lock *lock)
+{
+       lock->l_tree_node = NULL;
+       if (ldlm_interval_alloc(lock) == NULL)
+               return -ENOMEM;
+       return 0;
+}
+
 /** Add newly granted lock into interval tree for the resource. */
 void ldlm_extent_add_lock(struct ldlm_resource *res,
                           struct ldlm_lock *lock)
@@ -1007,9 +1040,9 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
         struct interval_node *found, **root;
         struct ldlm_interval *node;
         struct ldlm_extent *extent;
-        int idx;
+       int idx, rc;
 
-        LASSERT(lock->l_granted_mode == lock->l_req_mode);
+       LASSERT(ldlm_is_granted(lock));
 
         node = lock->l_tree_node;
         LASSERT(node != NULL);
@@ -1021,7 +1054,9 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
 
         /* node extent initialize */
         extent = &lock->l_policy_data.l_extent;
-        interval_set(&node->li_node, extent->start, extent->end);
+
+       rc = interval_set(&node->li_node, extent->start, extent->end);
+       LASSERT(!rc);
 
         root = &res->lr_itree[idx].lit_root;
         found = interval_insert(&node->li_node, root);
@@ -1036,6 +1071,26 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
         /* even though we use interval tree to manage the extent lock, we also
          * add the locks into grant list, for debug purpose, .. */
         ldlm_resource_add_lock(res, &res->lr_granted, lock);
+
+       if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
+               struct ldlm_lock *lck;
+
+               list_for_each_entry_reverse(lck, &res->lr_granted,
+                                           l_res_link) {
+                       if (lck == lock)
+                               continue;
+                       if (lockmode_compat(lck->l_granted_mode,
+                                           lock->l_granted_mode))
+                               continue;
+                       if (ldlm_extent_overlap(&lck->l_req_extent,
+                                               &lock->l_req_extent)) {
+                               CDEBUG(D_ERROR, "granting conflicting lock %p "
+                                               "%p\n", lck, lock);
+                               ldlm_resource_dump(D_ERROR, res);
+                               LBUG();
+                       }
+               }
+       }
 }
 
 /** Remove cancelled lock from resource interval tree. */
@@ -1066,7 +1121,6 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
 void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
                                      union ldlm_policy_data *lpolicy)
 {
-       memset(lpolicy, 0, sizeof(*lpolicy));
        lpolicy->l_extent.start = wpolicy->l_extent.start;
        lpolicy->l_extent.end = wpolicy->l_extent.end;
        lpolicy->l_extent.gid = wpolicy->l_extent.gid;