b=19325 adjust waiting extent locks during 1st enqueue
[fs/lustre-release.git] lustre/ldlm/ldlm_extent.c
index 8240eab..4eba10f 100644
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
- *   Author: Peter Braam <braam@clusterfs.com>
- *   Author: Phil Schwan <phil@clusterfs.com>
+ * GPL HEADER START
  *
- *   This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   Lustre is free software; you can redistribute it and/or
- *   modify it under the terms of version 2 of the GNU General Public
- *   License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   Lustre is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Lustre; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_extent.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
  */
 
 #define DEBUG_SUBSYSTEM S_LDLM
 #ifndef __KERNEL__
 # include <liblustre.h>
+#else
+# include <libcfs/libcfs.h>
 #endif
 
-#include <linux/lustre_dlm.h>
-#include <linux/obd_support.h>
-#include <linux/lustre_lib.h>
+#include <lustre_dlm.h>
+#include <obd_support.h>
+#include <obd.h>
+#include <obd_class.h>
+#include <lustre_lib.h>
 
 #include "ldlm_internal.h"
 
-/* The purpose of this function is to return:
- * - the maximum extent
- * - containing the requested extent
- * - and not overlapping existing conflicting extents outside the requested one
- */
-static void
-ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
-                            struct ldlm_extent *new_ex)
+#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
+
+/* fixup the ldlm_extent after expanding */
+static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
+                                              struct ldlm_extent *new_ex,
+                                              int conflicting)
 {
-        struct list_head *tmp;
         ldlm_mode_t req_mode = req->l_req_mode;
         __u64 req_start = req->l_req_extent.start;
         __u64 req_end = req->l_req_extent.end;
-        int conflicting = 0;
-        ENTRY;
-
-        lockmode_verify(req_mode);
+        __u64 req_align, mask;
 
-        list_for_each(tmp, queue) {
-                struct ldlm_lock *lock;
-                struct ldlm_extent *l_extent;
-
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-                l_extent = &lock->l_policy_data.l_extent;
+        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
+                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
+                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
+                                          new_ex->end);
+        }
 
-                if (new_ex->start == req_start && new_ex->end == req_end) {
-                        EXIT;
-                        return;
-                }
+        if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
+                EXIT;
+                return;
+        }
 
-                /* Don't conflict with ourselves */
-                if (req == lock)
-                        continue;
+        /* we need to ensure that the lock extent is properly aligned to what
+         * the client requested.  We align it to the lowest-common denominator
+         * of the client's requested lock start and end alignment. */
+        mask = 0x1000ULL;
+        req_align = (req_end + 1) | req_start;
+        if (req_align != 0) {
+                while ((req_align & mask) == 0)
+                        mask <<= 1;
+        }
+        mask -= 1;
+        /* We can only shrink the lock, not grow it.
+         * This should never cause lock to be smaller than requested,
+         * since requested lock was already aligned on these boundaries. */
+        new_ex->start = ((new_ex->start - 1) | mask) + 1;
+        new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
+        LASSERTF(new_ex->start <= req_start,
+                 "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
+                 mask, new_ex->start, req_start);
+        LASSERTF(new_ex->end >= req_end,
+                 "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
+                 mask, new_ex->end, req_end);
+}
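
For illustration only (not part of the patch; all values are hypothetical), a
standalone sketch of the alignment step above, showing how the mask is derived
from the requested extent and how the expanded extent is rounded inward:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical client request: [20480, 24575], i.e. 4 KB aligned */
        uint64_t req_start = 20480, req_end = 24575;
        /* hypothetical expanded extent produced by the policy */
        uint64_t start = 1000, end = 1000000;

        uint64_t mask = 0x1000ULL;                 /* start from 4 KB */
        uint64_t req_align = (req_end + 1) | req_start;
        if (req_align != 0)
                while ((req_align & mask) == 0)
                        mask <<= 1;                /* widen until a set bit of the bounds is covered */
        mask -= 1;                                 /* 0xfff here */

        /* shrink the expanded extent inward to the aligned boundaries */
        start = ((start - 1) | mask) + 1;          /* 1000    -> 4096   */
        end   = ((end + 1) & ~mask) - 1;           /* 1000000 -> 999423 */

        printf("mask=%#llx start=%llu end=%llu\n", (unsigned long long)mask,
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}
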
 
-                /* Locks are compatible, overlap doesn't matter */
-                /* Until bug 20 is fixed, try to avoid granting overlapping
-                 * locks on one client (they take a long time to cancel) */
-                if (lockmode_compat(lock->l_req_mode, req_mode) &&
-                    lock->l_export != req->l_export)
-                        continue;
 
-                /* If this is a high-traffic lock, don't grow downwards at all
-                 * or grow upwards too much */
-                ++conflicting;
-                if (conflicting > 4)
-                        new_ex->start = req_start;
+static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
+{
+        struct ldlm_resource *res = lock->l_resource;
+        cfs_time_t now = cfs_time_current();
 
-                /* If lock doesn't overlap new_ex, skip it. */
-                if (l_extent->end < new_ex->start ||
-                    l_extent->start > new_ex->end)
-                        continue;
+        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
+                return 1;
 
-                /* Locks conflicting in requested extents and we can't satisfy
-                 * both locks, so ignore it.  Either we will ping-pong this
-                 * extent (we would regardless of what extent we granted) or
-                 * lock is unused and it shouldn't limit our extent growth. */
-                if (lock->l_req_extent.end >= req_start &&
-                    lock->l_req_extent.start <= req_end)
-                        continue;
+        CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
+        if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
+                res->lr_contention_time = now;
+        return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
+                cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
+}
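
For illustration only (not part of the patch; the tunables stand in for
ns_contended_locks and ns_contention_time and their values are hypothetical),
a minimal sketch of the contention window above: once enough conflicting locks
are seen, the resource stays "contended" for a fixed number of seconds:

#include <stdio.h>
#include <time.h>

int main(void)
{
        int ns_contended_locks = 32;      /* hypothetical threshold */
        int ns_contention_time = 2;       /* hypothetical window, in seconds */
        time_t lr_contention_time = 0;    /* per-resource timestamp */

        int contended_locks = 40;         /* counted during this enqueue */
        time_t now = time(NULL);

        if (contended_locks > ns_contended_locks)
                lr_contention_time = now;               /* (re)start the window */

        int contended = now < lr_contention_time + ns_contention_time;
        printf("resource contended: %d\n", contended);  /* prints 1 */
        return 0;
}
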
 
-                /* We grow extents downwards only as far as they don't overlap
-                 * with already-granted locks, on the assumtion that clients
-                 * will be writing beyond the initial requested end and would
-                 * then need to enqueue a new lock beyond previous request.
-                 * l_req_extent->end strictly < req_start, checked above. */
-                if (l_extent->start < req_start && new_ex->start != req_start) {
-                        if (l_extent->end >= req_start)
-                                new_ex->start = req_start;
-                        else
-                                new_ex->start = min(l_extent->end+1, req_start);
-                }
+struct ldlm_extent_compat_args {
+        cfs_list_t *work_list;
+        struct ldlm_lock *lock;
+        ldlm_mode_t mode;
+        int *locks;
+        int *compat;
+        int *conflicts;
+};
+
+static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
+                                                void *data)
+{
+        struct ldlm_extent_compat_args *priv = data;
+        struct ldlm_interval *node = to_ldlm_interval(n);
+        struct ldlm_extent *extent;
+        cfs_list_t *work_list = priv->work_list;
+        struct ldlm_lock *lock, *enq = priv->lock;
+        ldlm_mode_t mode = priv->mode;
+        int count = 0;
+        ENTRY;
 
-                /* If we need to cancel this lock anyways because our request
-                 * overlaps the granted lock, we grow up to its requested
-                 * extent start instead of limiting this extent, assuming that
-                 * clients are writing forwards and the lock had over grown
-                 * its extent downwards before we enqueued our request. */
-                if (l_extent->end > req_end) {
-                        if (l_extent->start <= req_end)
-                                new_ex->end = max(lock->l_req_extent.start - 1,
-                                                  req_end);
-                        else
-                                new_ex->end = max(l_extent->start - 1, req_end);
+        LASSERT(!cfs_list_empty(&node->li_group));
+
+        cfs_list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+                /* the interval tree holds granted locks only */
+                LASSERTF(mode == lock->l_granted_mode,
+                         "mode = %s, lock->l_granted_mode = %s\n",
+                         ldlm_lockname[mode],
+                         ldlm_lockname[lock->l_granted_mode]);
+
+                /* only count locks whose _requested_ regions overlap as
+                 * contended locks */
+                if (lock->l_req_extent.end >= enq->l_req_extent.start &&
+                    lock->l_req_extent.start <= enq->l_req_extent.end) {
+                        count++;
+                        (*priv->conflicts)++;
                 }
+                if (lock->l_blocking_ast)
+                        ldlm_add_ast_work_item(lock, enq, work_list);
         }
 
-#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
-        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
-                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
-                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
-                                          new_ex->end);
-        }
-        EXIT;
-}
+        /* don't count conflicting glimpse locks */
+        extent = ldlm_interval_extent(node);
+        if (!(mode == LCK_PR &&
+            extent->start == 0 && extent->end == OBD_OBJECT_EOF))
+                *priv->locks += count;
 
-/* In order to determine the largest possible extent we can grant, we need
- * to scan all of the queues. */
-static void ldlm_extent_policy(struct ldlm_resource *res,
-                               struct ldlm_lock *lock, int *flags)
-{
-        struct ldlm_extent new_ex = { .start = 0, .end = ~0};
+        if (priv->compat)
+                *priv->compat = 0;
 
-        if (lock->l_req_mode == LCK_GROUP)
-                return;
-
-        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
-        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);
-
-        if (new_ex.start != lock->l_policy_data.l_extent.start ||
-            new_ex.end != lock->l_policy_data.l_extent.end) {
-                *flags |= LDLM_FL_LOCK_CHANGED;
-                lock->l_policy_data.l_extent.start = new_ex.start;
-                lock->l_policy_data.l_extent.end = new_ex.end;
-        }
+        RETURN(INTERVAL_ITER_CONT);
 }
 
-/* Determine if the lock is compatible with all locks on the queue.
- * We stop walking the queue if we hit ourselves so we don't take
- * conflicting locks enqueued after us into accound, or we'd wait forever.
- *
- * 0 if the lock is not compatible
- * 1 if the lock is compatible
- * 2 if this group lock is compatible and requires no further checking
- * negative error, such as EWOULDBLOCK for group locks
- */
 static int
-ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
-                         int *flags, struct list_head *work_list,
-                         struct list_head **insertp)
+ldlm_extent_compat_granted_queue(cfs_list_t *queue, struct ldlm_lock *req,
+                                 int *flags, ldlm_error_t *err,
+                                 cfs_list_t *work_list, int *contended_locks)
 {
-        struct list_head *tmp;
-        struct list_head *save = NULL;
-        struct ldlm_lock *lock = NULL;
+        struct ldlm_resource *res = req->l_resource;
         ldlm_mode_t req_mode = req->l_req_mode;
-        int compat = 1;
-        int found = 0;
+        __u64 req_start = req->l_req_extent.start;
+        __u64 req_end = req->l_req_extent.end;
+        int compat = 1, conflicts;
+        /* Using interval tree for granted lock */
+        struct ldlm_interval_tree *tree;
+        struct ldlm_extent_compat_args data = {.work_list = work_list,
+                                       .lock = req,
+                                       .locks = contended_locks,
+                                       .compat = &compat,
+                                       .conflicts = &conflicts };
+        struct interval_node_extent ex = { .start = req_start,
+                                           .end = req_end };
+        int idx, rc;
         ENTRY;
 
-        lockmode_verify(req_mode);
-
-        /* Extent locks are only queued once. We can get back here with
-         * insertp != NULL if the blocking ASTs returned -ERESTART. */ 
-        if (!list_empty(&req->l_res_link))
-                insertp = NULL;
 
-        if (req->l_req_mode != LCK_GROUP) {
-                __u64 req_start = req->l_req_extent.start;
-                __u64 req_end = req->l_req_extent.end;
-
-                list_for_each(tmp, queue) {
-                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
-                        if (req == lock)
-                                break;
+        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+                tree = &res->lr_itree[idx];
+                if (tree->lit_root == NULL) /* empty tree, skipped */
+                        continue;
 
-                        if (lock->l_req_mode == LCK_GROUP) {
-                                if (*flags & LDLM_FL_BLOCK_NOWAIT)
-                                        RETURN(-EWOULDBLOCK);
+                data.mode = tree->lit_mode;
+                if (lockmode_compat(req_mode, tree->lit_mode)) {
+                        struct ldlm_interval *node;
+                        struct ldlm_extent *extent;
 
-                                /* No blocking ASTs are sent for group locks. */
-                                compat = 0;
+                        if (req_mode != LCK_GROUP)
+                                continue;
 
-                                /* there's a blocking group lock in front
-                                 * of us on the queue.  It can be held
-                                 * indefinitely, so don't timeout. */
-                                if (insertp) {
-                                        *flags |= LDLM_FL_NO_TIMEOUT;
-                                        /* lock_bitlock(req) is held here. */
-                                        req->l_flags |= LDLM_FL_NO_TIMEOUT;
-                                }
+                        /* group lock, grant it immediately if
+                         * compatible */
+                        node = to_ldlm_interval(tree->lit_root);
+                        extent = ldlm_interval_extent(node);
+                        if (req->l_policy_data.l_extent.gid ==
+                            extent->gid)
+                                RETURN(2);
+                }
 
-                                if (work_list)
-                                        continue;
-                                else
-                                        break;
+                if (tree->lit_mode == LCK_GROUP) {
+                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+                                compat = -EWOULDBLOCK;
+                                goto destroylock;
                         }
 
-                        /* locks are compatible, overlap doesn't matter */
-                        if (lockmode_compat(lock->l_req_mode, req_mode))
-                                continue;
-
-                        if (lock->l_policy_data.l_extent.end < req_start ||
-                            lock->l_policy_data.l_extent.start > req_end)
-                                continue;
+                        *flags |= LDLM_FL_NO_TIMEOUT;
+                        if (!work_list)
+                                RETURN(0);
 
+                        /* if the work list is not NULL, add all
+                           locks in the tree to the work list */
                         compat = 0;
+                        interval_iterate(tree->lit_root,
+                                         ldlm_extent_compat_cb, &data);
+                        continue;
+                }
 
-                        if (!work_list)
-                                break;
 
-                        if (lock->l_blocking_ast)
-                                ldlm_add_ast_work_item(lock, req, work_list);
+                if (!work_list) {
+                        rc = interval_is_overlapped(tree->lit_root, &ex);
+                        if (rc)
+                                RETURN(0);
+                } else {
+                        struct interval_node_extent result_ext = {
+                                .start = req->l_policy_data.l_extent.start,
+                                .end = req->l_policy_data.l_extent.end };
+
+                        conflicts = 0;
+                        interval_search_expand_extent(tree->lit_root, &ex,
+                                                      &result_ext,
+                                                      ldlm_extent_compat_cb,
+                                                      &data);
+                        req->l_policy_data.l_extent.start = result_ext.start;
+                        req->l_policy_data.l_extent.end = result_ext.end;
+                        /* for granted locks, count non-compatible,
+                         * non-overlapping locks in the traffic index */
+                        req->l_traffic += tree->lit_size - conflicts;
+
+                        if (!cfs_list_empty(work_list)) {
+                                if (compat)
+                                        compat = 0;
+                                /* if there is at least 1 conflicting lock, we
+                                 * do not expand to the left, since we often
+                                 * continue writing to the right.
+                                 */
+                                req->l_policy_data.l_extent.start = req_start;
+                        }
                 }
+        }
+
+        RETURN(compat);
+destroylock:
+        cfs_list_del_init(&req->l_res_link);
+        ldlm_lock_destroy_nolock(req);
+        *err = compat;
+        RETURN(compat);
+}
 
-                if (insertp)
-                        *insertp = queue;
+static int
+ldlm_extent_compat_waiting_queue(cfs_list_t *queue, struct ldlm_lock *req,
+                                 int *flags, ldlm_error_t *err,
+                                 cfs_list_t *work_list, int *contended_locks)
+{
+        cfs_list_t *tmp;
+        struct ldlm_lock *lock;
+        ldlm_mode_t req_mode = req->l_req_mode;
+        __u64 req_start = req->l_req_extent.start;
+        __u64 req_end = req->l_req_extent.end;
+        int compat = 1;
+        int scan = 0;
+        int check_contention;
+        ENTRY;
 
-                RETURN(compat);
-        }
+        cfs_list_for_each(tmp, queue) {
+                check_contention = 1;
+
+                lock = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
-        list_for_each(tmp, queue) {
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                 if (req == lock)
                         break;
 
-                if (lock->l_req_mode != LCK_GROUP) {
-                        if (lock->l_req_mode != lock->l_granted_mode) {
-                                /* we must be traversing the waitq. */
-
-                                /* If a group lock was already found then
-                                 * req can be queued before any extent locks
-                                 * that come after the found group lock. */
-                                if (found)
-                                        break;
+                if (unlikely(scan)) {
+                        /* We only get here if we are queuing a GROUP lock
+                           and have met an incompatible one. The main idea of
+                           this code is to insert the GROUP lock after a
+                           compatible GROUP lock in the waiting queue or, if
+                           there is none, in front of the first non-GROUP lock */
+                        if (lock->l_req_mode != LCK_GROUP) {
+                                /* Ok, we hit a non-GROUP lock; there should be
+                                   no more GROUP locks later on, so queue in
+                                   front of the first non-GROUP lock */
+
+                                ldlm_resource_insert_lock_after(lock, req);
+                                cfs_list_del_init(&lock->l_res_link);
+                                ldlm_resource_insert_lock_after(req, lock);
+                                compat = 0;
+                                break;
+                        }
+                        if (req->l_policy_data.l_extent.gid ==
+                            lock->l_policy_data.l_extent.gid) {
+                                /* found it */
+                                ldlm_resource_insert_lock_after(lock, req);
+                                compat = 0;
+                                break;
+                        }
+                        continue;
+                }
 
-                                if (!insertp) {
-                                        /* We've hit a conflicting extent lock
-                                         * on the waitq before hitting the req
-                                         * group lock. See comments below. */
-                                        compat = 0;
-                                        break;
+                /* locks are compatible, overlap doesn't matter */
+                if (lockmode_compat(lock->l_req_mode, req_mode)) {
+                        if (req_mode == LCK_PR &&
+                            ((lock->l_policy_data.l_extent.start <=
+                              req->l_policy_data.l_extent.start) &&
+                             (lock->l_policy_data.l_extent.end >=
+                              req->l_policy_data.l_extent.end))) {
+                                /* If we met a PR lock just like us or wider,
+                                   and nobody down the list conflicted with
+                                   it, that means we can skip processing the
+                                   rest of the list and safely place ourselves
+                                   at the end of the list, or grant (depending
+                                   on whether we met a conflicting lock earlier
+                                   in the list).
+                                   Only in the case of a 1st enqueue do we keep
+                                   traversing if there is something conflicting
+                                   down the list, because we need to make sure
+                                   that something is marked as AST_SENT as well;
+                                   with an empty worklist we would exit on the
+                                   first conflict met. */
+                                /* There IS a case where such flag is
+                                   not set for a lock, yet it blocks
+                                   something. Luckily for us this is
+                                   only during destroy, so lock is
+                                   exclusive. So here we are safe */
+                                if (!(lock->l_flags & LDLM_FL_AST_SENT)) {
+                                        RETURN(compat);
                                 }
+                        }
 
-                                /* Group locks are not normally blocked by
-                                 * waiting PR|PW locks. */
+                        /* non-group locks are compatible, overlap doesn't
+                           matter */
+                        if (likely(req_mode != LCK_GROUP))
+                                continue;
 
-                                /* If NO_TIMEOUT was sent back to the client
-                                 * we can queue the group lock in front of
-                                 * this extent lock. */
-                                if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
-                                        if (save == NULL)
-                                                save = tmp;
-                                        continue;
+                        /* If we are trying to get a GROUP lock and there is
+                           another one of this kind, we need to compare gid */
+                        if (req->l_policy_data.l_extent.gid ==
+                            lock->l_policy_data.l_extent.gid) {
+                                /* We are scanning the queue of waiting
+                                 * locks and it means the current request would
+                                 * block along with the existing lock (that is
+                                 * already blocked).
+                                 * If we are in nonblocking mode, return
+                                 * immediately */
+                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+                                        compat = -EWOULDBLOCK;
+                                        goto destroylock;
                                 }
-
-                                /* If we did NOT send NO_TIMEOUT back to the
-                                 * client for this extent lock then the client
-                                 * could possibly timeout if we queue this
-                                 * group lock before it, so don't. This is the
-                                 * only way to get a conflicting extent lock
-                                 * in front of a group lock on the waitq. */
+                                /* If this group lock is compatible with another
+                                 * group lock on the waiting list, they must be
+                                 * together in the list, so they can be granted
+                                 * at the same time.  Otherwise the later lock
+                                 * can get stuck behind another, incompatible,
+                                 * lock. */
+                                ldlm_resource_insert_lock_after(lock, req);
+                                /* Because 'lock' is not granted, we can stop
+                                 * processing this queue and return immediately.
+                                 * There is no need to check the rest of the
+                                 * list. */
+                                RETURN(0);
                         }
+                }
 
+                if (unlikely(req_mode == LCK_GROUP &&
+                             (lock->l_req_mode != lock->l_granted_mode))) {
+                        scan = 1;
                         compat = 0;
-                        if (!work_list) {
-                                LASSERT(save == NULL);
+                        if (lock->l_req_mode != LCK_GROUP) {
+                                /* Ok, we hit a non-GROUP lock; there should
+                                 * be no more GROUP locks later on, so queue in
+                                 * front of the first non-GROUP lock */
+
+                                ldlm_resource_insert_lock_after(lock, req);
+                                cfs_list_del_init(&lock->l_res_link);
+                                ldlm_resource_insert_lock_after(req, lock);
                                 break;
                         }
+                        if (req->l_policy_data.l_extent.gid ==
+                            lock->l_policy_data.l_extent.gid) {
+                                /* found it */
+                                ldlm_resource_insert_lock_after(lock, req);
+                                break;
+                        }
+                        continue;
+                }
 
-                        /* If we previously skipped over some extent locks
-                         * because we thought we were going to queue the 
-                         * group lock in front of them then we need to go back
-                         * and send blocking ASTs for the locks we skipped. */
-                        if (save != NULL) {
-                                struct ldlm_lock *lck2;
-
-                                for (; save != tmp; save = save->next) {
-                                        lck2 = list_entry(save,
-                                                          struct ldlm_lock,
-                                                          l_res_link);
-
-                                        /* If there was a group lock after save
-                                         * then we would have exited this loop
-                                         * above. */
-                                        LASSERT(lck2->l_req_mode!=LCK_GROUP);
-
-                                        if (lck2->l_blocking_ast) {
-                                                ldlm_add_ast_work_item(lck2,req,
-                                                                     work_list);
-                                        }
-                                }
-                                save = NULL;
+                if (unlikely(lock->l_req_mode == LCK_GROUP)) {
+                        /* If the compared lock is GROUP, then the requested one
+                         * is PR/PW, so this is not compatible; the extent range
+                         * does not matter */
+                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
+                                compat = -EWOULDBLOCK;
+                                goto destroylock;
+                        } else {
+                                *flags |= LDLM_FL_NO_TIMEOUT;
                         }
+                } else if (!work_list) {
+                        if (lock->l_policy_data.l_extent.end < req_start ||
+                            lock->l_policy_data.l_extent.start > req_end)
+                                /* if a non-group lock doesn't overlap, skip it */
+                                continue;
+                        RETURN(0);
+                } else {
+                        /* for waiting locks, count all non-compatible locks in
+                         * traffic index */
+                        ++req->l_traffic;
+                        ++lock->l_traffic;
+
+                        /* adjust policy */
+                        if (lock->l_policy_data.l_extent.end < req_start) {
+                                /*     lock            req
+                                 * ------------+
+                                 * ++++++      |   +++++++
+                                 *      +      |   +
+                                 * ++++++      |   +++++++
+                                 * ------------+
+                                 */
+                                if (lock->l_policy_data.l_extent.end >
+                                    req->l_policy_data.l_extent.start)
+                                        req->l_policy_data.l_extent.start =
+                                             lock->l_policy_data.l_extent.end+1;
+                                continue;
+                        } else if (lock->l_req_extent.end < req_start) {
+                                /*     lock            req
+                                 * ------------------+
+                                 * ++++++          +++++++
+                                 *      +          + |
+                                 * ++++++          +++++++
+                                 * ------------------+
+                                 */
+                                lock->l_policy_data.l_extent.end =
+                                                          req_start - 1;
+                                req->l_policy_data.l_extent.start =
+                                                              req_start;
+                                continue;
+                        } else if (lock->l_policy_data.l_extent.start >
+                                   req_end) {
+                                /*  req              lock
+                                 *              +--------------
+                                 *  +++++++     |    +++++++
+                                 *        +     |    +
+                                 *  +++++++     |    +++++++
+                                 *              +--------------
+                                 */
+                                if (lock->l_policy_data.l_extent.start <
+                                    req->l_policy_data.l_extent.end)
+                                        req->l_policy_data.l_extent.end =
+                                           lock->l_policy_data.l_extent.start-1;
+                                continue;
+                        } else if (lock->l_req_extent.start > req_end) {
+                                /*  req              lock
+                                 *      +----------------------
+                                 *  +++++++          +++++++
+                                 *      | +          +
+                                 *  +++++++          +++++++
+                                 *      +----------------------
+                                 */
+                                lock->l_policy_data.l_extent.start =
+                                                            req_end + 1;
+                                req->l_policy_data.l_extent.end=req_end;
+                                continue;
+                        }
+                } /* policy_adj */
+
+                compat = 0;
+                if (work_list) {
+                        /* don't count conflicting glimpse locks */
+                        if (lock->l_flags & LDLM_FL_HAS_INTENT)
+                                check_contention = 0;
+
+                        *contended_locks += check_contention;
 
                         if (lock->l_blocking_ast)
                                 ldlm_add_ast_work_item(lock, req, work_list);
-                        continue;
                 }
+        }
 
-                /* If it was safe to insert a group lock at save,
-                 * i.e. save != NULL, then this group lock already
-                 * on the queue would have been inserted before save. */
-                LASSERT(save == NULL);
-
-                /* Note: no blocking ASTs are sent for group locks. */
+        RETURN(compat);
+destroylock:
+        cfs_list_del_init(&req->l_res_link);
+        ldlm_lock_destroy_nolock(req);
+        *err = compat;
+        RETURN(compat);
+}
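
For illustration only (not part of the patch; the numbers are hypothetical), a
standalone sketch of the first "adjust policy" case diagrammed above, where a
conflicting waiting lock sits entirely below the requested range and only
limits how far the new lock's expanded extent may reach down:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical waiting lock whose expanded extent ends at 8191 */
        uint64_t lock_policy_end = 8191;
        /* hypothetical new request [16384, 20479], tentatively expanded down to 0 */
        uint64_t req_start = 16384;
        uint64_t new_start = 0;

        /* lock ends below req_start: clip our downward expansion, not the request */
        if (lock_policy_end < req_start && lock_policy_end > new_start)
                new_start = lock_policy_end + 1;        /* 0 -> 8192 */

        printf("expanded start clipped to %llu\n", (unsigned long long)new_start);
        return 0;
}
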
+/* Determine if the lock is compatible with all locks on the queue.
+ * We stop walking the queue if we hit ourselves so we don't take
+ * conflicting locks enqueued after us into account, or we'd wait forever.
+ *
+ * 0 if the lock is not compatible
+ * 1 if the lock is compatible
+ * 2 if this group lock is compatible and requires no further checking
+ * negative error, such as EWOULDBLOCK for group locks
+ *
+ * Note: policy adjustment only happens during the 1st lock enqueue procedure
+ */
+static int
+ldlm_extent_compat_queue(cfs_list_t *queue, struct ldlm_lock *req,
+                         int *flags, ldlm_error_t *err,
+                         cfs_list_t *work_list, int *contended_locks)
+{
+        struct ldlm_resource *res = req->l_resource;
+        ldlm_mode_t req_mode = req->l_req_mode;
+        __u64 req_start = req->l_req_extent.start;
+        __u64 req_end = req->l_req_extent.end;
+        int compat = 1;
+        ENTRY;
 
-                if (lock->l_policy_data.l_extent.gid ==
-                    req->l_policy_data.l_extent.gid) {
-                        /* group locks with this gid already on the waitq. */
-                        found = 2;
+        lockmode_verify(req_mode);
 
-                        if (lock->l_req_mode == lock->l_granted_mode) {
-                                /* if a group lock with this gid has already
-                                 * been granted then grant this one. */
-                                compat = 2;
-                                break;
-                        }
-                } else {
-                        if (found == 2)
-                                break;
+        if (queue == &res->lr_granted)
+                compat = ldlm_extent_compat_granted_queue(queue, req, flags,
+                                                          err, work_list,
+                                                          contended_locks);
+        else
+                compat = ldlm_extent_compat_waiting_queue(queue, req, flags,
+                                                          err, work_list,
+                                                          contended_locks);
 
-                        /* group locks already exist on the queue. */
-                        found = 1;
 
-                        if (*flags & LDLM_FL_BLOCK_NOWAIT)
-                                RETURN(-EWOULDBLOCK);
+        if (ldlm_check_contention(req, *contended_locks) &&
+            compat == 0 &&
+            (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
+            req->l_req_mode != LCK_GROUP &&
+            req_end - req_start <=
+            ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
+                GOTO(destroylock, compat = -EUSERS);
 
-                        compat = 0;
+        RETURN(compat);
+destroylock:
+        cfs_list_del_init(&req->l_res_link);
+        ldlm_lock_destroy_nolock(req);
+        *err = compat;
+        RETURN(compat);
+}
 
-                        /* there's a blocking group lock in front
-                         * of us on the queue.  It can be held
-                         * indefinitely, so don't timeout. */
-                        *flags |= LDLM_FL_NO_TIMEOUT;
+static void discard_bl_list(cfs_list_t *bl_list)
+{
+        cfs_list_t *tmp, *pos;
+        ENTRY;
 
-                        /* the only reason to continue traversing the
-                         * list at this point is to find the proper
-                         * place to insert the lock in the waitq. */
-                        if (!insertp)
-                                break;
-                }
+        cfs_list_for_each_safe(pos, tmp, bl_list) {
+                struct ldlm_lock *lock =
+                        cfs_list_entry(pos, struct ldlm_lock, l_bl_ast);
+
+                cfs_list_del_init(&lock->l_bl_ast);
+                LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+                lock->l_flags &= ~LDLM_FL_AST_SENT;
+                LASSERT(lock->l_bl_ast_run == 0);
+                LASSERT(lock->l_blocking_lock);
+                LDLM_LOCK_RELEASE(lock->l_blocking_lock);
+                lock->l_blocking_lock = NULL;
+                LDLM_LOCK_RELEASE(lock);
         }
+        EXIT;
+}
 
-        if (insertp != NULL) {
-                if (save != NULL)
-                        *insertp = save;
-                else
-                        *insertp = tmp;
-        }
+static inline void ldlm_process_extent_init(struct ldlm_lock *lock)
+{
+        lock->l_policy_data.l_extent.start = 0;
+        lock->l_policy_data.l_extent.end = OBD_OBJECT_EOF;
+}
 
-        RETURN(compat);
+static inline void ldlm_process_extent_fini(struct ldlm_lock *lock, int *flags)
+{
+        if (lock->l_traffic > 4)
+                lock->l_policy_data.l_extent.start = lock->l_req_extent.start;
+        ldlm_extent_internal_policy_fixup(lock,
+                                          &lock->l_policy_data.l_extent,
+                                          lock->l_traffic);
+        if (lock->l_req_extent.start != lock->l_policy_data.l_extent.start ||
+            lock->l_req_extent.end   != lock->l_policy_data.l_extent.end)
+                *flags |= LDLM_FL_LOCK_CHANGED;
 }
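
For illustration only (not part of the patch; the values are hypothetical, a
PW/CW mode is assumed, and the alignment step from the earlier sketch is
omitted), the two l_traffic thresholds applied above and in
ldlm_extent_internal_policy_fixup():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t max_grown = 32 * 1024 * 1024 - 1;        /* LDLM_MAX_GROWN_EXTENT */
        uint64_t req_start = 1 << 20, req_end = (2 << 20) - 1;  /* hypothetical 1 MB write */
        uint64_t start = 0, end = ~0ULL;                        /* tentatively expanded to EOF */
        int traffic = 40;                                       /* conflicting locks counted */

        if (traffic > 4)
                start = req_start;                      /* busy lock: don't grow downwards */
        if (traffic > 32 && req_end < req_start + max_grown &&
            end > req_start + max_grown)
                end = req_start + max_grown;            /* very busy PW/CW lock: cap at 32 MB */

        printf("granted extent [%llu, %llu]\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}
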
 
 /* If first_enq is 0 (ie, called from ldlm_reprocess_queue):
   *   - blocking ASTs have already been sent
-  *   - the caller has already initialized req->lr_tmp
   *   - must call this function with the ns lock held
   *
   * If first_enq is 1 (ie, called from ldlm_lock_enqueue):
   *   - blocking ASTs have not been sent
-  *   - the caller has NOT initialized req->lr_tmp, so we must
   *   - must call this function with the ns lock held once */
 int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
-                             ldlm_error_t *err, struct list_head *work_list)
+                             ldlm_error_t *err, cfs_list_t *work_list)
 {
         struct ldlm_resource *res = lock->l_resource;
-        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
-        struct list_head *insertp = NULL;
+        CFS_LIST_HEAD(rpc_list);
         int rc, rc2;
+        int contended_locks = 0;
         ENTRY;
 
-        LASSERT(list_empty(&res->lr_converting));
+        LASSERT(cfs_list_empty(&res->lr_converting));
+        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
+                !(lock->l_flags & LDLM_AST_DISCARD_DATA));
+        check_res_locked(res);
         *err = ELDLM_OK;
 
         if (!first_enq) {
-                /* -EWOULDBLOCK can't occur here since (flags & BLOCK_NOWAIT)
-                 * lock requests would either be granted or fail on their
-                 * first_enq. flags should always be zero here, and if that
-                 * ever changes we want to find out. */
+                /* Careful observers will note that we don't handle -EWOULDBLOCK
+                 * here, but it's ok for a non-obvious reason -- compat_queue
+                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
+                 * flags should always be zero here, and if that ever stops
+                 * being true, we want to find out. */
                 LASSERT(*flags == 0);
-                rc = ldlm_extent_compat_queue(&res->lr_granted, lock,
-                                              flags, NULL, NULL);
+                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
+                                              err, NULL, &contended_locks);
                 if (rc == 1) {
                         rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
-                                                      flags, NULL, NULL);
+                                                      flags, err, NULL,
+                                                      &contended_locks);
                 }
                 if (rc == 0)
                         RETURN(LDLM_ITER_STOP);
 
                 ldlm_resource_unlink_lock(lock);
-                ldlm_extent_policy(res, lock, flags);
-                lock_bitlock(lock);
-                lock->l_flags &= ~LDLM_FL_NO_TIMEOUT;
-                unlock_bitlock(lock);
+
+                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE)) {
+                        lock->l_policy_data.l_extent.start =
+                                lock->l_req_extent.start;
+                        lock->l_policy_data.l_extent.end =
+                                lock->l_req_extent.end;
+                } else {
+                        ldlm_process_extent_fini(lock, flags);
+                }
+
                 ldlm_grant_lock(lock, work_list);
                 RETURN(LDLM_ITER_CONTINUE);
         }
 
  restart:
-        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, &rpc_list,
-                                      NULL);
+        contended_locks = 0;
+
+        ldlm_process_extent_init(lock);
+
+        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
+                                      &rpc_list, &contended_locks);
         if (rc < 0)
-                GOTO(destroylock, rc);
+                GOTO(out, rc); /* lock was destroyed */
         if (rc == 2)
                 goto grant;
 
-        /* Traverse the waiting list in case there are other conflicting
-         * lock requests ahead of us in the queue and send blocking ASTs */
-        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, &rpc_list,
-                                       &insertp);
+        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
+                                       &rpc_list, &contended_locks);
         if (rc2 < 0)
-                GOTO(destroylock, rc);
+                GOTO(out, rc = rc2); /* lock was destroyed */
+
         if (rc + rc2 == 2) {
- grant:
-                ldlm_extent_policy(res, lock, flags);
+        grant:
                 ldlm_resource_unlink_lock(lock);
-                lock->l_flags &= ~LDLM_FL_NO_TIMEOUT;
+                ldlm_process_extent_fini(lock, flags);
                 ldlm_grant_lock(lock, NULL);
         } else {
                 /* If either of the compat_queue()s returned failure, then we
@@ -435,36 +681,68 @@ int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                  * bug 2322: we used to unlink and re-add here, which was a
                  * terrible folly -- if we goto restart, we could get
                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (list_empty(&lock->l_res_link))
-                        ldlm_resource_add_lock(res, insertp, lock);
+                if (cfs_list_empty(&lock->l_res_link))
+                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                 unlock_res(res);
-                rc = ldlm_run_bl_ast_work(&rpc_list);
+                rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
+
+                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
+                    !ns_is_client(ldlm_res_to_ns(res)))
+                        class_fail_export(lock->l_export);
                 lock_res(res);
-                if (rc == -ERESTART)
+                if (rc == -ERESTART) {
+
+                        /* 15715: The lock was granted and destroyed after
+                         * resource lock was dropped. Interval node was freed
+                         * in ldlm_lock_destroy. Anyway, this always happens
+                         * when a client is being evicted. So it would be
+                         * ok to return an error. -jay */
+                        if (lock->l_destroyed) {
+                                *err = -EAGAIN;
+                                GOTO(out, rc = -EAGAIN);
+                        }
+
+                        /* lock was granted while resource was unlocked. */
+                        if (lock->l_granted_mode == lock->l_req_mode) {
+                                /* bug 11300: if the lock has been granted,
+                                 * break earlier because otherwise, we will go
+                                 * to restart and ldlm_resource_unlink will be
+                                 * called and it causes the interval node to be
+                                 * freed. Then we will fail at
+                                 * ldlm_extent_add_lock() */
+                                *flags &= ~(LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV |
+                                            LDLM_FL_BLOCK_WAIT);
+                                GOTO(out, rc = 0);
+                        }
+
                         GOTO(restart, -ERESTART);
+                }
+
                 *flags |= LDLM_FL_BLOCK_GRANTED;
-        }
+                /* this way we force client to wait for the lock
+                 * endlessly once the lock is enqueued -bzzz */
+                *flags |= LDLM_FL_NO_TIMEOUT;
 
+        }
         RETURN(0);
-
- destroylock:
-        list_del_init(&lock->l_res_link);
-        unlock_res(res);
-        ldlm_lock_destroy(lock);
-        lock_res(res);
-        *err = rc;
+out:
+        if (!cfs_list_empty(&rpc_list)) {
+                LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
+                discard_bl_list(&rpc_list);
+        }
         RETURN(rc);
 }
 
 /* When a lock is cancelled by a client, the KMS may undergo change if this
  * is the "highest lock".  This function returns the new KMS value.
- * Caller must hold ns_lock already. 
+ * Caller must hold ns_lock already.
  *
  * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
 {
         struct ldlm_resource *res = lock->l_resource;
-        struct list_head *tmp;
+        cfs_list_t *tmp;
         struct ldlm_lock *lck;
         __u64 kms = 0;
         ENTRY;
@@ -472,11 +750,10 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
         /* don't let another thread in ldlm_extent_shift_kms race in
          * just after we finish and take our lock into account in its
          * calculation of the kms */
-
         lock->l_flags |= LDLM_FL_KMS_IGNORE;
 
-        list_for_each(tmp, &res->lr_granted) {
-                lck = list_entry(tmp, struct ldlm_lock, l_res_link);
+        cfs_list_for_each(tmp, &res->lr_granted) {
+                lck = cfs_list_entry(tmp, struct ldlm_lock, l_res_link);
 
                 if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                         continue;
@@ -493,3 +770,125 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
 
         RETURN(kms);
 }
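
For illustration only (not part of the patch; the extents are hypothetical), a
worked example of the rule above that a lock on [x, y] protects a KMS of up to
y + 1 bytes: when the highest lock is cancelled, the KMS falls back to the
largest remaining end + 1, and never grows past the old value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* ends of the granted extents that remain after the cancel */
        uint64_t remaining_ends[] = { 4095, 65535 };
        uint64_t old_kms = 1048576;        /* the cancelled lock ended at 1048575 */
        uint64_t kms = 0;

        for (int i = 0; i < 2; i++)
                if (remaining_ends[i] + 1 > kms)
                        kms = remaining_ends[i] + 1;
        if (kms > old_kms)
                kms = old_kms;             /* never raise the KMS here */

        printf("new kms = %llu\n", (unsigned long long)kms);    /* 65536 */
        return 0;
}
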
+
+cfs_mem_cache_t *ldlm_interval_slab;
+struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
+{
+        struct ldlm_interval *node;
+        ENTRY;
+
+        LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
+        OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO);
+        if (node == NULL)
+                RETURN(NULL);
+
+        CFS_INIT_LIST_HEAD(&node->li_group);
+        ldlm_interval_attach(node, lock);
+        RETURN(node);
+}
+
+void ldlm_interval_free(struct ldlm_interval *node)
+{
+        if (node) {
+                LASSERT(cfs_list_empty(&node->li_group));
+                LASSERT(!interval_is_intree(&node->li_node));
+                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
+        }
+}
+
+/* interval tree, for LDLM_EXTENT. */
+void ldlm_interval_attach(struct ldlm_interval *n,
+                          struct ldlm_lock *l)
+{
+        LASSERT(l->l_tree_node == NULL);
+        LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
+
+        cfs_list_add_tail(&l->l_sl_policy, &n->li_group);
+        l->l_tree_node = n;
+}
+
+struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
+{
+        struct ldlm_interval *n = l->l_tree_node;
+
+        if (n == NULL)
+                return NULL;
+
+        LASSERT(!cfs_list_empty(&n->li_group));
+        l->l_tree_node = NULL;
+        cfs_list_del_init(&l->l_sl_policy);
+
+        return (cfs_list_empty(&n->li_group) ? n : NULL);
+}
+
+static inline int lock_mode_to_index(ldlm_mode_t mode)
+{
+        int index;
+
+        LASSERT(mode != 0);
+        LASSERT(IS_PO2(mode));
+        for (index = -1; mode; index++, mode >>= 1) ;
+        LASSERT(index < LCK_MODE_NUM);
+        return index;
+}
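
For illustration only (not part of the patch; the LCK_* values listed are the
usual Lustre encoding and are assumed here, not taken from this file), a
standalone sketch of how the loop above maps a power-of-two lock mode to the
index of its per-mode interval tree in res->lr_itree[]:

#include <stdio.h>

static int mode_to_index(unsigned int mode)
{
        int index;

        /* same trick as lock_mode_to_index(): count shifts of the single set bit */
        for (index = -1; mode; index++, mode >>= 1)
                ;
        return index;
}

int main(void)
{
        /* assumed encoding: LCK_EX=1, LCK_PW=2, LCK_PR=4, LCK_CW=8,
         * LCK_CR=16, LCK_NL=32, LCK_GROUP=64 */
        unsigned int modes[] = { 1, 2, 4, 8, 16, 32, 64 };

        for (int i = 0; i < 7; i++)
                printf("mode %2u -> index %d\n", modes[i], mode_to_index(modes[i]));
        return 0;
}
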
+
+void ldlm_extent_add_lock(struct ldlm_resource *res,
+                          struct ldlm_lock *lock)
+{
+        struct interval_node *found, **root;
+        struct ldlm_interval *node;
+        struct ldlm_extent *extent;
+        int idx;
+
+        LASSERT(lock->l_granted_mode == lock->l_req_mode);
+
+        node = lock->l_tree_node;
+        LASSERT(node != NULL);
+        LASSERT(!interval_is_intree(&node->li_node));
+
+        idx = lock_mode_to_index(lock->l_granted_mode);
+        LASSERT(lock->l_granted_mode == 1 << idx);
+        LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
+
+        /* node extent initialize */
+        extent = &lock->l_policy_data.l_extent;
+        interval_set(&node->li_node, extent->start, extent->end);
+
+        root = &res->lr_itree[idx].lit_root;
+        found = interval_insert(&node->li_node, root);
+        if (found) { /* The policy group found. */
+                struct ldlm_interval *tmp = ldlm_interval_detach(lock);
+                LASSERT(tmp != NULL);
+                ldlm_interval_free(tmp);
+                ldlm_interval_attach(to_ldlm_interval(found), lock);
+        }
+        res->lr_itree[idx].lit_size++;
+
+        /* even though we use the interval tree to manage the extent lock, we
+         * also add the locks into the granted list, for debugging purposes */
+        ldlm_resource_add_lock(res, &res->lr_granted, lock);
+}
+
+void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
+{
+        struct ldlm_resource *res = lock->l_resource;
+        struct ldlm_interval *node = lock->l_tree_node;
+        struct ldlm_interval_tree *tree;
+        int idx;
+
+        if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
+                return;
+
+        idx = lock_mode_to_index(lock->l_granted_mode);
+        LASSERT(lock->l_granted_mode == 1 << idx);
+        tree = &res->lr_itree[idx];
+
+        LASSERT(tree->lit_root != NULL); /* ensure the tree is not empty */
+
+        tree->lit_size--;
+        node = ldlm_interval_detach(lock);
+        if (node) {
+                interval_erase(&node->li_node, &tree->lit_root);
+                ldlm_interval_free(node);
+        }
+}