LU-11085 llite: reimplement range_lock with Linux interval_tree 26/39726/8
author Mr NeilBrown <neilb@suse.de>
Tue, 25 Aug 2020 01:48:34 +0000 (11:48 +1000)
committer Oleg Drokin <green@whamcloud.com>
Wed, 5 May 2021 02:49:34 +0000 (02:49 +0000)
As a step towards removing the lustre interval-tree implementation,
reimplement range_lock to use Linux interval trees.

Because Linux interval trees allow the same interval to be stored twice,
the rl_next_lock list and its associated code can be removed.
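
For reference, the Linux interval-tree template generates its insert,
remove and iterate helpers from a few field accessors, and it stores
identical ranges side by side.  Below is a minimal sketch of that
pattern; the demo_* names are hypothetical, and struct rb_root_cached /
RB_ROOT_CACHED are the mainline (v4.14+) types that the patch's
struct interval_tree_root compat alias presumably resolves to.

/*
 * Minimal sketch of the linux/interval_tree_generic.h pattern adopted
 * below.  The demo_node / demo_it_* names are illustrative only.
 */
#include <linux/interval_tree_generic.h>

struct demo_node {
	struct rb_node	dn_rb;
	u64		dn_start;
	u64		dn_last;	 /* inclusive last unit of the range */
	u64		dn_subtree_last; /* maintained by the generated code */
};

#define DEMO_START(n)	((n)->dn_start)
#define DEMO_LAST(n)	((n)->dn_last)

INTERVAL_TREE_DEFINE(struct demo_node, dn_rb, u64, dn_subtree_last,
		     DEMO_START, DEMO_LAST, static, demo_it)

static void demo_duplicates(void)
{
	struct rb_root_cached root = RB_ROOT_CACHED;
	struct demo_node a = { .dn_start = 0, .dn_last = 9 };
	struct demo_node b = { .dn_start = 0, .dn_last = 9 }; /* same range */
	struct demo_node *it;
	int hits = 0;

	/* Unlike the lustre interval tree being replaced, both inserts
	 * succeed even though the ranges are identical, so no
	 * rl_next_lock-style side list is needed for duplicates. */
	demo_it_insert(&a, &root);
	demo_it_insert(&b, &root);

	/* The overlap iteration visits every node, duplicates included. */
	for (it = demo_it_iter_first(&root, 0, 9); it;
	     it = demo_it_iter_next(it, 0, 9))
		hits++;		/* ends with hits == 2 */

	demo_it_remove(&a, &root);
	demo_it_remove(&b, &root);
}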

Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: I1d1669345fac0945e0e189b87a74ca8e7582e842
Reviewed-on: https://review.whamcloud.com/39726
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/range_lock.h
lustre/obdclass/range_lock.c

diff --git a/lustre/include/range_lock.h b/lustre/include/range_lock.h
index 5266db7..674b27d 100644
 #define _RANGE_LOCK_H
 
 #include <libcfs/libcfs.h>
-#include <interval_tree.h>
 
 #define RL_FMT "[%llu, %llu]"
-#define RL_PARA(range)                         \
-       (range)->rl_node.in_extent.start,       \
-       (range)->rl_node.in_extent.end
+#define RL_PARA(range)                                 \
+       (unsigned long long)(range)->rl_start,  \
+       (unsigned long long)(range)->rl_end
 
 struct range_lock {
-       struct interval_node    rl_node;
+       __u64                           rl_start,
+                                       rl_end,
+                                       rl_subtree_last;
+       struct rb_node                  rl_rb;
        /**
         * Process to enqueue this lock.
         */
-       struct task_struct      *rl_task;
-       /**
-        * List of locks with the same range.
-        */
-       struct list_head        rl_next_lock;
-       /**
-        * Number of locks in the list rl_next_lock
-        */
-       unsigned int            rl_lock_count;
+       struct task_struct              *rl_task;
        /**
         * Number of ranges which are blocking acquisition of the lock
         */
-       unsigned int            rl_blocking_ranges;
+       unsigned int                    rl_blocking_ranges;
        /**
         * Sequence number of range lock. This number is used to get to know
-        * the order the locks are queued; this is required for range_cancel().
+        * the order the locks are queued.  One lock can only block another
+        * if it has a higher rl_sequence.
         */
-       __u64                   rl_sequence;
+       __u64                           rl_sequence;
 };
 
-static inline struct range_lock *node2rangelock(const struct interval_node *n)
-{
-       return container_of(n, struct range_lock, rl_node);
-}
-
 struct range_lock_tree {
-       struct interval_node    *rlt_root;
-       spinlock_t               rlt_lock;
-       __u64                    rlt_sequence;
+       struct interval_tree_root       rlt_root;
+       spinlock_t                      rlt_lock;
+       __u64                           rlt_sequence;
 };
 
 void range_lock_tree_init(struct range_lock_tree *tree);
-int  range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
+void range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
 int  range_lock(struct range_lock_tree *tree, struct range_lock *lock);
 void range_unlock(struct range_lock_tree *tree, struct range_lock *lock);
 #endif
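
Taken together, the header now exposes a four-call API.  A hypothetical
caller (names and offsets below are illustrative, not from the patch)
might look like the following; note that range_lock() can sleep until
every conflicting holder queued ahead of it has dropped out.

/* Illustrative use of the API declared above; the surrounding I/O path
 * and error handling are elided.  The tree is assumed to have been set
 * up once with range_lock_tree_init().  Offsets are byte addresses: as
 * the implementation below shows, range_lock_init() shifts them down
 * by PAGE_SHIFT (leaving LUSTRE_EOF as-is). */
static int demo_rw_under_range_lock(struct range_lock_tree *tree,
				    __u64 start, __u64 end)
{
	struct range_lock lock;
	int rc;

	range_lock_init(&lock, start, end);	/* covers [start, end] */

	rc = range_lock(tree, &lock);		/* may sleep on conflicts */
	if (rc)
		return rc;

	/* ... do the range-protected work here ... */

	range_unlock(tree, &lock);		/* wakes later waiters */
	return 0;
}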
diff --git a/lustre/obdclass/range_lock.c b/lustre/obdclass/range_lock.c
index 140337f..ae3ed9a 100644
 #ifdef HAVE_SCHED_HEADERS
 #include <linux/sched/signal.h>
 #endif
+#include <linux/interval_tree_generic.h>
 #include <uapi/linux/lustre/lustre_user.h>
 #include <range_lock.h>
 
+#define START(node)    ((node)->rl_start)
+#define LAST(node)     ((node)->rl_end)
+
+INTERVAL_TREE_DEFINE(struct range_lock, rl_rb, __u64, rl_subtree_last,
+                    START, LAST, static, range_lock)
+
 /**
  * Initialize a range lock tree
  *
@@ -49,7 +56,7 @@
  */
 void range_lock_tree_init(struct range_lock_tree *tree)
 {
-       tree->rlt_root = NULL;
+       tree->rlt_root = INTERVAL_TREE_ROOT;
        tree->rlt_sequence = 0;
        spin_lock_init(&tree->rlt_lock);
 }
@@ -65,63 +72,20 @@ EXPORT_SYMBOL(range_lock_tree_init);
  * Pre:  Caller should have allocated the range lock node.
  * Post: The range lock node is meant to cover [start, end] region
  */
-int range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
+void range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
 {
-       int rc;
-
-       interval_init(&lock->rl_node);
+       start >>= PAGE_SHIFT;
        if (end != LUSTRE_EOF)
                end >>= PAGE_SHIFT;
-       rc = interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
-       if (rc)
-               return rc;
+       lock->rl_start = start;
+       lock->rl_end = end;
 
-       INIT_LIST_HEAD(&lock->rl_next_lock);
        lock->rl_task = NULL;
-       lock->rl_lock_count = 0;
        lock->rl_blocking_ranges = 0;
        lock->rl_sequence = 0;
-       return rc;
 }
 EXPORT_SYMBOL(range_lock_init);
 
-static inline struct range_lock *next_lock(struct range_lock *lock)
-{
-       return list_entry(lock->rl_next_lock.next, typeof(*lock), rl_next_lock);
-}
-
-/**
- * Helper function of range_unlock()
- *
- * \param node [in]    a range lock found overlapped during interval node
- *                     search
- * \param arg [in]     the range lock to be tested
- *
- * \retval INTERVAL_ITER_CONT  indicate to continue the search for next
- *                             overlapping range node
- * \retval INTERVAL_ITER_STOP  indicate to stop the search
- */
-static enum interval_iter range_unlock_cb(struct interval_node *node, void *arg)
-{
-       struct range_lock *lock = arg;
-       struct range_lock *overlap = node2rangelock(node);
-       struct range_lock *iter;
-       ENTRY;
-
-       list_for_each_entry(iter, &overlap->rl_next_lock, rl_next_lock) {
-               if (iter->rl_sequence > lock->rl_sequence) {
-                       --iter->rl_blocking_ranges;
-                       LASSERT(iter->rl_blocking_ranges > 0);
-               }
-       }
-       if (overlap->rl_sequence > lock->rl_sequence) {
-               --overlap->rl_blocking_ranges;
-               if (overlap->rl_blocking_ranges == 0)
-                       wake_up_process(overlap->rl_task);
-       }
-       RETURN(INTERVAL_ITER_CONT);
-}
-
 /**
  * Unlock a range lock, wake up locks blocked by this lock.
  *
@@ -130,42 +94,30 @@ static enum interval_iter range_unlock_cb(struct interval_node *node, void *arg)
  *
 * If this lock has been granted, release it; if not, just delete it from
  * the tree or the same region lock list. Wake up those locks only blocked
- * by this lock through range_unlock_cb().
+ * by this lock.
  */
 void range_unlock(struct range_lock_tree *tree, struct range_lock *lock)
 {
+       struct range_lock *overlap;
        ENTRY;
 
        spin_lock(&tree->rlt_lock);
-       if (!list_empty(&lock->rl_next_lock)) {
-               struct range_lock *next;
-
-               if (interval_is_intree(&lock->rl_node)) { /* first lock */
-                       /* Insert the next same range lock into the tree */
-                       next = next_lock(lock);
-                       next->rl_lock_count = lock->rl_lock_count - 1;
-                       interval_erase(&lock->rl_node, &tree->rlt_root);
-                       interval_insert(&next->rl_node, &tree->rlt_root);
-               } else {
-                       /* find the first lock in tree */
-                       list_for_each_entry(next, &lock->rl_next_lock,
-                                           rl_next_lock) {
-                               if (!interval_is_intree(&next->rl_node))
-                                       continue;
-
-                               LASSERT(next->rl_lock_count > 0);
-                               next->rl_lock_count--;
-                               break;
-                       }
+
+       range_lock_remove(lock, &tree->rlt_root);
+
+       for (overlap = range_lock_iter_first(&tree->rlt_root,
+                                            lock->rl_start,
+                                            lock->rl_end);
+            overlap;
+            overlap = range_lock_iter_next(overlap,
+                                           lock->rl_start,
+                                           lock->rl_end))
+               if (overlap->rl_sequence > lock->rl_sequence) {
+                       --overlap->rl_blocking_ranges;
+                       if (overlap->rl_blocking_ranges == 0)
+                               wake_up_process(overlap->rl_task);
                }
-               list_del_init(&lock->rl_next_lock);
-       } else {
-               LASSERT(interval_is_intree(&lock->rl_node));
-               interval_erase(&lock->rl_node, &tree->rlt_root);
-       }
 
-       interval_search(tree->rlt_root, &lock->rl_node.in_extent,
-                       range_unlock_cb, lock);
        spin_unlock(&tree->rlt_lock);
 
        EXIT;
@@ -173,26 +125,6 @@ void range_unlock(struct range_lock_tree *tree, struct range_lock *lock)
 EXPORT_SYMBOL(range_unlock);
 
 /**
- * Helper function of range_lock()
- *
- * \param node [in]    a range lock found overlapped during interval node
- *                     search
- * \param arg [in]     the range lock to be tested
- *
- * \retval INTERVAL_ITER_CONT  indicate to continue the search for next
- *                             overlapping range node
- * \retval INTERVAL_ITER_STOP  indicate to stop the search
- */
-static enum interval_iter range_lock_cb(struct interval_node *node, void *arg)
-{
-       struct range_lock *lock = (struct range_lock *)arg;
-       struct range_lock *overlap = node2rangelock(node);
-
-       lock->rl_blocking_ranges += overlap->rl_lock_count + 1;
-       RETURN(INTERVAL_ITER_CONT);
-}
-
-/**
  * Lock a region
  *
  * \param tree [in]    range lock tree
@@ -207,7 +139,7 @@ static enum interval_iter range_lock_cb(struct interval_node *node, void *arg)
  */
 int range_lock(struct range_lock_tree *tree, struct range_lock *lock)
 {
-       struct interval_node *node;
+       struct range_lock *overlap;
        int rc = 0;
        ENTRY;
 
@@ -216,20 +148,16 @@ int range_lock(struct range_lock_tree *tree, struct range_lock *lock)
         * We need to check for all conflicting intervals
         * already in the tree.
         */
-       interval_search(tree->rlt_root, &lock->rl_node.in_extent,
-                       range_lock_cb, lock);
-       /*
-        * Insert to the tree if I am unique, otherwise I've been linked to
-        * the rl_next_lock of another lock which has the same range as mine
-        * in range_lock_cb().
-        */
-       node = interval_insert(&lock->rl_node, &tree->rlt_root);
-       if (node != NULL) {
-               struct range_lock *tmp = node2rangelock(node);
-
-               list_add_tail(&lock->rl_next_lock, &tmp->rl_next_lock);
-               tmp->rl_lock_count++;
-       }
+       for (overlap = range_lock_iter_first(&tree->rlt_root,
+                                            lock->rl_start,
+                                            lock->rl_end);
+            overlap;
+            overlap = range_lock_iter_next(overlap,
+                                           lock->rl_start,
+                                           lock->rl_end))
+               lock->rl_blocking_ranges += 1;
+
+       range_lock_insert(lock, &tree->rlt_root);
        lock->rl_sequence = ++tree->rlt_sequence;
 
        while (lock->rl_blocking_ranges > 0) {