/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Range lock is used to allow multiple threads writing a single shared
 * file given each thread is writing to a non-overlapping portion of the
 * file.
 *
 * Refer to the possible upstream kernel version of range lock by
 * Jan Kara <jack@suse.cz>: https://lkml.org/lkml/2013/1/31/480
 *
 * This file could later be replaced by the upstream kernel version.
 *
 * Author: Prakash Surya <surya1@llnl.gov>
 * Author: Bobi Jam <bobijam.xu@intel.com>
 */
#include "range_lock.h"
#include <lustre/lustre_user.h>
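
/*
 * Illustrative usage sketch (not part of this file): a file-level object
 * embeds a struct range_lock_tree, initializes it once, and each writer
 * takes a range lock around its own byte range.  Names outside the
 * range_lock API itself (obj, my_object, pos, count) are hypothetical.
 *
 *	struct my_object {
 *		struct range_lock_tree	write_tree;
 *		...
 *	};
 *
 *	// once, when the object is set up
 *	range_lock_tree_init(&obj->write_tree);
 *
 *	// per write, [pos, pos + count - 1] in bytes
 *	struct range_lock range;
 *	int rc;
 *
 *	range_lock_init(&range, pos, pos + count - 1);
 *	rc = range_lock(&obj->write_tree, &range);
 *	if (rc == 0) {
 *		// ... perform the write ...
 *		range_unlock(&obj->write_tree, &range);
 *	}
 */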
/**
 * Initialize a range lock tree
 *
 * \param tree	[in]	an empty range lock tree
 *
 * Pre:  Caller should have allocated the range lock tree.
 * Post: The range lock tree is ready to function.
 */
void range_lock_tree_init(struct range_lock_tree *tree)
{
	tree->rlt_root = NULL;
	tree->rlt_sequence = 0;
	spin_lock_init(&tree->rlt_lock);
}
/**
 * Initialize a range lock node
 *
 * \param lock	[in]	an empty range lock node
 * \param start	[in]	start of the covering region
 * \param end	[in]	end of the covering region
 *
 * Pre:  Caller should have allocated the range lock node.
 * Post: The range lock node covers the [start, end] region.
 */
void range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
{
	interval_init(&lock->rl_node);
	if (end != LUSTRE_EOF)
		end >>= PAGE_SHIFT;
	interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
	INIT_LIST_HEAD(&lock->rl_next_lock);
	lock->rl_lock_count = 0;
	lock->rl_blocking_ranges = 0;
	lock->rl_sequence = 0;
}
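
/*
 * Worked example of the byte-to-page mapping above (illustrative only):
 * with a PAGE_SHIFT of 12 (4K pages), range_lock_init(&lock, 6000, 9999)
 * covers the page-index interval [1, 2], so it conflicts with any other
 * lock touching page 1 or 2.  Passing LUSTRE_EOF as the end leaves the
 * end unshifted, keeping the interval open-ended to the end of the file.
 */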
/* Return the lock that follows \a lock on the same-range lock list. */
static inline struct range_lock *next_lock(struct range_lock *lock)
{
	return list_entry(lock->rl_next_lock.next, typeof(*lock),
			  rl_next_lock);
}
/**
 * Helper function of range_unlock()
 *
 * \param node	[in]	a range lock found overlapped during interval node
 *			search
 * \param arg	[in]	the range lock to be tested
 *
 * \retval INTERVAL_ITER_CONT	indicates to continue the search for the
 *				next overlapping range node
 * \retval INTERVAL_ITER_STOP	indicates to stop the search
 */
static enum interval_iter range_unlock_cb(struct interval_node *node, void *arg)
{
	struct range_lock *lock = arg;
	struct range_lock *overlap = node2rangelock(node);
	struct range_lock *iter;

	list_for_each_entry(iter, &overlap->rl_next_lock, rl_next_lock) {
		if (iter->rl_sequence > lock->rl_sequence) {
			--iter->rl_blocking_ranges;
			LASSERT(iter->rl_blocking_ranges > 0);
		}
	}
	if (overlap->rl_sequence > lock->rl_sequence) {
		--overlap->rl_blocking_ranges;
		if (overlap->rl_blocking_ranges == 0)
			wake_up_process(overlap->rl_task);
	}
	RETURN(INTERVAL_ITER_CONT);
}
/**
 * Unlock a range lock, wake up locks blocked by this lock.
 *
 * \param tree	[in]	range lock tree
 * \param lock	[in]	range lock to be deleted
 *
 * If this lock has been granted, release it; if not, just delete it from
 * the tree or the same-range lock list. Wake up those locks blocked only
 * by this lock through range_unlock_cb().
 */
void range_unlock(struct range_lock_tree *tree, struct range_lock *lock)
{
	spin_lock(&tree->rlt_lock);
	if (!list_empty(&lock->rl_next_lock)) {
		struct range_lock *next;

		if (interval_is_intree(&lock->rl_node)) { /* first lock */
			/* Insert the next same range lock into the tree */
			next = next_lock(lock);
			next->rl_lock_count = lock->rl_lock_count - 1;
			interval_erase(&lock->rl_node, &tree->rlt_root);
			interval_insert(&next->rl_node, &tree->rlt_root);
		} else {
			/* find the first lock in tree */
			list_for_each_entry(next, &lock->rl_next_lock,
					    rl_next_lock) {
				if (!interval_is_intree(&next->rl_node))
					continue;

				LASSERT(next->rl_lock_count > 0);
				next->rl_lock_count--;
				break;
			}
		}
		list_del_init(&lock->rl_next_lock);
	} else {
		LASSERT(interval_is_intree(&lock->rl_node));
		interval_erase(&lock->rl_node, &tree->rlt_root);
	}

	interval_search(tree->rlt_root, &lock->rl_node.in_extent,
			range_unlock_cb, lock);
	spin_unlock(&tree->rlt_lock);
}
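
/*
 * Worked example of the wake-up rules above (illustrative only): suppose
 * overlapping locks A, B and C enqueued with sequence numbers 1, 2 and 3.
 * B counted A among its blocking ranges, C counted both A and B.  When A
 * is unlocked, range_unlock_cb() decrements rl_blocking_ranges of every
 * overlapping lock with a higher sequence number: B drops to zero and its
 * task is woken, while C still waits for B.
 */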
/**
 * Helper function of range_lock()
 *
 * \param node	[in]	a range lock found overlapped during interval node
 *			search
 * \param arg	[in]	the range lock to be tested
 *
 * \retval INTERVAL_ITER_CONT	indicates to continue the search for the
 *				next overlapping range node
 * \retval INTERVAL_ITER_STOP	indicates to stop the search
 */
static enum interval_iter range_lock_cb(struct interval_node *node, void *arg)
{
	struct range_lock *lock = (struct range_lock *)arg;
	struct range_lock *overlap = node2rangelock(node);

	lock->rl_blocking_ranges += overlap->rl_lock_count + 1;
	RETURN(INTERVAL_ITER_CONT);
}
/**
 * Lock a region
 *
 * \param tree	[in]	range lock tree
 * \param lock	[in]	range lock node containing the region span
 *
 * \retval 0	got the range lock
 * \retval <0	error code if the range lock was not obtained
 *
 * If an overlapping range lock already exists, the new lock will wait and
 * retry; if it later finds that it is not the chosen one to wake up, it
 * waits again.
 */
int range_lock(struct range_lock_tree *tree, struct range_lock *lock)
{
	struct interval_node *node;
	int rc = 0;

	spin_lock(&tree->rlt_lock);
	/*
	 * We need to check for all conflicting intervals
	 * already in the tree.
	 */
	interval_search(tree->rlt_root, &lock->rl_node.in_extent,
			range_lock_cb, lock);
	/*
	 * Insert to the tree if I am unique; otherwise link me to the
	 * rl_next_lock list of the lock already in the tree which has the
	 * same range as mine.
	 */
	node = interval_insert(&lock->rl_node, &tree->rlt_root);
	if (node != NULL) {
		struct range_lock *tmp = node2rangelock(node);

		list_add_tail(&lock->rl_next_lock, &tmp->rl_next_lock);
		tmp->rl_lock_count++;
	}
	lock->rl_sequence = ++tree->rlt_sequence;

	while (lock->rl_blocking_ranges > 0) {
		lock->rl_task = current;
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&tree->rlt_lock);
		schedule();

		if (signal_pending(current)) {
			range_unlock(tree, lock);
			GOTO(out, rc = -EINTR);
		}
		spin_lock(&tree->rlt_lock);
	}
	spin_unlock(&tree->rlt_lock);
out:
	RETURN(rc);
}
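
/*
 * Caller-side sketch (hypothetical helper, not part of this file): since
 * range_lock() sleeps interruptibly, a caller must be prepared for -EINTR
 * and must not unlock afterwards, because range_lock() has already removed
 * the lock from the tree on the signal path.
 *
 *	static int do_ranged_write(struct range_lock_tree *tree,
 *				   __u64 pos, __u64 count)
 *	{
 *		struct range_lock range;
 *		int rc;
 *
 *		range_lock_init(&range, pos, pos + count - 1);
 *		rc = range_lock(tree, &range);
 *		if (rc < 0)
 *			return rc;	// e.g. -EINTR, lock not held
 *
 *		// ... write [pos, pos + count - 1] ...
 *
 *		range_unlock(tree, &range);
 *		return 0;
 *	}
 */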