/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * A range lock allows multiple threads to write to a single shared file
 * in parallel, provided each thread writes to a non-overlapping portion
 * of the file.
 *
 * Refer to the possible upstream kernel version of range lock by
 * Jan Kara <jack@suse.cz>: https://lkml.org/lkml/2013/1/31/480
 *
 * This file could later be replaced by the upstream kernel version.
 */
/*
 * Author: Prakash Surya <surya1@llnl.gov>
 * Author: Bobi Jam <bobijam.xu@intel.com>
 */
#include "range_lock.h"
#include <lustre/lustre_user.h>
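
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * code): one range lock tree is kept per shared file, and each writer
 * takes a lock over the byte range it intends to write. io_tree,
 * do_write(), start and end are hypothetical names.
 *
 *	struct range_lock range;
 *	int rc;
 *
 *	rc = range_lock_init(&range, start, end);
 *	if (rc == 0)
 *		rc = range_lock(&io_tree, &range);
 *	if (rc == 0) {
 *		do_write(start, end);
 *		range_unlock(&io_tree, &range);
 *	}
 */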
/**
 * Initialize a range lock tree
 *
 * \param tree	[in]	an empty range lock tree
 *
 * Pre:  Caller should have allocated the range lock tree.
 * Post: The range lock tree is ready to function.
 */
void range_lock_tree_init(struct range_lock_tree *tree)
{
	tree->rlt_root = NULL;
	tree->rlt_sequence = 0;
	spin_lock_init(&tree->rlt_lock);
}
/**
 * Initialize a range lock node
 *
 * \param lock	[in]	an empty range lock node
 * \param start	[in]	start of the covering region
 * \param end	[in]	end of the covering region
 *
 * Pre:  Caller should have allocated the range lock node.
 * Post: The range lock node is meant to cover the [start, end] region.
 */
int range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
{
	int rc;

	interval_init(&lock->rl_node);
	if (end != LUSTRE_EOF)
		end >>= PAGE_SHIFT;
	rc = interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
	if (rc)
		return rc;
	INIT_LIST_HEAD(&lock->rl_next_lock);
	lock->rl_task = NULL;
	lock->rl_lock_count = 0;
	lock->rl_blocking_ranges = 0;
	lock->rl_sequence = 0;
	return rc;
}
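
/*
 * Worked example (illustrative, assuming a 4KB PAGE_SIZE): the call
 * range_lock_init(&lock, 0, 8191) covers page indexes [0, 1], since
 * byte offsets are shifted down by PAGE_SHIFT before being stored.
 * Two byte ranges touching the same page therefore conflict even if
 * they do not overlap byte-wise, while end == LUSTRE_EOF keeps its
 * sentinel meaning of "to the end of the file".
 */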
static inline struct range_lock *next_lock(struct range_lock *lock)
{
	return list_entry(lock->rl_next_lock.next, typeof(*lock),
			  rl_next_lock);
}
/**
 * Helper function of range_unlock()
 *
 * \param node	[in]	a range lock found overlapping during the interval
 *			node search
 * \param arg	[in]	the range lock being released
 *
 * \retval INTERVAL_ITER_CONT	continue the search for the next
 *				overlapping range node
 * \retval INTERVAL_ITER_STOP	stop the search
 */
static enum interval_iter range_unlock_cb(struct interval_node *node, void *arg)
{
	struct range_lock *lock = arg;
	struct range_lock *overlap = node2rangelock(node);
	struct range_lock *iter;
	ENTRY;

	/* Only locks queued after 'lock' counted it as a blocker; they
	 * still wait on the in-tree lock itself, so their count must
	 * stay above zero. */
	list_for_each_entry(iter, &overlap->rl_next_lock, rl_next_lock) {
		if (iter->rl_sequence > lock->rl_sequence) {
			--iter->rl_blocking_ranges;
			LASSERT(iter->rl_blocking_ranges > 0);
		}
	}
	if (overlap->rl_sequence > lock->rl_sequence) {
		--overlap->rl_blocking_ranges;
		if (overlap->rl_blocking_ranges == 0)
			wake_up_process(overlap->rl_task);
	}
	RETURN(INTERVAL_ITER_CONT);
}
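
/*
 * Worked example (illustrative): let locks A and B overlap, with A
 * queued first (A->rl_sequence < B->rl_sequence). B counted A among its
 * rl_blocking_ranges when it was queued. When A is released and
 * range_unlock_cb() visits B, the sequence check confirms B arrived
 * after A, B's count drops by one, and B's task is woken once the count
 * reaches zero. A lock queued before A never counted A, so the sequence
 * check correctly leaves it untouched.
 */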
/**
 * Unlock a range lock, wake up locks blocked by this lock.
 *
 * \param tree	[in]	range lock tree
 * \param lock	[in]	range lock to be deleted
 *
 * If this lock has been granted, release it; if not, just delete it from
 * the tree (or from the list of locks covering the same region). Then wake
 * up, via range_unlock_cb(), only those locks blocked by this one.
 */
void range_unlock(struct range_lock_tree *tree, struct range_lock *lock)
{
	ENTRY;

	spin_lock(&tree->rlt_lock);
	if (!list_empty(&lock->rl_next_lock)) {
		struct range_lock *next;

		if (interval_is_intree(&lock->rl_node)) { /* first lock */
			/* Insert the next same range lock into the tree */
			next = next_lock(lock);
			next->rl_lock_count = lock->rl_lock_count - 1;
			interval_erase(&lock->rl_node, &tree->rlt_root);
			interval_insert(&next->rl_node, &tree->rlt_root);
		} else {
			/* find the first lock in tree */
			list_for_each_entry(next, &lock->rl_next_lock,
					    rl_next_lock) {
				if (!interval_is_intree(&next->rl_node))
					continue;

				LASSERT(next->rl_lock_count > 0);
				next->rl_lock_count--;
				break;
			}
		}
		list_del_init(&lock->rl_next_lock);
	} else {
		LASSERT(interval_is_intree(&lock->rl_node));
		interval_erase(&lock->rl_node, &tree->rlt_root);
	}

	interval_search(tree->rlt_root, &lock->rl_node.in_extent,
			range_unlock_cb, lock);
	spin_unlock(&tree->rlt_lock);
	EXIT;
}
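
/*
 * Worked example (illustrative): locks L1, L2 and L3 cover the exact
 * same range. L1 is the one inserted in the interval tree, with
 * rl_lock_count == 2; L2 and L3 hang off its rl_next_lock list.
 * Unlocking L1 erases it and promotes L2 into the tree with
 * rl_lock_count = 1. Unlocking L2 first instead takes the else branch
 * above: L1 stays in the tree and its rl_lock_count drops to 1.
 */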
/**
 * Helper function of range_lock()
 *
 * \param node	[in]	a range lock found overlapping during the interval
 *			node search
 * \param arg	[in]	the range lock being queued
 *
 * \retval INTERVAL_ITER_CONT	continue the search for the next
 *				overlapping range node
 * \retval INTERVAL_ITER_STOP	stop the search
 */
static enum interval_iter range_lock_cb(struct interval_node *node, void *arg)
{
	struct range_lock *lock = (struct range_lock *)arg;
	struct range_lock *overlap = node2rangelock(node);

	/* Wait for the in-tree lock plus every same-range lock queued
	 * behind it on its rl_next_lock list. */
	lock->rl_blocking_ranges += overlap->rl_lock_count + 1;
	RETURN(INTERVAL_ITER_CONT);
}
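
/*
 * Worked example (illustrative): if the new lock overlaps a tree node
 * whose rl_lock_count is 2, range_lock_cb() adds 2 + 1 = 3 to the new
 * lock's rl_blocking_ranges: it must wait for the in-tree lock itself
 * plus the two same-range locks queued behind it.
 */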
/**
 * Lock a region
 *
 * \param tree	[in]	range lock tree
 * \param lock	[in]	range lock node containing the region span
 *
 * \retval 0	got the range lock
 * \retval <0	error code while not getting the range lock
 *
 * If an overlapping range lock already exists, the new lock waits and
 * retries; if it is woken up but finds it is still blocked by another
 * range, it waits again.
 */
int range_lock(struct range_lock_tree *tree, struct range_lock *lock)
{
	struct interval_node *node;
	int rc = 0;
	ENTRY;

	spin_lock(&tree->rlt_lock);
	/*
	 * Check all conflicting intervals already in the tree;
	 * range_lock_cb() accumulates rl_blocking_ranges.
	 */
	interval_search(tree->rlt_root, &lock->rl_node.in_extent,
			range_lock_cb, lock);
	/*
	 * Insert into the tree if this range is unique; otherwise link
	 * this lock behind the already-inserted lock that covers exactly
	 * the same range.
	 */
	node = interval_insert(&lock->rl_node, &tree->rlt_root);
	if (node != NULL) {
		struct range_lock *tmp = node2rangelock(node);

		list_add_tail(&lock->rl_next_lock, &tmp->rl_next_lock);
		tmp->rl_lock_count++;
	}
	lock->rl_sequence = ++tree->rlt_sequence;

	while (lock->rl_blocking_ranges > 0) {
		/* rl_task is published and wake_up_process() is called
		 * under rlt_lock, so the wakeup cannot be missed between
		 * the unlock below and schedule(). */
		lock->rl_task = current;
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&tree->rlt_lock);
		schedule();

		if (signal_pending(current)) {
			range_unlock(tree, lock);
			GOTO(out, rc = -ERESTARTSYS);
		}
		spin_lock(&tree->rlt_lock);
	}
	spin_unlock(&tree->rlt_lock);
out:
	RETURN(rc);
}
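
/*
 * Caller-side sketch (an assumption, not part of the original code):
 * range_lock() sleeps in TASK_INTERRUPTIBLE, so callers must handle
 * -ERESTARTSYS; on that error the lock has already been removed from
 * the tree and must not be unlocked again.
 *
 *	rc = range_lock(&io_tree, &range);
 *	if (rc < 0)
 *		return rc;	// interrupted by a signal
 *	do_write(start, end);
 *	range_unlock(&io_tree, &range);
 */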