 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Range lock is used to allow multiple threads to write to a single shared
 * file, provided each thread writes to a non-overlapping portion of the
 * file.
 *
 * Refer to the possible upstream kernel version of range lock by
 * Jan Kara <jack@suse.cz>: https://lkml.org/lkml/2013/1/31/480
 *
 * This file could later be replaced by the upstream kernel version.
 */
/*
 * Author: Prakash Surya <surya1@llnl.gov>
 * Author: Bobi Jam <bobijam.xu@intel.com>
 */
#include <libcfs/libcfs.h>
#include <interval_tree.h>
#define RL_FMT "["LPU64", "LPU64"]"
#define RL_PARA(range)				\
	(range)->rl_node.in_extent.start,	\
	(range)->rl_node.in_extent.end
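
/*
 * RL_FMT/RL_PARA give a printf-style format string and the matching
 * arguments for the extent covered by a range lock.  A minimal usage
 * sketch follows; the CDEBUG() call and the D_VFSTRACE mask are
 * illustrative assumptions, not taken from this file:
 *
 *	CDEBUG(D_VFSTRACE, "acquiring range lock "RL_FMT"\n", RL_PARA(lock));
 */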
struct range_lock {
	struct interval_node	rl_node;
	/* Process to enqueue this lock. */
	struct task_struct     *rl_task;
	/* List of locks with the same range. */
	struct list_head	rl_next_lock;
	/* Number of locks in the list rl_next_lock. */
	unsigned int		rl_lock_count;
	/* Number of ranges which are blocking acquisition of the lock. */
	unsigned int		rl_blocking_ranges;
	/*
	 * Sequence number of range lock.  This number is used to determine
	 * the order in which the locks were queued; this is required for
	 * range_cancel().
	 */
	__u64			rl_sequence;
};
static inline struct range_lock *node2rangelock(const struct interval_node *n)
{
	return container_of(n, struct range_lock, rl_node);
}
struct range_lock_tree {
	struct interval_node   *rlt_root;
	spinlock_t		rlt_lock;
	__u64			rlt_sequence;
};
void range_lock_tree_init(struct range_lock_tree *tree);
void range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
int  range_lock(struct range_lock_tree *tree, struct range_lock *lock);
void range_unlock(struct range_lock_tree *tree, struct range_lock *lock);
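
/*
 * A minimal usage sketch of the API above, assuming one range_lock_tree
 * shared by all writers of a file; the names "tree", "pos", "count" and
 * "rc" are illustrative assumptions, not taken from this file.  Each
 * writer locks the byte range it is about to write, blocks in range_lock()
 * until no overlapping range is held, and releases the range once the
 * write completes:
 *
 *	struct range_lock range;
 *	int rc;
 *
 *	range_lock_init(&range, pos, pos + count - 1);
 *	rc = range_lock(&tree, &range);
 *	if (rc < 0)
 *		return rc;
 *	... perform the non-overlapping write ...
 *	range_unlock(&tree, &range);
 */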