1 include/linux/dynlocks.h | 33 ++++++++++
3 lib/dynlocks.c | 152 +++++++++++++++++++++++++++++++++++++++++++++++
4 3 files changed, 187 insertions(+), 2 deletions(-)
6 Index: linux/fs/dcache.c
7 ===================================================================
8 --- linux.orig/fs/dcache.c
10 @@ -1678,6 +1678,7 @@ EXPORT_SYMBOL(d_genocide);
12 extern void bdev_cache_init(void);
13 extern void chrdev_init(void);
14 +extern void dynlock_cache_init(void);
16 void __init vfs_caches_init_early(void)
18 @@ -1707,6 +1708,7 @@ void __init vfs_caches_init(unsigned lon
22 + dynlock_cache_init();
25 EXPORT_SYMBOL(d_alloc);
26 Index: linux/include/linux/dynlocks.h
27 ===================================================================
28 --- linux.orig/include/linux/dynlocks.h
29 +++ linux/include/linux/dynlocks.h
31 +#ifndef _LINUX_DYNLOCKS_H
32 +#define _LINUX_DYNLOCKS_H
34 +#include <linux/list.h>
35 +#include <linux/wait.h>
37 +#define DYNLOCK_MAGIC 0xd19a10c
38 +#define DYNLOCK_MAGIC2 0xd1956ee
41 +struct dynlock_handle;
46 + * - lock to protect this list
51 + struct list_head dl_list;
52 + spinlock_t dl_list_lock;
60 +void dynlock_init(struct dynlock *dl);
61 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
62 + enum dynlock_type lt, int gfp);
63 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
65 +int dynlock_is_locked(struct dynlock *dl, unsigned long value);
68 Index: linux/lib/Makefile
69 ===================================================================
70 --- linux.orig/lib/Makefile
71 +++ linux/lib/Makefile
73 lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
74 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
75 kobject.o kref.o idr.o div64.o parser.o int_sqrt.o \
77 + bitmap.o extable.o dynlocks.o
81 Index: linux/lib/dynlocks.c
82 ===================================================================
83 --- linux.orig/lib/dynlocks.c
84 +++ linux/lib/dynlocks.c
89 + * struct dynlock is a lockspace
90 + * one may request a lock (exclusive or shared) for some value
95 +#include <linux/dynlocks.h>
96 +#include <linux/module.h>
97 +#include <linux/slab.h>
98 +#include <linux/sched.h>
100 +static kmem_cache_t * dynlock_cachep = NULL;
102 +struct dynlock_handle {
104 + struct list_head dl_list;
105 + unsigned long dl_value; /* lock value */
106 + int dl_refcount; /* number of users */
109 + int dl_pid; /* holder of the lock */
110 + wait_queue_head_t dl_wait;
113 +#define DYNLOCK_LIST_MAGIC 0x11ee91e6
115 +void __init dynlock_cache_init(void)
117 + printk(KERN_INFO "init dynlocks cache\n");
118 + dynlock_cachep = kmem_cache_create("dynlock_cache",
119 + sizeof(struct dynlock_handle),
121 + SLAB_HWCACHE_ALIGN,
123 + if (dynlock_cachep == NULL)
124 + panic("Can't create dynlock cache");
130 + * initialize lockspace
133 +void dynlock_init(struct dynlock *dl)
135 + spin_lock_init(&dl->dl_list_lock);
136 + INIT_LIST_HEAD(&dl->dl_list);
137 + dl->dl_magic = DYNLOCK_LIST_MAGIC;
143 + * acquires a lock (exclusive or shared) in the specified lockspace
144 + * each lock in the lockspace is allocated separately, so the user has
145 + * to specify GFP flags.
146 + * the routine returns a pointer to the lock. this pointer is intended
147 + * to be passed to dynlock_unlock
150 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
151 + enum dynlock_type lt, int gfp)
153 + struct dynlock_handle *nhl = NULL;
154 + struct dynlock_handle *hl;
156 + BUG_ON(dl == NULL);
157 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
160 + /* find requested lock in lockspace */
161 + spin_lock(&dl->dl_list_lock);
162 + BUG_ON(dl->dl_list.next == NULL);
163 + BUG_ON(dl->dl_list.prev == NULL);
164 + list_for_each_entry(hl, &dl->dl_list, dl_list) {
165 + BUG_ON(hl->dl_list.next == NULL);
166 + BUG_ON(hl->dl_list.prev == NULL);
167 + BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
168 + if (hl->dl_value == value) {
169 + /* lock is found */
171 + /* someone else just allocated
172 + * the lock we didn't find and had just created,
173 + * so we drop our lock
175 + kmem_cache_free(dynlock_cachep, nhl);
182 + /* lock not found */
184 + /* we already have allocated lock. use it */
187 + list_add(&hl->dl_list, &dl->dl_list);
190 + spin_unlock(&dl->dl_list_lock);
192 + /* lock not found and we haven't allocated lock yet. allocate it */
193 + nhl = kmem_cache_alloc(dynlock_cachep, gfp);
196 + nhl->dl_refcount = 1;
197 + nhl->dl_value = value;
198 + nhl->dl_readers = 0;
199 + nhl->dl_writers = 0;
200 + nhl->dl_magic = DYNLOCK_MAGIC;
201 + init_waitqueue_head(&nhl->dl_wait);
203 + /* while the lock is being allocated, someone else may allocate it
204 + * and put it onto the list. check for this situation
209 + if (lt == DLT_WRITE) {
210 + /* exclusive lock: the user doesn't want to share the lock at all
211 + * NOTE: one process may take the same lock several times;
212 + * this functionality is useful for rename operations */
213 + while ((hl->dl_writers && hl->dl_pid != current->pid) ||
215 + spin_unlock(&dl->dl_list_lock);
216 + wait_event(hl->dl_wait,
217 + hl->dl_writers == 0 && hl->dl_readers == 0);
218 + spin_lock(&dl->dl_list_lock);
222 + /* shared lock: the user does not want to share the lock with a writer */
223 + while (hl->dl_writers) {
224 + spin_unlock(&dl->dl_list_lock);
225 + wait_event(hl->dl_wait, hl->dl_writers == 0);
226 + spin_lock(&dl->dl_list_lock);
230 + hl->dl_pid = current->pid;
231 + spin_unlock(&dl->dl_list_lock);
240 + * the user has to specify the lockspace (dl) and the pointer to the
241 + * lock structure returned by dynlock_lock()
244 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
248 + BUG_ON(dl == NULL);
249 + BUG_ON(hl == NULL);
250 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
252 + if (hl->dl_magic != DYNLOCK_MAGIC)
253 + printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dl_magic);
255 + BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
256 + BUG_ON(hl->dl_writers != 0 && current->pid != hl->dl_pid);
258 + spin_lock(&dl->dl_list_lock);
259 + if (hl->dl_writers) {
260 + BUG_ON(hl->dl_readers != 0);
262 + if (hl->dl_writers == 0)
264 + } else if (hl->dl_readers) {
266 + if (hl->dl_readers == 0)
273 + wake_up(&hl->dl_wait);
275 + if (--(hl->dl_refcount) == 0) {
276 + hl->dl_magic = DYNLOCK_MAGIC2;
277 + list_del(&hl->dl_list);
278 + kmem_cache_free(dynlock_cachep, hl);
280 + spin_unlock(&dl->dl_list_lock);
283 +int dynlock_is_locked(struct dynlock *dl, unsigned long value)
285 + struct dynlock_handle *hl;
289 + /* find requested lock in lockspace */
290 + spin_lock(&dl->dl_list_lock);
291 + BUG_ON(dl->dl_list.next == NULL);
292 + BUG_ON(dl->dl_list.prev == NULL);
293 + list_for_each_entry(hl, &dl->dl_list, dl_list) {
294 + BUG_ON(hl->dl_list.next == NULL);
295 + BUG_ON(hl->dl_list.prev == NULL);
296 + BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
297 + if (hl->dl_value == value && hl->dl_pid == current->pid) {
298 + /* lock is found */
303 + spin_unlock(&dl->dl_list_lock);
307 +EXPORT_SYMBOL(dynlock_init);
308 +EXPORT_SYMBOL(dynlock_lock);
309 +EXPORT_SYMBOL(dynlock_unlock);
310 +EXPORT_SYMBOL(dynlock_is_locked);