2 +++ b/fs/ext4/dynlocks.c
7 + * struct dynlock is a lockspace
8 + * one may request lock (exclusive or shared) for some value
13 +#include <linux/dynlocks.h>
14 +#include <linux/module.h>
15 +#include <linux/slab.h>
16 +#include <linux/sched.h>
18 +#define DYNLOCK_HANDLE_MAGIC 0xd19a10c
19 +#define DYNLOCK_HANDLE_DEAD 0xd1956ee
20 +#define DYNLOCK_LIST_MAGIC 0x11ee91e6
22 +static struct kmem_cache * dynlock_cachep = NULL;
24 +struct dynlock_handle {
26 + struct list_head dh_list;
27 + unsigned long dh_value; /* lock value */
28 + int dh_refcount; /* number of users */
31 + int dh_pid; /* holder of the lock */
32 + wait_queue_head_t dh_wait;
35 +int __init dynlock_cache_init(void)
39 + /* printk(KERN_INFO "init dynlocks cache\n"); */
40 + dynlock_cachep = kmem_cache_create("dynlock_cache",
41 + sizeof(struct dynlock_handle),
45 + if (dynlock_cachep == NULL) {
46 + printk(KERN_ERR "Not able to create dynlock cache");
52 +void dynlock_cache_exit(void)
54 + /* printk(KERN_INFO "exit dynlocks cache\n"); */
55 + kmem_cache_destroy(dynlock_cachep);
61 + * initialize lockspace
64 +void dynlock_init(struct dynlock *dl)
66 + spin_lock_init(&dl->dl_list_lock);
67 + INIT_LIST_HEAD(&dl->dl_list);
68 + dl->dl_magic = DYNLOCK_LIST_MAGIC;
70 +EXPORT_SYMBOL(dynlock_init);
75 + * acquires lock (exclusive or shared) in specified lockspace
76 + * each lock in a lockspace is allocated separately, so the user has
77 + * to specify GFP flags.
78 + * routine returns pointer to lock. this pointer is intended to
79 + * be passed to dynlock_unlock
82 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
83 + enum dynlock_type lt, gfp_t gfp)
85 + struct dynlock_handle *nhl = NULL;
86 + struct dynlock_handle *hl;
89 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
92 + /* find requested lock in lockspace */
93 + spin_lock(&dl->dl_list_lock);
94 + BUG_ON(dl->dl_list.next == NULL);
95 + BUG_ON(dl->dl_list.prev == NULL);
96 + list_for_each_entry(hl, &dl->dl_list, dh_list) {
97 + BUG_ON(hl->dh_list.next == NULL);
98 + BUG_ON(hl->dh_list.prev == NULL);
99 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
100 + if (hl->dh_value == value) {
101 + /* lock is found */
103 + /* someone else just allocated
104 + * the lock we didn't find and just created,
105 + * so we drop ours
107 + kmem_cache_free(dynlock_cachep, nhl);
114 + /* lock not found */
116 + /* we already have allocated lock. use it */
119 + list_add(&hl->dh_list, &dl->dl_list);
122 + spin_unlock(&dl->dl_list_lock);
124 + /* lock not found and we haven't allocated lock yet. allocate it */
125 + nhl = kmem_cache_alloc(dynlock_cachep, gfp);
128 + nhl->dh_refcount = 1;
129 + nhl->dh_value = value;
130 + nhl->dh_readers = 0;
131 + nhl->dh_writers = 0;
132 + nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
133 + init_waitqueue_head(&nhl->dh_wait);
135 + /* while lock is being allocated, someone else may allocate it
136 + * and put onto to list. check this situation
141 + if (lt == DLT_WRITE) {
142 + /* exclusive lock: the user doesn't want to share the lock at all.
143 + * NOTE: one process may take the same lock several times;
144 + * this functionality is useful for rename operations */
145 + while ((hl->dh_writers && hl->dh_pid != current->pid) ||
147 + spin_unlock(&dl->dl_list_lock);
148 + wait_event(hl->dh_wait,
149 + hl->dh_writers == 0 && hl->dh_readers == 0);
150 + spin_lock(&dl->dl_list_lock);
154 + /* shared lock: the user does not want to share the lock with a writer */
155 + while (hl->dh_writers) {
156 + spin_unlock(&dl->dl_list_lock);
157 + wait_event(hl->dh_wait, hl->dh_writers == 0);
158 + spin_lock(&dl->dl_list_lock);
162 + hl->dh_pid = current->pid;
163 + spin_unlock(&dl->dl_list_lock);
167 +EXPORT_SYMBOL(dynlock_lock);
173 + * the user has to specify the lockspace (dl) and a pointer to the lock structure
174 + * returned by dynlock_lock()
177 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
181 + BUG_ON(dl == NULL);
182 + BUG_ON(hl == NULL);
183 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
185 + if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
186 + printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);
188 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
189 + BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);
191 + spin_lock(&dl->dl_list_lock);
192 + if (hl->dh_writers) {
193 + BUG_ON(hl->dh_readers != 0);
195 + if (hl->dh_writers == 0)
197 + } else if (hl->dh_readers) {
199 + if (hl->dh_readers == 0)
206 + wake_up(&hl->dh_wait);
208 + if (--(hl->dh_refcount) == 0) {
209 + hl->dh_magic = DYNLOCK_HANDLE_DEAD;
210 + list_del(&hl->dh_list);
211 + kmem_cache_free(dynlock_cachep, hl);
213 + spin_unlock(&dl->dl_list_lock);
215 +EXPORT_SYMBOL(dynlock_unlock);
217 +int dynlock_is_locked(struct dynlock *dl, unsigned long value)
219 + struct dynlock_handle *hl;
222 + /* find requested lock in lockspace */
223 + spin_lock(&dl->dl_list_lock);
224 + BUG_ON(dl->dl_list.next == NULL);
225 + BUG_ON(dl->dl_list.prev == NULL);
226 + list_for_each_entry(hl, &dl->dl_list, dh_list) {
227 + BUG_ON(hl->dh_list.next == NULL);
228 + BUG_ON(hl->dh_list.prev == NULL);
229 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
230 + if (hl->dh_value == value && hl->dh_pid == current->pid) {
231 + /* lock is found */
236 + spin_unlock(&dl->dl_list_lock);
239 +EXPORT_SYMBOL(dynlock_is_locked);
241 +++ b/include/linux/dynlocks.h
243 +#ifndef _LINUX_DYNLOCKS_H
244 +#define _LINUX_DYNLOCKS_H
246 +#include <linux/list.h>
247 +#include <linux/wait.h>
249 +struct dynlock_handle;
252 + * lock's namespace:
254 + * - lock to protect this list
258 + struct list_head dl_list;
259 + spinlock_t dl_list_lock;
267 +int dynlock_cache_init(void);
268 +void dynlock_cache_exit(void);
269 +void dynlock_init(struct dynlock *dl);
270 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
271 + enum dynlock_type lt, gfp_t gfp);
272 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
273 +int dynlock_is_locked(struct dynlock *dl, unsigned long value);
277 --- a/fs/ext4/super.c
278 +++ b/fs/ext4/super.c
279 @@ -5178,30 +5178,33 @@ static int __init ext4_init_fs(void)
281 err = ext4_init_system_zone();
285 ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
289 ext4_proc_root = proc_mkdir("fs/ext4", NULL);
294 err = ext4_init_feat_adverts();
299 err = ext4_init_mballoc();
304 err = ext4_init_xattr();
308 err = init_inodecache();
314 + err = dynlock_cache_init();
317 err = register_filesystem(&ext4_fs_type);
320 @@ -5210,22 +5213,24 @@ static int __init ext4_init_fs(void)
321 mutex_init(&ext4_li_mtx);
324 + dynlock_cache_exit();
326 unregister_as_ext2();
327 unregister_as_ext3();
328 destroy_inodecache();
332 - ext4_exit_mballoc();
335 - ext4_exit_feat_adverts();
336 + ext4_exit_mballoc();
338 - remove_proc_entry("fs/ext4", NULL);
339 + ext4_exit_feat_adverts();
341 - kset_unregister(ext4_kset);
342 + remove_proc_entry("fs/ext4", NULL);
344 - ext4_exit_system_zone();
345 + kset_unregister(ext4_kset);
347 + ext4_exit_system_zone();
352 @@ -5236,6 +5241,7 @@ static void __exit ext4_exit_fs(void)
353 unregister_as_ext2();
354 unregister_as_ext3();
355 unregister_filesystem(&ext4_fs_type);
356 + dynlock_cache_exit();
357 destroy_inodecache();
360 --- a/fs/ext4/Makefile
361 +++ b/fs/ext4/Makefile
362 @@ -7,7 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
363 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
364 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
365 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
369 ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
370 ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o