1 Index: linux-stage/fs/ext4/dynlocks.c
2 ===================================================================
4 +++ linux-stage/fs/ext4/dynlocks.c
9 + * struct dynlock is lockspace
10 + * one may request lock (exclusive or shared) for some value
15 +#include <linux/dynlocks.h>
16 +#include <linux/module.h>
17 +#include <linux/slab.h>
18 +#include <linux/sched.h>
20 +/* magic values stamped into handles/lockspaces so that BUG_ON checks
20 + * below can detect use of a freed handle or an uninitialized lockspace */
20 +#define DYNLOCK_HANDLE_MAGIC 0xd19a10c
21 +#define DYNLOCK_HANDLE_DEAD 0xd1956ee
22 +#define DYNLOCK_LIST_MAGIC 0x11ee91e6
23 +
23 +/* slab cache for struct dynlock_handle; created by dynlock_cache_init(),
23 + * destroyed by dynlock_cache_exit() */
24 +static struct kmem_cache * dynlock_cachep = NULL;
25 +/* one lock within a lockspace, keyed by dh_value; linked on
25 + * struct dynlock::dl_list and reference-counted by dh_refcount.
25 + * NOTE(review): this hunk is incomplete — the declarations of dh_magic,
25 + * dh_readers and dh_writers (referenced throughout the .c code) are
25 + * missing from this fragment; confirm against the full patch. */
26 +struct dynlock_handle {
28 + struct list_head dh_list;
29 + unsigned long dh_value; /* lock value */
30 + int dh_refcount; /* number of users */
33 + int dh_pid; /* holder of the lock */
34 + wait_queue_head_t dh_wait;
36 +/* create the slab cache used for dynlock handles; called from
36 + * init_ext4_fs(). Presumably returns 0 on success and an errno on
36 + * failure — the return statements are missing from this fragment. */
37 +int __init dynlock_cache_init(void)
41 + /* printk(KERN_INFO "init dynlocks cache\n"); */
42 + dynlock_cachep = kmem_cache_create("dynlock_cache",
43 + sizeof(struct dynlock_handle),
43 + /* NOTE(review): remaining kmem_cache_create() arguments
43 + * (align/flags/ctor) are missing from this hunk */
47 + if (dynlock_cachep == NULL) {
48 + printk(KERN_ERR "Not able to create dynlock cache");
53 +/* destroy the dynlock handle slab cache; called from exit_ext4_fs()
53 + * and from the init-failure unwind path in super.c */
54 +void dynlock_cache_exit(void)
56 + /* printk(KERN_INFO "exit dynlocks cache\n"); */
57 + kmem_cache_destroy(dynlock_cachep);
63 + * initialize lockspace: empty handle list, its spinlock, and the
63 + * magic value checked by the other entry points
66 +void dynlock_init(struct dynlock *dl)
68 + spin_lock_init(&dl->dl_list_lock);
69 + INIT_LIST_HEAD(&dl->dl_list);
70 + dl->dl_magic = DYNLOCK_LIST_MAGIC;
72 +EXPORT_SYMBOL(dynlock_init);
77 + * acquires lock (exclusive or shared) in specified lockspace
78 + * each lock in lockspace is allocated separately, so the user has
79 + * to specify GFP flags.
80 + * routine returns pointer to lock. this pointer is intended to
81 + * be passed to dynlock_unlock
80 + *
80 + * NOTE(review): this hunk is fragmentary — the retry label, the
80 + * reader/writer count increments, the allocation-failure check and
80 + * the return statements are missing lines; the comments below only
80 + * describe what is visible here.
84 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
85 + enum dynlock_type lt, gfp_t gfp)
87 + struct dynlock_handle *nhl = NULL;
88 + struct dynlock_handle *hl;
91 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
94 + /* find requested lock in lockspace */
95 + spin_lock(&dl->dl_list_lock);
96 + BUG_ON(dl->dl_list.next == NULL);
97 + BUG_ON(dl->dl_list.prev == NULL);
98 + list_for_each_entry(hl, &dl->dl_list, dh_list) {
99 + BUG_ON(hl->dh_list.next == NULL);
100 + BUG_ON(hl->dh_list.prev == NULL);
101 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
102 + if (hl->dh_value == value) {
103 + /* lock is found */
105 + /* someone else just allocated
106 + * lock we didn't find and just created
107 + * so, we drop our lock
109 + kmem_cache_free(dynlock_cachep, nhl);
116 + /* lock not found */
118 + /* we already have allocated lock. use it */
121 + list_add(&hl->dh_list, &dl->dl_list);
124 + spin_unlock(&dl->dl_list_lock);
126 + /* lock not found and we haven't allocated lock yet. allocate it */
127 + nhl = kmem_cache_alloc(dynlock_cachep, gfp);
129 + /* NOTE(review): the NULL check on nhl is not visible in this
129 + * fragment — confirm the full patch handles allocation failure */
130 + nhl->dh_refcount = 1;
131 + nhl->dh_value = value;
132 + nhl->dh_readers = 0;
133 + nhl->dh_writers = 0;
134 + nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
135 + init_waitqueue_head(&nhl->dh_wait);
137 + /* while lock is being allocated, someone else may allocate it
138 + * and put onto the list. check this situation
143 + if (lt == DLT_WRITE) {
144 + /* exclusive lock: the user does not want to share the lock at all
145 + * NOTE: one process may take the same lock several times
146 + * this functionality is useful for rename operations */
147 + while ((hl->dh_writers && hl->dh_pid != current->pid) ||
148 + /* sleep outside the spinlock; wait_event() re-checks its
148 + * condition before sleeping, then the while loop re-checks
148 + * the full condition under dl_list_lock after waking */
149 + spin_unlock(&dl->dl_list_lock);
150 + wait_event(hl->dh_wait,
151 + hl->dh_writers == 0 && hl->dh_readers == 0);
152 + spin_lock(&dl->dl_list_lock);
156 + /* shared lock: the user does not want to share the lock with a writer */
157 + while (hl->dh_writers) {
158 + spin_unlock(&dl->dl_list_lock);
159 + wait_event(hl->dh_wait, hl->dh_writers == 0);
160 + spin_lock(&dl->dl_list_lock);
163 + /* record the owner; for shared locks this records only the most
163 + * recent reader — dynlock_is_locked() depends on dh_pid */
164 + hl->dh_pid = current->pid;
165 + spin_unlock(&dl->dl_list_lock);
169 +EXPORT_SYMBOL(dynlock_lock);
175 + * the user has to specify lockspace (dl) and pointer to lock structure
176 + * returned by dynlock_lock()
176 + *
176 + * NOTE(review): fragmentary hunk — the dh_writers/dh_readers decrement
176 + * lines and the wake-up condition lines are missing; comments below
176 + * cover only what is visible.
179 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
183 + BUG_ON(dl == NULL);
184 + BUG_ON(hl == NULL);
185 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
187 + if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
188 + printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);
190 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
191 + /* a write lock may only be released by the task that took it */
191 + BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);
193 + spin_lock(&dl->dl_list_lock);
194 + if (hl->dh_writers) {
195 + /* writers and readers are mutually exclusive on one handle */
195 + BUG_ON(hl->dh_readers != 0);
197 + if (hl->dh_writers == 0)
199 + } else if (hl->dh_readers) {
201 + if (hl->dh_readers == 0)
208 + wake_up(&hl->dh_wait);
209 + /* last reference: poison the magic, unlink and free the handle */
210 + if (--(hl->dh_refcount) == 0) {
211 + hl->dh_magic = DYNLOCK_HANDLE_DEAD;
212 + list_del(&hl->dh_list);
213 + kmem_cache_free(dynlock_cachep, hl);
215 + spin_unlock(&dl->dl_list_lock);
217 +EXPORT_SYMBOL(dynlock_unlock);
218 +/* report whether the current task holds the lock for 'value' in this
218 + * lockspace. Matches on dh_pid == current->pid, which dynlock_lock()
218 + * sets for the most recent taker — presumably reliable for write
218 + * locks only; the return statements are missing from this fragment. */
219 +int dynlock_is_locked(struct dynlock *dl, unsigned long value)
221 + struct dynlock_handle *hl;
224 + /* find requested lock in lockspace */
225 + spin_lock(&dl->dl_list_lock);
226 + BUG_ON(dl->dl_list.next == NULL);
227 + BUG_ON(dl->dl_list.prev == NULL);
228 + list_for_each_entry(hl, &dl->dl_list, dh_list) {
229 + BUG_ON(hl->dh_list.next == NULL);
230 + BUG_ON(hl->dh_list.prev == NULL);
231 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
232 + if (hl->dh_value == value && hl->dh_pid == current->pid) {
233 + /* lock is found */
238 + spin_unlock(&dl->dl_list_lock);
241 +EXPORT_SYMBOL(dynlock_is_locked);
242 Index: linux-stage/include/linux/dynlocks.h
243 ===================================================================
245 +++ linux-stage/include/linux/dynlocks.h
247 +#ifndef _LINUX_DYNLOCKS_H
248 +#define _LINUX_DYNLOCKS_H
250 +#include <linux/list.h>
251 +#include <linux/wait.h>
252 +
252 +/* opaque to users; defined in fs/ext4/dynlocks.c */
253 +struct dynlock_handle;
256 + * lock's namespace:
256 + * - list of locks currently held in this space
258 + * - lock to protect this list
258 + *
258 + * NOTE(review): the 'struct dynlock' opening line, the dl_magic field
258 + * and the 'enum dynlock_type' definition (DLT_WRITE / read type used
258 + * by dynlock_lock) are missing from this fragment.
262 + struct list_head dl_list;
263 + spinlock_t dl_list_lock;
270 +
270 +/* cache lifetime hooks called from ext4 module init/exit */
271 +int dynlock_cache_init(void);
272 +void dynlock_cache_exit(void);
273 +void dynlock_init(struct dynlock *dl);
274 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
275 + enum dynlock_type lt, gfp_t gfp);
276 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
277 +int dynlock_is_locked(struct dynlock *dl, unsigned long value);
281 Index: linux-stage/fs/ext4/Makefile
282 ===================================================================
283 --- linux-stage.orig/fs/ext4/Makefile
284 +++ linux-stage/fs/ext4/Makefile
285 @@ -7,7 +7,7 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
286 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
287 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
288 ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
292 ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
293 ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
294 Index: linux-stage/fs/ext4/super.c
295 ===================================================================
296 --- linux-stage.orig/fs/ext4/super.c
297 +++ linux-stage/fs/ext4/super.c
298 @@ -4620,20 +4620,23 @@ static int __init init_ext4_fs(void)
300 ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
304 ext4_proc_root = proc_mkdir("fs/ext4", NULL);
306 err = ext4_init_feat_adverts();
308 err = init_ext4_mballoc();
313 err = init_ext4_xattr();
317 err = init_inodecache();
320 + err = dynlock_cache_init();
323 err = register_filesystem(&ext4_fs_type);
325 @@ -4643,16 +4646,18 @@ static int __init init_ext4_fs(void)
326 mutex_init(&ext4_li_mtx);
329 - destroy_inodecache();
330 + dynlock_cache_exit();
333 + destroy_inodecache();
335 - exit_ext4_mballoc();
338 + exit_ext4_mballoc();
340 ext4_exit_feat_adverts();
341 remove_proc_entry("fs/ext4", NULL);
342 kset_unregister(ext4_kset);
345 exit_ext4_system_zone();
348 @@ -4661,6 +4666,7 @@ static void __exit exit_ext4_fs(void)
350 ext4_destroy_lazyinit_thread();
351 unregister_filesystem(&ext4_fs_type);
352 + dynlock_cache_exit();
353 destroy_inodecache();