1 diff -rupN linux-2.6.18-128.1.6_1/fs/ext4/dynlocks.c linux-2.6.18-128.1.6_2/fs/ext4/dynlocks.c
2 --- linux-2.6.18-128.1.6_1/fs/ext4/dynlocks.c 1970-01-01 05:30:00.000000000 +0530
3 +++ linux-2.6.18-128.1.6_2/fs/ext4/dynlocks.c 2009-08-13 20:42:59.000000000 +0530
8 + * struct dynlock is a lockspace
9 + * one may request a lock (exclusive or shared) for some value
14 +#include <linux/dynlocks.h>
15 +#include <linux/module.h>
16 +#include <linux/slab.h>
17 +#include <linux/sched.h>
19 +#define DYNLOCK_HANDLE_MAGIC 0xd19a10c
20 +#define DYNLOCK_HANDLE_DEAD 0xd1956ee
21 +#define DYNLOCK_LIST_MAGIC 0x11ee91e6
23 +static kmem_cache_t * dynlock_cachep = NULL;
25 +struct dynlock_handle {
27 + struct list_head dh_list;
28 + unsigned long dh_value; /* lock value */
29 + int dh_refcount; /* number of users */
32 + int dh_pid; /* holder of the lock */
33 + wait_queue_head_t dh_wait;
36 +int __init dynlock_cache_init(void)
40 + /* printk(KERN_INFO "init dynlocks cache\n"); */
41 + dynlock_cachep = kmem_cache_create("dynlock_cache",
42 + sizeof(struct dynlock_handle),
46 + if (dynlock_cachep == NULL) {
47 + printk(KERN_ERR "Not able to create dynlock cache");
53 +void dynlock_cache_exit(void)
55 + /* printk(KERN_INFO "exit dynlocks cache\n"); */
56 + kmem_cache_destroy(dynlock_cachep);
62 + * initialize lockspace
65 +void dynlock_init(struct dynlock *dl)
67 + spin_lock_init(&dl->dl_list_lock);
68 + INIT_LIST_HEAD(&dl->dl_list);
69 + dl->dl_magic = DYNLOCK_LIST_MAGIC;
71 +EXPORT_SYMBOL(dynlock_init);
76 + * acquires lock (exclusive or shared) in specified lockspace
77 + * each lock in the lockspace is allocated separately, so the user has
78 + * to specify GFP flags.
79 + * the routine returns a pointer to the lock; this pointer is intended
80 + * to be passed to dynlock_unlock
83 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
84 + enum dynlock_type lt, gfp_t gfp)
86 + struct dynlock_handle *nhl = NULL;
87 + struct dynlock_handle *hl;
90 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
93 + /* find requested lock in lockspace */
94 + spin_lock(&dl->dl_list_lock);
95 + BUG_ON(dl->dl_list.next == NULL);
96 + BUG_ON(dl->dl_list.prev == NULL);
97 + list_for_each_entry(hl, &dl->dl_list, dh_list) {
98 + BUG_ON(hl->dh_list.next == NULL);
99 + BUG_ON(hl->dh_list.prev == NULL);
100 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
101 + if (hl->dh_value == value) {
102 + /* lock is found */
104 + /* someone else just allocated
105 + * the lock we failed to find earlier and have just created,
106 + * so drop the one we allocated
108 + kmem_cache_free(dynlock_cachep, nhl);
115 + /* lock not found */
117 + /* we already have allocated lock. use it */
120 + list_add(&hl->dh_list, &dl->dl_list);
123 + spin_unlock(&dl->dl_list_lock);
125 + /* lock not found and we haven't allocated lock yet. allocate it */
126 + nhl = kmem_cache_alloc(dynlock_cachep, gfp);
129 + nhl->dh_refcount = 1;
130 + nhl->dh_value = value;
131 + nhl->dh_readers = 0;
132 + nhl->dh_writers = 0;
133 + nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
134 + init_waitqueue_head(&nhl->dh_wait);
136 + /* while lock is being allocated, someone else may allocate it
137 + * and put onto to list. check this situation
142 + if (lt == DLT_WRITE) {
143 + /* exclusive lock: the user doesn't want to share the lock at all
144 + * NOTE: one process may take the same lock several times;
145 + * this functionality is useful for rename operations */
146 + while ((hl->dh_writers && hl->dh_pid != current->pid) ||
148 + spin_unlock(&dl->dl_list_lock);
149 + wait_event(hl->dh_wait,
150 + hl->dh_writers == 0 && hl->dh_readers == 0);
151 + spin_lock(&dl->dl_list_lock);
155 + /* shared lock: the user does not want to share the lock with a writer */
156 + while (hl->dh_writers) {
157 + spin_unlock(&dl->dl_list_lock);
158 + wait_event(hl->dh_wait, hl->dh_writers == 0);
159 + spin_lock(&dl->dl_list_lock);
163 + hl->dh_pid = current->pid;
164 + spin_unlock(&dl->dl_list_lock);
168 +EXPORT_SYMBOL(dynlock_lock);
174 + * the user has to specify the lockspace (dl) and the pointer to the lock
175 + * structure returned by dynlock_lock()
178 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
182 + BUG_ON(dl == NULL);
183 + BUG_ON(hl == NULL);
184 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
186 + if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
187 + printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);
189 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
190 + BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);
192 + spin_lock(&dl->dl_list_lock);
193 + if (hl->dh_writers) {
194 + BUG_ON(hl->dh_readers != 0);
196 + if (hl->dh_writers == 0)
198 + } else if (hl->dh_readers) {
200 + if (hl->dh_readers == 0)
207 + wake_up(&hl->dh_wait);
209 + if (--(hl->dh_refcount) == 0) {
210 + hl->dh_magic = DYNLOCK_HANDLE_DEAD;
211 + list_del(&hl->dh_list);
212 + kmem_cache_free(dynlock_cachep, hl);
214 + spin_unlock(&dl->dl_list_lock);
216 +EXPORT_SYMBOL(dynlock_unlock);
218 +int dynlock_is_locked(struct dynlock *dl, unsigned long value)
220 + struct dynlock_handle *hl;
223 + /* find requested lock in lockspace */
224 + spin_lock(&dl->dl_list_lock);
225 + BUG_ON(dl->dl_list.next == NULL);
226 + BUG_ON(dl->dl_list.prev == NULL);
227 + list_for_each_entry(hl, &dl->dl_list, dh_list) {
228 + BUG_ON(hl->dh_list.next == NULL);
229 + BUG_ON(hl->dh_list.prev == NULL);
230 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
231 + if (hl->dh_value == value && hl->dh_pid == current->pid) {
232 + /* lock is found */
237 + spin_unlock(&dl->dl_list_lock);
240 +EXPORT_SYMBOL(dynlock_is_locked);
241 diff -rupN linux-2.6.18-128.1.6_1/include/linux/dynlocks.h linux-2.6.18-128.1.6_2/include/linux/dynlocks.h
242 --- linux-2.6.18-128.1.6_1/include/linux/dynlocks.h 1970-01-01 05:30:00.000000000 +0530
243 +++ linux-2.6.18-128.1.6_2/include/linux/dynlocks.h 2009-08-13 20:43:18.000000000 +0530
245 +#ifndef _LINUX_DYNLOCKS_H
246 +#define _LINUX_DYNLOCKS_H
248 +#include <linux/list.h>
249 +#include <linux/wait.h>
251 +struct dynlock_handle;
254 + * lock's namespace:
256 + * - lock to protect this list
260 + struct list_head dl_list;
261 + spinlock_t dl_list_lock;
269 +int dynlock_cache_init(void);
270 +void dynlock_cache_exit(void);
271 +void dynlock_init(struct dynlock *dl);
272 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
273 + enum dynlock_type lt, gfp_t gfp);
274 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
275 +int dynlock_is_locked(struct dynlock *dl, unsigned long value);