1 diff -rupN 2.6.27.21/fs/ext4/dynlocks.c 2.6.27.21_1//fs/ext4/dynlocks.c
2 --- 2.6.27.21/fs/ext4/dynlocks.c 1970-01-01 05:30:00.000000000 +0530
3 +++ 2.6.27.21_1//fs/ext4/dynlocks.c 2009-08-23 10:39:59.000000000 +0530
8 + * struct dynlock is a lockspace
9 + * one may request a lock (exclusive or shared) for some value
14 +#include <linux/dynlocks.h>
15 +#include <linux/module.h>
16 +#include <linux/slab_def.h>
17 +#include <linux/sched.h>
19 +#define DYNLOCK_HANDLE_MAGIC 0xd19a10c
20 +#define DYNLOCK_HANDLE_DEAD 0xd1956ee
21 +#define DYNLOCK_LIST_MAGIC 0x11ee91e6
23 +typedef struct kmem_cache kmem_cache_t;
25 +static kmem_cache_t * dynlock_cachep = NULL;
27 +struct dynlock_handle {
29 + struct list_head dh_list;
30 + unsigned long dh_value; /* lock value */
31 + int dh_refcount; /* number of users */
34 + int dh_pid; /* holder of the lock */
35 + wait_queue_head_t dh_wait;
38 +int __init dynlock_cache_init(void)
42 + /* printk(KERN_INFO "init dynlocks cache\n"); */
43 + dynlock_cachep = kmem_cache_create("dynlock_cache",
44 + sizeof(struct dynlock_handle),
48 + if (dynlock_cachep == NULL) {
49 + printk(KERN_ERR "Not able to create dynlock cache");
55 +void __exit dynlock_cache_exit(void)
57 + /* printk(KERN_INFO "exit dynlocks cache\n"); */
58 + kmem_cache_destroy(dynlock_cachep);
64 + * initialize lockspace
67 +void dynlock_init(struct dynlock *dl)
69 + spin_lock_init(&dl->dl_list_lock);
70 + INIT_LIST_HEAD(&dl->dl_list);
71 + dl->dl_magic = DYNLOCK_LIST_MAGIC;
73 +EXPORT_SYMBOL(dynlock_init);
78 + * acquires lock (exclusive or shared) in specified lockspace
79 + * each lock in the lockspace is allocated separately, so the user has
80 + * to specify GFP flags.
81 + * the routine returns a pointer to the lock; this pointer is intended to
82 + * be passed to dynlock_unlock
85 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
86 + enum dynlock_type lt, gfp_t gfp)
88 + struct dynlock_handle *nhl = NULL;
89 + struct dynlock_handle *hl;
92 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
95 + /* find requested lock in lockspace */
96 + spin_lock(&dl->dl_list_lock);
97 + BUG_ON(dl->dl_list.next == NULL);
98 + BUG_ON(dl->dl_list.prev == NULL);
99 + list_for_each_entry(hl, &dl->dl_list, dh_list) {
100 + BUG_ON(hl->dh_list.next == NULL);
101 + BUG_ON(hl->dh_list.prev == NULL);
102 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
103 + if (hl->dh_value == value) {
104 + /* lock is found */
106 + /* someone else just allocated the
107 + * lock we didn't find, and created it first,
108 + * so we drop our copy
110 + kmem_cache_free(dynlock_cachep, nhl);
117 + /* lock not found */
119 + /* we already have allocated lock. use it */
122 + list_add(&hl->dh_list, &dl->dl_list);
125 + spin_unlock(&dl->dl_list_lock);
127 + /* lock not found and we haven't allocated lock yet. allocate it */
128 + nhl = kmem_cache_alloc(dynlock_cachep, gfp);
131 + nhl->dh_refcount = 1;
132 + nhl->dh_value = value;
133 + nhl->dh_readers = 0;
134 + nhl->dh_writers = 0;
135 + nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
136 + init_waitqueue_head(&nhl->dh_wait);
138 + /* while the lock is being allocated, someone else may allocate it
139 + * and put it onto the list; check for this situation
144 + if (lt == DLT_WRITE) {
145 + /* exclusive lock: the user doesn't want to share the lock at all
146 + * NOTE: one process may take the same lock several times;
147 + * this functionality is useful for rename operations */
148 + while ((hl->dh_writers && hl->dh_pid != current->pid) ||
150 + spin_unlock(&dl->dl_list_lock);
151 + wait_event(hl->dh_wait,
152 + hl->dh_writers == 0 && hl->dh_readers == 0);
153 + spin_lock(&dl->dl_list_lock);
157 + /* shared lock: the user does not want to share the lock with a writer */
158 + while (hl->dh_writers) {
159 + spin_unlock(&dl->dl_list_lock);
160 + wait_event(hl->dh_wait, hl->dh_writers == 0);
161 + spin_lock(&dl->dl_list_lock);
165 + hl->dh_pid = current->pid;
166 + spin_unlock(&dl->dl_list_lock);
170 +EXPORT_SYMBOL(dynlock_lock);
176 + * the user has to specify the lockspace (dl) and a pointer to the lock structure
177 + * returned by dynlock_lock()
180 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
184 + BUG_ON(dl == NULL);
185 + BUG_ON(hl == NULL);
186 + BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
188 + if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
189 + printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);
191 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
192 + BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);
194 + spin_lock(&dl->dl_list_lock);
195 + if (hl->dh_writers) {
196 + BUG_ON(hl->dh_readers != 0);
198 + if (hl->dh_writers == 0)
200 + } else if (hl->dh_readers) {
202 + if (hl->dh_readers == 0)
209 + wake_up(&hl->dh_wait);
211 + if (--(hl->dh_refcount) == 0) {
212 + hl->dh_magic = DYNLOCK_HANDLE_DEAD;
213 + list_del(&hl->dh_list);
214 + kmem_cache_free(dynlock_cachep, hl);
216 + spin_unlock(&dl->dl_list_lock);
218 +EXPORT_SYMBOL(dynlock_unlock);
220 +int dynlock_is_locked(struct dynlock *dl, unsigned long value)
222 + struct dynlock_handle *hl;
225 + /* find requested lock in lockspace */
226 + spin_lock(&dl->dl_list_lock);
227 + BUG_ON(dl->dl_list.next == NULL);
228 + BUG_ON(dl->dl_list.prev == NULL);
229 + list_for_each_entry(hl, &dl->dl_list, dh_list) {
230 + BUG_ON(hl->dh_list.next == NULL);
231 + BUG_ON(hl->dh_list.prev == NULL);
232 + BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
233 + if (hl->dh_value == value && hl->dh_pid == current->pid) {
234 + /* lock is found */
239 + spin_unlock(&dl->dl_list_lock);
242 +EXPORT_SYMBOL(dynlock_is_locked);
243 diff -rupN 2.6.27.21/include/linux/dynlocks.h 2.6.27.21_1//include/linux/dynlocks.h
244 --- 2.6.27.21/include/linux/dynlocks.h 1970-01-01 05:30:00.000000000 +0530
245 +++ 2.6.27.21_1//include/linux/dynlocks.h 2009-08-23 10:40:07.000000000 +0530
247 +#ifndef _LINUX_DYNLOCKS_H
248 +#define _LINUX_DYNLOCKS_H
250 +#include <linux/list.h>
251 +#include <linux/wait.h>
253 +struct dynlock_handle;
256 + * lock's namespace:
258 + * - lock to protect this list
262 + struct list_head dl_list;
263 + spinlock_t dl_list_lock;
271 +int dynlock_cache_init(void);
272 +void dynlock_cache_exit(void);
273 +void dynlock_init(struct dynlock *dl);
274 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
275 + enum dynlock_type lt, gfp_t gfp);
276 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
277 +int dynlock_is_locked(struct dynlock *dl, unsigned long value);