lustre/kernel_patches/patches/dynamic-locks-2.6.9.patch
 fs/dcache.c              |    2 ++
 include/linux/dynlocks.h |   37 ++++++
 lib/Makefile             |    2 +-
 lib/dynlocks.c           |  226 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 266 insertions(+), 1 deletion(-)

Index: linux/fs/dcache.c
===================================================================
--- linux.orig/fs/dcache.c
+++ linux/fs/dcache.c
@@ -1678,6 +1678,7 @@ EXPORT_SYMBOL(d_genocide);
 
 extern void bdev_cache_init(void);
 extern void chrdev_init(void);
+extern void dynlock_cache_init(void);
 
 void __init vfs_caches_init_early(void)
 {
@@ -1707,6 +1708,7 @@ void __init vfs_caches_init(unsigned lon
 	mnt_init(mempages);
 	bdev_cache_init();
 	chrdev_init();
+	dynlock_cache_init();
 }
 
 EXPORT_SYMBOL(d_alloc);
Index: linux/include/linux/dynlocks.h
===================================================================
--- linux.orig/include/linux/dynlocks.h
+++ linux/include/linux/dynlocks.h
@@ -0,0 +1,37 @@
+#ifndef _LINUX_DYNLOCKS_H
+#define _LINUX_DYNLOCKS_H
+
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#define DYNLOCK_MAGIC		0xd19a10c
+#define DYNLOCK_MAGIC2		0xd1956ee
+
+struct dynlock;
+struct dynlock_handle;
+
+/*
+ * lock's namespace:
+ *   - list of locks
+ *   - lock to protect this list
+ */
+
+struct dynlock {
+	unsigned dl_magic;
+	struct list_head dl_list;
+	spinlock_t dl_list_lock;
+};
+
+enum dynlock_type {
+	DLT_WRITE,
+	DLT_READ
+};
+
+void dynlock_init(struct dynlock *dl);
+struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
+				    enum dynlock_type lt, int gfp);
+void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
+
+int dynlock_is_locked(struct dynlock *dl, unsigned long value);
+#endif
+
Index: linux/lib/Makefile
===================================================================
--- linux.orig/lib/Makefile
+++ linux/lib/Makefile
@@ -6,7 +6,7 @@
 lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
	 kobject.o kref.o idr.o div64.o parser.o int_sqrt.o \
-	 bitmap.o extable.o
+	 bitmap.o extable.o dynlocks.o
 
 obj-y := sort.o
 
Index: linux/lib/dynlocks.c
===================================================================
--- linux.orig/lib/dynlocks.c
+++ linux/lib/dynlocks.c
@@ -0,0 +1,226 @@
+/*
+ * Dynamic Locks
+ *
+ * struct dynlock is a lockspace;
+ * one may request a lock (exclusive or shared) for some value
+ * in that lockspace
+ *
+ */
+
+#include <linux/dynlocks.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+static kmem_cache_t * dynlock_cachep = NULL;
+
+struct dynlock_handle {
+	unsigned		dl_magic;
+	struct list_head	dl_list;
+	unsigned long		dl_value;	/* lock value */
+	int			dl_refcount;	/* number of users */
+	int			dl_readers;
+	int			dl_writers;
+	int			dl_pid;		/* holder of the lock */
+	wait_queue_head_t	dl_wait;
+};
+
+#define DYNLOCK_LIST_MAGIC	0x11ee91e6
+
+void __init dynlock_cache_init(void)
+{
+	printk(KERN_INFO "init dynlocks cache\n");
+	dynlock_cachep = kmem_cache_create("dynlock_cache",
+					 sizeof(struct dynlock_handle),
+					 0,
+					 SLAB_HWCACHE_ALIGN,
+					 NULL, NULL);
+	if (dynlock_cachep == NULL)
+		panic("Can't create dynlock cache");
+}
+
+/*
+ * dynlock_init
+ *
+ * initialize lockspace
+ *
+ */
+void dynlock_init(struct dynlock *dl)
+{
+	spin_lock_init(&dl->dl_list_lock);
+	INIT_LIST_HEAD(&dl->dl_list);
+	dl->dl_magic = DYNLOCK_LIST_MAGIC;
+}
+
+/*
+ * dynlock_lock
+ *
+ * acquires a lock (exclusive or shared) in the specified lockspace;
+ * each lock in a lockspace is allocated separately, so the caller has
+ * to specify GFP flags.
+ * the routine returns a pointer to the lock; this pointer is intended
+ * to be passed to dynlock_unlock()
+ *
+ */
+struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
+				    enum dynlock_type lt, int gfp)
+{
+	struct dynlock_handle *nhl = NULL;
+	struct dynlock_handle *hl;
+
+	BUG_ON(dl == NULL);
+	BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+
+repeat:
+	/* find requested lock in lockspace */
+	spin_lock(&dl->dl_list_lock);
+	BUG_ON(dl->dl_list.next == NULL);
+	BUG_ON(dl->dl_list.prev == NULL);
+	list_for_each_entry(hl, &dl->dl_list, dl_list) {
+		BUG_ON(hl->dl_list.next == NULL);
+		BUG_ON(hl->dl_list.prev == NULL);
+		BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
+		if (hl->dl_value == value) {
+			/* lock is found */
+			if (nhl) {
+				/* someone else allocated the lock
+				 * we failed to find earlier, so drop
+				 * the one we just created
+				 */
+				kmem_cache_free(dynlock_cachep, nhl);
+				nhl = NULL;
+			}
+			hl->dl_refcount++;
+			goto found;
+		}
+	}
+	/* lock not found */
+	if (nhl) {
+		/* we have already allocated a lock; use it */
+		hl = nhl;
+		nhl = NULL;
+		list_add(&hl->dl_list, &dl->dl_list);
+		goto found;
+	}
+	spin_unlock(&dl->dl_list_lock);
+
+	/* lock not found and we haven't allocated a lock yet; allocate one */
+	nhl = kmem_cache_alloc(dynlock_cachep, gfp);
+	if (nhl == NULL)
+		return NULL;
+	nhl->dl_refcount = 1;
+	nhl->dl_value = value;
+	nhl->dl_readers = 0;
+	nhl->dl_writers = 0;
+	nhl->dl_magic = DYNLOCK_MAGIC;
+	init_waitqueue_head(&nhl->dl_wait);
+
+	/* while the lock was being allocated, someone else may have
+	 * allocated it and put it onto the list; check for this
+	 */
+	goto repeat;
+
+found:
+	if (lt == DLT_WRITE) {
+		/* exclusive lock: the caller does not want to share the lock.
+		 * NOTE: one process may take the same lock several times;
+		 * this functionality is useful for rename operations */
+		while ((hl->dl_writers && hl->dl_pid != current->pid) ||
+				hl->dl_readers) {
+			spin_unlock(&dl->dl_list_lock);
+			wait_event(hl->dl_wait,
+				hl->dl_writers == 0 && hl->dl_readers == 0);
+			spin_lock(&dl->dl_list_lock);
+		}
+		hl->dl_writers++;
+	} else {
+		/* shared lock: the caller does not want to share with a writer */
+		while (hl->dl_writers) {
+			spin_unlock(&dl->dl_list_lock);
+			wait_event(hl->dl_wait, hl->dl_writers == 0);
+			spin_lock(&dl->dl_list_lock);
+		}
+		hl->dl_readers++;
+	}
+	hl->dl_pid = current->pid;
+	spin_unlock(&dl->dl_list_lock);
+
+	return hl;
+}
+
+
+/*
+ * dynlock_unlock
+ *
+ * the caller has to specify the lockspace (dl) and the pointer to the
+ * lock structure returned by dynlock_lock()
+ *
+ */
+void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
+{
+	int wakeup = 0;
+
+	BUG_ON(dl == NULL);
+	BUG_ON(hl == NULL);
+	BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+
+	if (hl->dl_magic != DYNLOCK_MAGIC)
+		printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dl_magic);
+
+	BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
+	BUG_ON(hl->dl_writers != 0 && current->pid != hl->dl_pid);
+
+	spin_lock(&dl->dl_list_lock);
+	if (hl->dl_writers) {
+		BUG_ON(hl->dl_readers != 0);
+		hl->dl_writers--;
+		if (hl->dl_writers == 0)
+			wakeup = 1;
+	} else if (hl->dl_readers) {
+		hl->dl_readers--;
+		if (hl->dl_readers == 0)
+			wakeup = 1;
+	} else {
+		BUG();
+	}
+	if (wakeup) {
+		hl->dl_pid = 0;
+		wake_up(&hl->dl_wait);
+	}
+	if (--(hl->dl_refcount) == 0) {
+		hl->dl_magic = DYNLOCK_MAGIC2;
+		list_del(&hl->dl_list);
+		kmem_cache_free(dynlock_cachep, hl);
+	}
+	spin_unlock(&dl->dl_list_lock);
+}
+
+int dynlock_is_locked(struct dynlock *dl, unsigned long value)
+{
+	struct dynlock_handle *hl;
+	int result;
+
+	result = 0;
+	/* find requested lock in lockspace */
+	spin_lock(&dl->dl_list_lock);
+	BUG_ON(dl->dl_list.next == NULL);
+	BUG_ON(dl->dl_list.prev == NULL);
+	list_for_each_entry(hl, &dl->dl_list, dl_list) {
+		BUG_ON(hl->dl_list.next == NULL);
+		BUG_ON(hl->dl_list.prev == NULL);
+		BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
+		if (hl->dl_value == value && hl->dl_pid == current->pid) {
+			/* lock is found */
+			result = 1;
+			break;
+		}
+	}
+	spin_unlock(&dl->dl_list_lock);
+	return result;
+}
+
+EXPORT_SYMBOL(dynlock_init);
+EXPORT_SYMBOL(dynlock_lock);
+EXPORT_SYMBOL(dynlock_unlock);
+EXPORT_SYMBOL(dynlock_is_locked);
+
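
The doc comments above describe the calling convention, but the patch itself contains no caller. The following is a minimal usage sketch, not part of the patch: the lockspace name, the bucket values, and the error handling are hypothetical, and only the dynlock_init()/dynlock_lock()/dynlock_unlock() interface added above is assumed.

/* Illustrative sketch only: a hypothetical caller of the dynlocks API.
 * "my_lockspace" and "bucket" are made-up names, not from the patch. */
#include <linux/dynlocks.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static struct dynlock my_lockspace;

static void my_setup(void)
{
	/* a lockspace must be initialized once before any dynlock_lock() call */
	dynlock_init(&my_lockspace);
}

static int my_write_bucket(unsigned long bucket)
{
	struct dynlock_handle *h;

	/* exclusive lock on this value; may sleep and allocate (GFP_KERNEL) */
	h = dynlock_lock(&my_lockspace, bucket, DLT_WRITE, GFP_KERNEL);
	if (h == NULL)
		return -ENOMEM;

	/* ... modify whatever data this value guards ... */

	/* the handle returned by dynlock_lock() is what gets unlocked */
	dynlock_unlock(&my_lockspace, h);
	return 0;
}

static int my_read_bucket(unsigned long bucket)
{
	struct dynlock_handle *h;

	/* shared lock: concurrent readers are allowed, writers are excluded */
	h = dynlock_lock(&my_lockspace, bucket, DLT_READ, GFP_KERNEL);
	if (h == NULL)
		return -ENOMEM;

	/* ... read the data guarded by this value ... */

	dynlock_unlock(&my_lockspace, h);
	return 0;
}

Because locks are looked up by value and allocated on demand, a caller does not preallocate one lock per object: a dynlock_handle exists only while the corresponding value is actually held, and is freed when its refcount drops to zero.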
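The exclusive-lock note in dynlock_lock() (one process may take the same lock several times, which is useful for rename operations) is easy to miss. The sketch below, again with hypothetical names and the same includes as the sketch above, shows how a single task can hold two DLT_WRITE handles that may resolve to the same value without deadlocking, and how dynlock_is_locked() reports ownership.

/* Illustrative sketch of the recursive DLT_WRITE behaviour: if "src" and
 * "dst" happen to map to the same value, the second request is granted at
 * once because the holder's pid matches.  Names are hypothetical. */
static int my_rename_buckets(struct dynlock *dl, unsigned long src,
			     unsigned long dst)
{
	struct dynlock_handle *hs, *hd;

	hs = dynlock_lock(dl, src, DLT_WRITE, GFP_KERNEL);
	if (hs == NULL)
		return -ENOMEM;

	hd = dynlock_lock(dl, dst, DLT_WRITE, GFP_KERNEL);
	if (hd == NULL) {
		dynlock_unlock(dl, hs);
		return -ENOMEM;
	}

	/* the current task now holds both values */
	BUG_ON(!dynlock_is_locked(dl, src));
	BUG_ON(!dynlock_is_locked(dl, dst));

	/* ... rename-style update covering both values ... */

	/* each handle is released individually; if src == dst the underlying
	 * lock simply has its writer count and refcount dropped twice */
	dynlock_unlock(dl, hd);
	dynlock_unlock(dl, hs);
	return 0;
}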