- landing of b_hd_cleanup_merge to HEAD.
fs/lustre-release.git: lustre/kernel_patches/patches/dynamic-locks-2.4.24.patch
 fs/dcache.c              |    2 ++
 include/linux/dynlocks.h |   43 ++++++++
 lib/Makefile             |    4 ++--
 lib/dynlocks.c           |  188 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 235 insertions(+), 2 deletions(-)

Index: linux-2.4.24/include/linux/dynlocks.h
===================================================================
--- linux-2.4.24.orig/include/linux/dynlocks.h  2003-01-30 13:24:37.000000000 +0300
+++ linux-2.4.24/include/linux/dynlocks.h       2004-07-18 11:09:33.000000000 +0400
@@ -0,0 +1,43 @@
+#ifndef _LINUX_DYNLOCKS_H
+#define _LINUX_DYNLOCKS_H
+
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#define DYNLOCK_MAGIC          0xd19a10c
+#define DYNLOCK_MAGIC2         0xd1956ee
+
+struct dynlock;
+
+struct dynlock_member {
+       unsigned                dl_magic;
+       struct list_head        dl_list;
+       unsigned long           dl_value;       /* lock value */
+       int                     dl_refcount;    /* number of users */
+       int                     dl_readers;
+       int                     dl_writers;
+       int                     dl_pid;         /* holder of the lock */
+       wait_queue_head_t       dl_wait;
+};
+
+/*
+ * lock's namespace:
+ *   - list of locks
+ *   - lock to protect this list
+ */
+
+#define DYNLOCK_LIST_MAGIC     0x11ee91e6
+
+struct dynlock {
+       unsigned dl_magic;
+       struct list_head dl_list;
+       spinlock_t dl_list_lock;
+};
+
+void dynlock_init(struct dynlock *dl);
+void *dynlock_lock(struct dynlock *dl, unsigned long value, int rw, int gfp);
+void dynlock_unlock(struct dynlock *dl, void *lock);
+
+
+#endif
+
Index: linux-2.4.24/lib/dynlocks.c
===================================================================
--- linux-2.4.24.orig/lib/dynlocks.c    2003-01-30 13:24:37.000000000 +0300
+++ linux-2.4.24/lib/dynlocks.c 2004-07-18 11:23:01.000000000 +0400
@@ -0,0 +1,188 @@
+/*
+ * Dynamic Locks
+ *
+ * struct dynlock is a lockspace;
+ * one may request a lock (exclusive or shared) on some value
+ * in that lockspace
+ *
+ */
+
+#include <linux/dynlocks.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+static kmem_cache_t * dynlock_cachep = NULL;
+
+void __init dynlock_cache_init(void)
+{
+       printk(KERN_INFO "init dynlocks cache\n");
+       dynlock_cachep = kmem_cache_create("dynlock_cache",
+                                        sizeof(struct dynlock_member),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN,
+                                        NULL, NULL);
+       if (dynlock_cachep == NULL)
+               panic("Can't create dynlock cache");
+}
+
+/*
+ * dynlock_init
+ *
+ * initialize lockspace
+ *
+ */
+void dynlock_init(struct dynlock *dl)
+{
+       spin_lock_init(&dl->dl_list_lock);
+       INIT_LIST_HEAD(&dl->dl_list);
+       dl->dl_magic = DYNLOCK_LIST_MAGIC;
+}
+
+/*
+ * dynlock_lock
+ *
+ * acquires a lock (exclusive or shared) in the specified lockspace;
+ * each lock in a lockspace is allocated separately, so the caller has
+ * to specify GFP flags.
+ * the routine returns a pointer to the lock; this pointer is intended
+ * to be passed to dynlock_unlock()
+ *
+ */
+void *dynlock_lock(struct dynlock *dl, unsigned long value, int rw, int gfp)
+{
+       struct dynlock_member *nhl = NULL;
+       struct dynlock_member *hl;
+       struct list_head *cur;
+       int num = 0;
+
+       BUG_ON(dl == NULL);
+       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+repeat:
+       /* find requested lock in lockspace */
+       spin_lock(&dl->dl_list_lock);
+       BUG_ON(dl->dl_list.next == NULL);
+       BUG_ON(dl->dl_list.prev == NULL);
+       list_for_each(cur, &dl->dl_list) {
+               BUG_ON(cur->next == NULL);
+               BUG_ON(cur->prev == NULL);
+               hl = list_entry(cur, struct dynlock_member, dl_list);
+               BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
+               if (hl->dl_value == value) {
+                       /* lock is found */
+                       if (nhl) {
+                               /* someone else just allocated the
+                                * lock we didn't find and have just
+                                * created, so we drop ours
+                                */
+                               kmem_cache_free(dynlock_cachep, nhl);
+                               nhl = NULL;
+                       }
+                       hl->dl_refcount++;
+                       goto found;
+               }
+               num++;
+       }
+       /* lock not found */
+       if (nhl) {
+               /* we have already allocated a lock; use it */
+               hl = nhl;
+               nhl = NULL;
+               list_add(&hl->dl_list, &dl->dl_list);
+               goto found;
+       }
+       spin_unlock(&dl->dl_list_lock);
+
+       /* lock not found and we haven't allocated one yet; allocate it */
+       nhl = kmem_cache_alloc(dynlock_cachep, gfp);
+       if (nhl == NULL)
+               return NULL;
+       nhl->dl_refcount = 1;
+       nhl->dl_value = value;
+       nhl->dl_readers = 0;
+       nhl->dl_writers = 0;
+       nhl->dl_magic = DYNLOCK_MAGIC;
+       init_waitqueue_head(&nhl->dl_wait);
+
+       /* while the lock was being allocated, someone else may have
+        * allocated it and put it onto the list; check for this
+        */
+       goto repeat;
+
+found:
+       if (rw) {
+               /* exclusive lock: the user doesn't want to share the lock at all
+                * NOTE: one process may take the same lock several times;
+                * this functionality is useful for rename operations */
+               while ((hl->dl_writers && hl->dl_pid != current->pid) ||
+                               hl->dl_readers) {
+                       spin_unlock(&dl->dl_list_lock);
+                       wait_event(hl->dl_wait,
+                               hl->dl_writers == 0 && hl->dl_readers == 0);
+                       spin_lock(&dl->dl_list_lock);
+               }
+               hl->dl_writers++;
+       } else {
+               /* shared lock: the user does not want to share the lock with a writer */
+               while (hl->dl_writers) {
+                       spin_unlock(&dl->dl_list_lock);
+                       wait_event(hl->dl_wait, hl->dl_writers == 0);
+                       spin_lock(&dl->dl_list_lock);
+               }
+               hl->dl_readers++;
+       }
+       hl->dl_pid = current->pid;
+       spin_unlock(&dl->dl_list_lock);
+
+       return hl;
+}
+
+
+/*
+ * dynlock_unlock
+ *
+ * the caller has to specify the lockspace (dl) and the pointer to the
+ * lock structure returned by dynlock_lock()
+ *
+ */
+void dynlock_unlock(struct dynlock *dl, void *lock)
+{
+       struct dynlock_member *hl = lock;
+       int wakeup = 0;
+
+       BUG_ON(dl == NULL);
+       BUG_ON(hl == NULL);
+       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+       BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
+       BUG_ON(current->pid != hl->dl_pid);
+
+       spin_lock(&dl->dl_list_lock);
+       if (hl->dl_writers) {
+               BUG_ON(hl->dl_readers > 0 || hl->dl_readers < 0);
+               hl->dl_writers--;
+               if (hl->dl_writers == 0)
+                       wakeup = 1;
+       } else if (hl->dl_readers) {
+               hl->dl_readers--;
+               if (hl->dl_readers == 0)
+                       wakeup = 1;
+       } else {
+               BUG_ON(1);
+       }
+       if (wakeup) {
+               hl->dl_pid = 0;
+               wake_up(&hl->dl_wait);
+       }
+       if (--(hl->dl_refcount) == 0) {
+               hl->dl_magic = DYNLOCK_MAGIC2;
+               list_del(&hl->dl_list);
+               kmem_cache_free(dynlock_cachep, hl);
+       }
+       spin_unlock(&dl->dl_list_lock);
+}
+
+EXPORT_SYMBOL(dynlock_init);
+EXPORT_SYMBOL(dynlock_lock);
+EXPORT_SYMBOL(dynlock_unlock);
+
Index: linux-2.4.24/lib/Makefile
===================================================================
--- linux-2.4.24.orig/lib/Makefile      2004-06-24 09:06:32.000000000 +0400
+++ linux-2.4.24/lib/Makefile   2004-07-16 15:54:06.000000000 +0400
@@ -9,10 +9,10 @@
 L_TARGET := lib.a
 
 export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o \
-              rbtree.o crc32.o firmware_class.o
+              rbtree.o crc32.o firmware_class.o dynlocks.o
 
 obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o \
-        bust_spinlocks.o rbtree.o dump_stack.o
+        bust_spinlocks.o rbtree.o dump_stack.o dynlocks.o
 
 obj-$(CONFIG_FW_LOADER) += firmware_class.o
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
Index: linux-2.4.24/fs/dcache.c
===================================================================
--- linux-2.4.24.orig/fs/dcache.c       2004-07-16 12:35:54.000000000 +0400
+++ linux-2.4.24/fs/dcache.c    2004-07-16 15:54:06.000000000 +0400
@@ -1274,6 +1274,7 @@
 extern void bdev_cache_init(void);
 extern void cdev_cache_init(void);
 extern void iobuf_cache_init(void);
+extern void dynlock_cache_init(void);
 
 void __init vfs_caches_init(unsigned long mempages)
 {
@@ -1310,4 +1311,5 @@
        bdev_cache_init();
        cdev_cache_init();
        iobuf_cache_init();
+       dynlock_cache_init();
 }
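
For illustration, here is a minimal, hypothetical caller sketch (not part of the patch) showing how the dynlock API added above might be used, for example by a filesystem that wants to serialize operations on one directory block at a time. The names my_dir_info, di_blocks and my_modify_block are invented for this example, and the use of GFP_KERNEL assumes the caller runs in process context.

/* hypothetical usage sketch -- not part of the patch above */
#include <linux/dynlocks.h>
#include <linux/errno.h>
#include <linux/slab.h>         /* pulls in the GFP_* flags */

struct my_dir_info {
        struct dynlock  di_blocks;      /* one lockspace per directory */
};

static void my_dir_info_init(struct my_dir_info *di)
{
        /* set up the (initially empty) lockspace */
        dynlock_init(&di->di_blocks);
}

static int my_modify_block(struct my_dir_info *di, unsigned long blkno)
{
        void *lock;

        /* rw != 0 asks for an exclusive lock on this block number;
         * the per-value lock is allocated with the given GFP flags */
        lock = dynlock_lock(&di->di_blocks, blkno, 1, GFP_KERNEL);
        if (lock == NULL)
                return -ENOMEM;

        /* ... modify the block; other lockers of blkno wait here ... */

        dynlock_unlock(&di->di_blocks, lock);
        return 0;
}

A shared (reader) lock would be requested the same way with rw == 0; readers of the same value then run concurrently, while a writer of that value excludes them.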