 fs/dcache.c              |    2 ++
 include/linux/dynlocks.h |   43 ++++++++
 lib/Makefile             |    2 +-
 lib/dynlocks.c           |  187 ++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 233 insertions(+), 1 deletion(-)

Index: linux-2.6.7/include/linux/dynlocks.h
===================================================================
--- linux-2.6.7.orig/include/linux/dynlocks.h   2003-01-30 13:24:37.000000000 +0300
+++ linux-2.6.7/include/linux/dynlocks.h        2004-09-07 14:12:39.000000000 +0400
@@ -0,0 +1,43 @@
+#ifndef _LINUX_DYNLOCKS_H
+#define _LINUX_DYNLOCKS_H
+
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#define DYNLOCK_MAGIC          0xd19a10c
+#define DYNLOCK_MAGIC2         0xd1956ee
+
+struct dynlock;
+
+struct dynlock_member {
+       unsigned                dl_magic;
+       struct list_head        dl_list;
+       unsigned long           dl_value;       /* lock value */
+       int                     dl_refcount;    /* number of users */
+       int                     dl_readers;
+       int                     dl_writers;
+       int                     dl_pid;         /* holder of the lock */
+       wait_queue_head_t       dl_wait;
+};
+
+/*
+ * lock's namespace:
+ *   - list of locks
+ *   - lock to protect this list
+ */
+
+#define DYNLOCK_LIST_MAGIC     0x11ee91e6
+
+struct dynlock {
+       unsigned dl_magic;
+       struct list_head dl_list;
+       spinlock_t dl_list_lock;
+};
+
+void dynlock_init(struct dynlock *dl);
+void *dynlock_lock(struct dynlock *dl, unsigned long value, int rw, int gfp);
+void dynlock_unlock(struct dynlock *dl, void *lock);
+
+
+#endif
+
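
For orientation, here is a minimal usage sketch of the interface declared above. It is not part of the patch: the lockspace name, the helper functions, the use of an inode number as the lock value, and GFP_KERNEL are illustrative assumptions only.

#include <linux/dynlocks.h>
#include <linux/fs.h>
#include <linux/slab.h>

/* illustrative only: one lockspace keyed by inode number */
static struct dynlock example_lockspace;

static void example_setup(void)
{
	/* initialize the lockspace once, before any dynlock_lock() call */
	dynlock_init(&example_lockspace);
}

static int example_dir_op(struct inode *dir)
{
	void *lock;

	/* rw != 0 requests an exclusive lock on this inode number;
	 * the lock itself is allocated with the given GFP flags */
	lock = dynlock_lock(&example_lockspace, dir->i_ino, 1, GFP_KERNEL);
	if (lock == NULL)
		return -ENOMEM;

	/* ... operate on the directory under the lock ... */

	dynlock_unlock(&example_lockspace, lock);
	return 0;
}
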
Index: linux-2.6.7/lib/dynlocks.c
===================================================================
--- linux-2.6.7.orig/lib/dynlocks.c     2003-01-30 13:24:37.000000000 +0300
+++ linux-2.6.7/lib/dynlocks.c  2004-09-07 14:12:39.000000000 +0400
@@ -0,0 +1,187 @@
+/*
+ * Dynamic Locks
+ *
+ * a struct dynlock is a lockspace;
+ * one may request a lock (exclusive or shared) for some value
+ * in that lockspace
+ *
+ */
+
+#include <linux/dynlocks.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+static kmem_cache_t * dynlock_cachep = NULL;
+
+void __init dynlock_cache_init(void)
+{
+       printk(KERN_INFO "init dynlocks cache\n");
+       dynlock_cachep = kmem_cache_create("dynlock_cache",
+                                        sizeof(struct dynlock_member),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN,
+                                        NULL, NULL);
+       if (dynlock_cachep == NULL)
+               panic("Can't create dynlock cache");
+}
+
+/*
+ * dynlock_init
+ *
+ * initialize the lockspace
+ *
+ */
+void dynlock_init(struct dynlock *dl)
+{
+       spin_lock_init(&dl->dl_list_lock);
+       INIT_LIST_HEAD(&dl->dl_list);
+       dl->dl_magic = DYNLOCK_LIST_MAGIC;
+}
+
+/*
+ * dynlock_lock
+ *
+ * acquires a lock (exclusive or shared) in the specified lockspace.
+ * each lock in the lockspace is allocated separately, so the user has
+ * to specify GFP flags.
+ * the routine returns a pointer to the lock. this pointer is intended
+ * to be passed to dynlock_unlock
+ *
+ */
+void *dynlock_lock(struct dynlock *dl, unsigned long value, int rw, int gfp)
+{
+       struct dynlock_member *nhl = NULL;
+       struct dynlock_member *hl;
+       struct list_head *cur;
+       int num = 0;
+
+       BUG_ON(dl == NULL);
+       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+repeat:
+       /* find the requested lock in the lockspace */
+       spin_lock(&dl->dl_list_lock);
+       BUG_ON(dl->dl_list.next == NULL);
+       BUG_ON(dl->dl_list.prev == NULL);
+       list_for_each(cur, &dl->dl_list) {
+               BUG_ON(cur->next == NULL);
+               BUG_ON(cur->prev == NULL);
+               hl = list_entry(cur, struct dynlock_member, dl_list);
+               BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
+               if (hl->dl_value == value) {
+                       /* lock is found */
+                       if (nhl) {
+                               /* someone else allocated the lock we
+                                * didn't find above while we were
+                                * allocating ours, so drop ours
+                                */
+                               kmem_cache_free(dynlock_cachep, nhl);
+                               nhl = NULL;
+                       }
+                       hl->dl_refcount++;
+                       goto found;
+               }
+               num++;
+       }
+       /* lock not found */
+       if (nhl) {
+               /* we have already allocated a lock. use it */
+               hl = nhl;
+               nhl = NULL;
+               list_add(&hl->dl_list, &dl->dl_list);
+               goto found;
+       }
+       spin_unlock(&dl->dl_list_lock);
+
+       /* lock not found and we haven't allocated one yet. allocate it */
+       nhl = kmem_cache_alloc(dynlock_cachep, gfp);
+       if (nhl == NULL)
+               return NULL;
+       nhl->dl_refcount = 1;
+       nhl->dl_value = value;
+       nhl->dl_readers = 0;
+       nhl->dl_writers = 0;
+       nhl->dl_magic = DYNLOCK_MAGIC;
+       init_waitqueue_head(&nhl->dl_wait);
+
+       /* while the lock was being allocated, someone else may have
+        * allocated it and put it onto the list. check for this situation
+        */
+       goto repeat;
+
+found:
+       if (rw) {
+               /* exclusive lock: the user doesn't want to share the lock at all
+                * NOTE: one process may take the same lock several times;
+                * this functionality is useful for rename operations */
+               while ((hl->dl_writers && hl->dl_pid != current->pid) ||
+                               hl->dl_readers) {
+                       spin_unlock(&dl->dl_list_lock);
+                       wait_event(hl->dl_wait,
+                               hl->dl_writers == 0 && hl->dl_readers == 0);
+                       spin_lock(&dl->dl_list_lock);
+               }
+               hl->dl_writers++;
+       } else {
+               /* shared lock: the user does not want to share the lock with a writer */
+               while (hl->dl_writers) {
+                       spin_unlock(&dl->dl_list_lock);
+                       wait_event(hl->dl_wait, hl->dl_writers == 0);
+                       spin_lock(&dl->dl_list_lock);
+               }
+               hl->dl_readers++;
+       }
+       hl->dl_pid = current->pid;
+       spin_unlock(&dl->dl_list_lock);
+
+       return hl;
+}
+
+
+/*
+ * dynlock_unlock
+ *
+ * the user has to specify the lockspace (dl) and the pointer to the lock
+ * structure returned by dynlock_lock()
+ *
+ */
+void dynlock_unlock(struct dynlock *dl, void *lock)
+{
+       struct dynlock_member *hl = lock;
+       int wakeup = 0;
+
+       BUG_ON(dl == NULL);
+       BUG_ON(hl == NULL);
+       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+       BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
+       BUG_ON(current->pid != hl->dl_pid);
+
+       spin_lock(&dl->dl_list_lock);
+       if (hl->dl_writers) {
+               BUG_ON(hl->dl_readers > 0 || hl->dl_readers < 0);
+               hl->dl_writers--;
+               if (hl->dl_writers == 0)
+                       wakeup = 1;
+       } else if (hl->dl_readers) {
+               hl->dl_readers--;
+               if (hl->dl_readers == 0)
+                       wakeup = 1;
+       } else {
+               BUG_ON(1);
+       }
+       if (wakeup) {
+               hl->dl_pid = 0;
+               wake_up(&hl->dl_wait);
+       }
+       if (--(hl->dl_refcount) == 0) {
+               hl->dl_magic = DYNLOCK_MAGIC2;
+               list_del(&hl->dl_list);
+               kmem_cache_free(dynlock_cachep, hl);
+       }
+       spin_unlock(&dl->dl_list_lock);
+}
+
+EXPORT_SYMBOL(dynlock_init);
+EXPORT_SYMBOL(dynlock_lock);
+EXPORT_SYMBOL(dynlock_unlock);
+
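
As the comment in dynlock_lock() notes, a process may take the same exclusive lock more than once, which matters for rename-style operations where two names can map to the same lock value. Below is a hedged sketch of that pattern against the API above; the lockspace, function names, and hash values are illustrative assumptions, and lock-ordering between concurrent callers is deliberately left out for brevity.

#include <linux/dynlocks.h>
#include <linux/slab.h>

static struct dynlock name_lockspace;	/* assumed already dynlock_init()ed */

static int example_rename(unsigned long src_hash, unsigned long tgt_hash)
{
	void *src, *tgt;

	src = dynlock_lock(&name_lockspace, src_hash, 1, GFP_KERNEL);
	if (src == NULL)
		return -ENOMEM;

	/* if both names hash to the same value, this is the same process
	 * re-taking the exclusive lock it already holds, which is allowed */
	tgt = dynlock_lock(&name_lockspace, tgt_hash, 1, GFP_KERNEL);
	if (tgt == NULL) {
		dynlock_unlock(&name_lockspace, src);
		return -ENOMEM;
	}

	/* ... move the entry ... */

	dynlock_unlock(&name_lockspace, tgt);
	dynlock_unlock(&name_lockspace, src);
	return 0;
}
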
Index: linux-2.6.7/lib/Makefile
===================================================================
--- linux-2.6.7.orig/lib/Makefile       2004-08-26 17:12:16.000000000 +0400
+++ linux-2.6.7/lib/Makefile    2004-09-07 14:13:03.000000000 +0400
@@ -6,7 +6,7 @@
 lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
         bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
         kobject.o idr.o div64.o parser.o int_sqrt.o \
-        bitmap.o extable.o
+        bitmap.o extable.o dynlocks.o
 
 # hack for now till some static code uses krefs, then it can move up above...
 obj-y += kref.o
Index: linux-2.6.7/fs/dcache.c
===================================================================
--- linux-2.6.7.orig/fs/dcache.c        2004-09-06 22:53:19.000000000 +0400
+++ linux-2.6.7/fs/dcache.c     2004-09-07 14:13:42.000000000 +0400
@@ -1654,6 +1654,7 @@
 
 extern void bdev_cache_init(void);
 extern void chrdev_init(void);
+extern void dynlock_cache_init(void);
 
 void __init vfs_caches_init(unsigned long mempages)
 {
@@ -1677,6 +1678,7 @@
        mnt_init(mempages);
        bdev_cache_init();
        chrdev_init();
+       dynlock_cache_init();
 }
 
 EXPORT_SYMBOL(d_alloc);