Whamcloud - gitweb
Add journal checksum feature.
[fs/lustre-release.git] / lustre / kernel_patches / patches / dynamic-locks-2.6-fc3.patch
1  fs/dcache.c              |    2 ++
2  include/linux/dynlocks.h |   37 ++++++++++++
3  lib/Makefile             |    2 +-
4  lib/dynlocks.c           |  203 +++++++++++++++++++++++++++++++++++++++++++++++
5  4 files changed, 243 insertions(+), 1 deletion(-)
5
6 Index: linux-2.6.10/fs/dcache.c
7 ===================================================================
8 --- linux-2.6.10.orig/fs/dcache.c       2006-10-21 11:52:54.000000000 +0800
9 +++ linux-2.6.10/fs/dcache.c    2006-10-21 13:04:55.000000000 +0800
10 @@ -1664,6 +1664,7 @@
11  
12  extern void bdev_cache_init(void);
13  extern void chrdev_init(void);
14 +extern void dynlock_cache_init(void);
15  
16  void __init vfs_caches_init_early(void)
17  {
18 @@ -1693,6 +1694,7 @@
19         mnt_init(mempages);
20         bdev_cache_init();
21         chrdev_init();
22 +       dynlock_cache_init();
23  }
24  
25  EXPORT_SYMBOL(d_alloc);
26 Index: linux-2.6.10/include/linux/dynlocks.h
27 ===================================================================
28 --- linux-2.6.10.orig/include/linux/dynlocks.h  2006-05-31 09:15:07.000000000 +0800
29 +++ linux-2.6.10/include/linux/dynlocks.h       2006-10-21 13:04:55.000000000 +0800
30 @@ -0,0 +1,37 @@
31 +#ifndef _LINUX_DYNLOCKS_H
32 +#define _LINUX_DYNLOCKS_H
33 +
34 +#include <linux/list.h>
35 +#include <linux/wait.h>
36 +
37 +#define DYNLOCK_MAGIC          0xd19a10c
38 +#define DYNLOCK_MAGIC2         0xd1956ee
39 +
40 +struct dynlock;
41 +struct dynlock_handle;
42 +
43 +/*
44 + * lock's namespace:
45 + *   - list of locks
46 + *   - lock to protect this list
47 + */
48 +
49 +struct dynlock {
50 +       unsigned dl_magic;
51 +       struct list_head dl_list;
52 +       spinlock_t dl_list_lock;
53 +};
54 +
55 +enum dynlock_type {
56 +       DLT_NONE,
57 +       DLT_WRITE,
58 +       DLT_READ
59 +};
60 +
61 +void dynlock_init(struct dynlock *dl);
62 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
63 +                                   enum dynlock_type lt, int gfp);
64 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
65 +
66 +#endif
67 +
68 Index: linux-2.6.10/lib/Makefile
69 ===================================================================
70 --- linux-2.6.10.orig/lib/Makefile      2004-12-25 05:33:50.000000000 +0800
71 +++ linux-2.6.10/lib/Makefile   2006-10-21 13:08:20.000000000 +0800
72 @@ -5,7 +5,7 @@
73  lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
74          bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
75          kobject.o kref.o idr.o div64.o parser.o int_sqrt.o \
76 -        bitmap.o extable.o kobject_uevent.o
77 +        bitmap.o extable.o kobject_uevent.o dynlocks.o
78  
79  ifeq ($(CONFIG_DEBUG_KOBJECT),y)
80  CFLAGS_kobject.o += -DDEBUG
81 Index: linux-2.6.10/lib/dynlocks.c
82 ===================================================================
83 --- linux-2.6.10.orig/lib/dynlocks.c    2006-05-31 09:15:07.000000000 +0800
84 +++ linux-2.6.10/lib/dynlocks.c 2006-10-21 13:04:55.000000000 +0800
85 @@ -0,0 +1,203 @@
86 +/*
87 + * Dynamic Locks
88 + *
89 + * struct dynlock is lockspace
90 + * one may request lock (exclusive or shared) for some value
91 + * in that lockspace
92 + *
93 + */
94 +
95 +#include <linux/dynlocks.h>
96 +#include <linux/module.h>
97 +#include <linux/slab.h>
98 +#include <linux/sched.h>
99 +
100 +static kmem_cache_t * dynlock_cachep = NULL;
101 +
102 +struct dynlock_handle {
103 +       unsigned                dl_magic;
104 +       struct list_head        dl_list;
105 +       unsigned long           dl_value;       /* lock value */
106 +       int                     dl_refcount;    /* number of users */
107 +       int                     dl_readers;
108 +       int                     dl_writers;
109 +       int                     dl_pid;         /* holder of the lock */
110 +       wait_queue_head_t       dl_wait;
111 +};
112 +
113 +#define DYNLOCK_LIST_MAGIC     0x11ee91e6
114 +
115 +void __init dynlock_cache_init(void)
116 +{
117 +       printk(KERN_INFO "init dynlocks cache\n");
118 +       dynlock_cachep = kmem_cache_create("dynlock_cache",
119 +                                        sizeof(struct dynlock_handle),
120 +                                        0,
121 +                                        SLAB_HWCACHE_ALIGN,
122 +                                        NULL, NULL);
123 +       if (dynlock_cachep == NULL)
124 +               panic("Can't create dynlock cache");
125 +}
126 +
127 +/*
128 + * dynlock_init
129 + *
130 + * initialize lockspace
131 + *
132 + */
133 +void dynlock_init(struct dynlock *dl)
134 +{
135 +       spin_lock_init(&dl->dl_list_lock);
136 +       INIT_LIST_HEAD(&dl->dl_list);
137 +       dl->dl_magic = DYNLOCK_LIST_MAGIC;
138 +}
139 +
140 +/*
141 + * dynlock_lock
142 + *
143 + * acquires lock (exclusive or shared) in specified lockspace
144 + * each lock in lockspace is allocated separately, so users have
145 + * to specify GFP flags.
146 + * routine returns pointer to lock. this pointer is intended to
147 + * be passed to dynlock_unlock
148 + *
149 + */
150 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
151 +                                   enum dynlock_type lt, int gfp)
152 +{
153 +       struct dynlock_handle *nhl = NULL;
154 +       struct dynlock_handle *hl;
155 +       struct list_head *cur;
156 +       int num = 0;
157 +
158 +       BUG_ON(dl == NULL);
159 +       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
160 +
161 +       if (lt == DLT_NONE)
162 +               return NULL;
163 +repeat:
164 +       /* find requested lock in lockspace */
165 +       spin_lock(&dl->dl_list_lock);
166 +       BUG_ON(dl->dl_list.next == NULL);
167 +       BUG_ON(dl->dl_list.prev == NULL);
168 +       list_for_each(cur, &dl->dl_list) {
169 +               BUG_ON(cur->next == NULL);
170 +               BUG_ON(cur->prev == NULL);
171 +               hl = list_entry(cur, struct dynlock_handle, dl_list);
172 +               BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
173 +               if (hl->dl_value == value) {
174 +                       /* lock is found */
175 +                       if (nhl) {
176 +                               /* someone else just allocated
177 +                                * lock we didn't find and just created
178 +                                * so, we drop our lock
179 +                                */
180 +                               kmem_cache_free(dynlock_cachep, nhl);
181 +                               nhl = NULL;
182 +                       }
183 +                       hl->dl_refcount++;
184 +                       goto found;
185 +               }
186 +               num++;
187 +       }
188 +       /* lock not found */
189 +       if (nhl) {
190 +               /* we already have allocated lock. use it */
191 +               hl = nhl;
192 +               nhl = NULL;
193 +               list_add(&hl->dl_list, &dl->dl_list);
194 +               goto found;
195 +       }
196 +       spin_unlock(&dl->dl_list_lock);
197 +       
198 +       /* lock not found and we haven't allocated lock yet. allocate it */
199 +       nhl = kmem_cache_alloc(dynlock_cachep, gfp);
200 +       if (nhl == NULL)
201 +               return NULL;
202 +       nhl->dl_refcount = 1;
203 +       nhl->dl_value = value;
204 +       nhl->dl_readers = 0;
205 +       nhl->dl_writers = 0;
206 +       nhl->dl_magic = DYNLOCK_MAGIC;
207 +       init_waitqueue_head(&nhl->dl_wait);
208 +
209 +       /* while lock is being allocated, someone else may allocate it
210 +        * and put it onto the list. check this situation
211 +        */
212 +       goto repeat;
213 +
214 +found:
215 +       if (lt == DLT_WRITE) {
216 +               /* exclusive lock: the user doesn't want to share the lock
217 +                * NOTE: one process may take the same lock several times
218 +                * this functionality is useful for rename operations */
219 +               while ((hl->dl_writers && hl->dl_pid != current->pid) ||
220 +                               hl->dl_readers) {
221 +                       spin_unlock(&dl->dl_list_lock);
222 +                       wait_event(hl->dl_wait,
223 +                               hl->dl_writers == 0 && hl->dl_readers == 0);
224 +                       spin_lock(&dl->dl_list_lock);
225 +               }
226 +               hl->dl_writers++;
227 +       } else {
228 +               /* shared lock: the user does not want to share the lock with a writer */
229 +               while (hl->dl_writers) {
230 +                       spin_unlock(&dl->dl_list_lock);
231 +                       wait_event(hl->dl_wait, hl->dl_writers == 0);
232 +                       spin_lock(&dl->dl_list_lock);
233 +               }
234 +               hl->dl_readers++;
235 +       }
236 +       hl->dl_pid = current->pid;
237 +       spin_unlock(&dl->dl_list_lock);
238 +
239 +       return hl;
240 +}
241 +
242 +
243 +/*
244 + * dynlock_unlock
245 + *
246 + * the caller has to specify the lockspace (dl) and the pointer to the lock
247 + * returned by dynlock_lock()
248 + *
249 + */
250 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
251 +{
252 +       int wakeup = 0;
253 +       
254 +       BUG_ON(dl == NULL);
255 +       BUG_ON(hl == NULL);
256 +       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
257 +       BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
258 +       BUG_ON(current->pid != hl->dl_pid);
259 +
260 +       spin_lock(&dl->dl_list_lock);
261 +       if (hl->dl_writers) {
262 +               BUG_ON(hl->dl_readers > 0 || hl->dl_readers < 0);
263 +               hl->dl_writers--;
264 +               if (hl->dl_writers == 0)
265 +                       wakeup = 1;
266 +       } else if (hl->dl_readers) {
267 +               hl->dl_readers--;
268 +               if (hl->dl_readers == 0)
269 +                       wakeup = 1;
270 +       } else {
271 +               BUG_ON(1);
272 +       }
273 +       if (wakeup) {
274 +               hl->dl_pid = 0;
275 +               wake_up(&hl->dl_wait);
276 +       }
277 +       if (--(hl->dl_refcount) == 0) {
278 +               hl->dl_magic = DYNLOCK_MAGIC2;
279 +               list_del(&hl->dl_list);
280 +               kmem_cache_free(dynlock_cachep, hl);
281 +       }
282 +       spin_unlock(&dl->dl_list_lock);
283 +}
284 +
285 +EXPORT_SYMBOL(dynlock_init);
286 +EXPORT_SYMBOL(dynlock_lock);
287 +EXPORT_SYMBOL(dynlock_unlock);
288 +