Whamcloud - gitweb
b=17671
[fs/lustre-release.git] / lustre / kernel_patches / patches / dynamic-locks-common.patch
1 Index: linux-2.6.16.46-0.14/include/linux/dynlocks.h
2 ===================================================================
3 --- linux-2.6.16.46-0.14.orig/include/linux/dynlocks.h  2006-06-16 16:07:58.000000000 +0300
4 +++ linux-2.6.16.46-0.14/include/linux/dynlocks.h       2007-08-30 05:45:51.000000000 +0300
5 @@ -0,0 +1,37 @@
6 +#ifndef _LINUX_DYNLOCKS_H
7 +#define _LINUX_DYNLOCKS_H
8 +
9 +#include <linux/list.h>
10 +#include <linux/wait.h>
11 +
12 +#define DYNLOCK_MAGIC          0xd19a10c
13 +#define DYNLOCK_MAGIC2         0xd1956ee
14 +
15 +struct dynlock;
16 +struct dynlock_handle;
17 +
18 +/*
19 + * lock's namespace:
20 + *   - list of locks
21 + *   - lock to protect this list
22 + */
23 +
24 +struct dynlock {
25 +       unsigned dl_magic;
26 +       struct list_head dl_list;
27 +       spinlock_t dl_list_lock;
28 +};
29 +
30 +enum dynlock_type {
31 +       DLT_WRITE,
32 +       DLT_READ
33 +};
34 +
35 +void dynlock_init(struct dynlock *dl);
36 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
37 +                                   enum dynlock_type lt, int gfp);
38 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
39 +
40 +int dynlock_is_locked(struct dynlock *dl, unsigned long value);
41 +#endif
42 +
43 Index: linux-2.6.16.46-0.14/lib/dynlocks.c
44 ===================================================================
45 --- linux-2.6.16.46-0.14.orig/lib/dynlocks.c    2006-06-16 16:07:58.000000000 +0300
46 +++ linux-2.6.16.46-0.14/lib/dynlocks.c 2007-08-30 05:45:51.000000000 +0300
47 @@ -0,0 +1,226 @@
48 +/*
49 + * Dynamic Locks
50 + *
51 + * struct dynlock is lockspace
52 + * one may request a lock (exclusive or shared) for some value
53 + * in that lockspace
54 + *
55 + */
56 +
57 +#include <linux/dynlocks.h>
58 +#include <linux/module.h>
59 +#include <linux/slab.h>
60 +#include <linux/sched.h>
61 +
62 +static kmem_cache_t * dynlock_cachep = NULL;
63 +
64 +struct dynlock_handle {
65 +       unsigned                dl_magic;
66 +       struct list_head        dl_list;
67 +       unsigned long           dl_value;       /* lock value */
68 +       int                     dl_refcount;    /* number of users */
69 +       int                     dl_readers;
70 +       int                     dl_writers;
71 +       int                     dl_pid;         /* holder of the lock */
72 +       wait_queue_head_t       dl_wait;
73 +};
74 +
75 +#define DYNLOCK_LIST_MAGIC     0x11ee91e6
76 +
77 +void __init dynlock_cache_init(void)
78 +{
79 +       printk(KERN_INFO "init dynlocks cache\n");
80 +       dynlock_cachep = kmem_cache_create("dynlock_cache",
81 +                                        sizeof(struct dynlock_handle),
82 +                                        0,
83 +                                        SLAB_HWCACHE_ALIGN,
84 +                                        NULL, NULL);
85 +       if (dynlock_cachep == NULL)
86 +               panic("Can't create dynlock cache");
87 +}
88 +
89 +/*
90 + * dynlock_init
91 + *
92 + * initialize lockspace
93 + *
94 + */
95 +void dynlock_init(struct dynlock *dl)
96 +{
97 +       spin_lock_init(&dl->dl_list_lock);
98 +       INIT_LIST_HEAD(&dl->dl_list);
99 +       dl->dl_magic = DYNLOCK_LIST_MAGIC;
100 +}
101 +
102 +/*
103 + * dynlock_lock
104 + *
105 + * acquires lock (exclusive or shared) in specified lockspace
106 + * each lock in lockspace is allocated separately, so the user has
107 + * to specify GFP flags.
108 + * the routine returns a pointer to the lock; this pointer is intended to
109 + * be passed to dynlock_unlock
110 + *
111 + */
112 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
113 +                                   enum dynlock_type lt, int gfp)
114 +{
115 +       struct dynlock_handle *nhl = NULL;
116 +       struct dynlock_handle *hl;
117 +
118 +       BUG_ON(dl == NULL);
119 +       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
120 +
121 +repeat:
122 +       /* find requested lock in lockspace */
123 +       spin_lock(&dl->dl_list_lock);
124 +       BUG_ON(dl->dl_list.next == NULL);
125 +       BUG_ON(dl->dl_list.prev == NULL);
126 +       list_for_each_entry(hl, &dl->dl_list, dl_list) {
127 +               BUG_ON(hl->dl_list.next == NULL);
128 +               BUG_ON(hl->dl_list.prev == NULL);
129 +               BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
130 +               if (hl->dl_value == value) {
131 +                       /* lock is found */
132 +                       if (nhl) {
133 +                               /* someone else just allocated
134 +                                * the lock we failed to find earlier,
135 +                                * so drop the one we created
136 +                                */
137 +                               kmem_cache_free(dynlock_cachep, nhl);
138 +                               nhl = NULL;
139 +                       }
140 +                       hl->dl_refcount++;
141 +                       goto found;
142 +               }
143 +       }
144 +       /* lock not found */
145 +       if (nhl) {
146 +               /* we already have allocated lock. use it */
147 +               hl = nhl;
148 +               nhl = NULL;
149 +               list_add(&hl->dl_list, &dl->dl_list);
150 +               goto found;
151 +       }
152 +       spin_unlock(&dl->dl_list_lock);
153 +       
154 +       /* lock not found and we haven't allocated lock yet. allocate it */
155 +       nhl = kmem_cache_alloc(dynlock_cachep, gfp);
156 +       if (nhl == NULL)
157 +               return NULL;
158 +       nhl->dl_refcount = 1;
159 +       nhl->dl_value = value;
160 +       nhl->dl_readers = 0;
161 +       nhl->dl_writers = 0;
162 +       nhl->dl_magic = DYNLOCK_MAGIC;
163 +       init_waitqueue_head(&nhl->dl_wait);
164 +
165 +       /* while lock is being allocated, someone else may allocate it
166 +        * and put it onto the list. check for this situation
167 +        */
168 +       goto repeat;
169 +
170 +found:
171 +       if (lt == DLT_WRITE) {
172 +               /* exclusive lock: the user doesn't want to share the lock at all
173 +                * NOTE: one process may take the same lock several times
174 +                * this functionality is useful for rename operations */
175 +               while ((hl->dl_writers && hl->dl_pid != current->pid) ||
176 +                               hl->dl_readers) {
177 +                       spin_unlock(&dl->dl_list_lock);
178 +                       wait_event(hl->dl_wait,
179 +                               hl->dl_writers == 0 && hl->dl_readers == 0);
180 +                       spin_lock(&dl->dl_list_lock);
181 +               }
182 +               hl->dl_writers++;
183 +       } else {
184 +               /* shared lock: the user does not want to share the lock with a writer */
185 +               while (hl->dl_writers) {
186 +                       spin_unlock(&dl->dl_list_lock);
187 +                       wait_event(hl->dl_wait, hl->dl_writers == 0);
188 +                       spin_lock(&dl->dl_list_lock);
189 +               }
190 +               hl->dl_readers++;
191 +       }
192 +       hl->dl_pid = current->pid;
193 +       spin_unlock(&dl->dl_list_lock);
194 +
195 +       return hl;
196 +}
197 +
198 +
199 +/*
200 + * dynlock_unlock
201 + *
202 + * the user has to specify the lockspace (dl) and the pointer to the lock structure
203 + * returned by dynlock_lock()
204 + *
205 + */
206 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
207 +{
208 +       int wakeup = 0;
209 +       
210 +       BUG_ON(dl == NULL);
211 +       BUG_ON(hl == NULL);
212 +       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
213 +
214 +       if (hl->dl_magic != DYNLOCK_MAGIC)
215 +               printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dl_magic);
216 +
217 +       BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
218 +       BUG_ON(hl->dl_writers != 0 && current->pid != hl->dl_pid);
219 +
220 +       spin_lock(&dl->dl_list_lock);
221 +       if (hl->dl_writers) {
222 +               BUG_ON(hl->dl_readers != 0);
223 +               hl->dl_writers--;
224 +               if (hl->dl_writers == 0)
225 +                       wakeup = 1;
226 +       } else if (hl->dl_readers) {
227 +               hl->dl_readers--;
228 +               if (hl->dl_readers == 0)
229 +                       wakeup = 1;
230 +       } else {
231 +               BUG();
232 +       }
233 +       if (wakeup) {
234 +               hl->dl_pid = 0;
235 +               wake_up(&hl->dl_wait);
236 +       }
237 +       if (--(hl->dl_refcount) == 0) {
238 +               hl->dl_magic = DYNLOCK_MAGIC2;
239 +               list_del(&hl->dl_list);
240 +               kmem_cache_free(dynlock_cachep, hl);
241 +       }
242 +       spin_unlock(&dl->dl_list_lock);
243 +}
244 +
245 +int dynlock_is_locked(struct dynlock *dl, unsigned long value)
246 +{
247 +       struct dynlock_handle *hl;
248 +       int result;
249 +
250 +       result = 0;
251 +       /* find requested lock in lockspace */
252 +       spin_lock(&dl->dl_list_lock);
253 +       BUG_ON(dl->dl_list.next == NULL);
254 +       BUG_ON(dl->dl_list.prev == NULL);
255 +       list_for_each_entry(hl, &dl->dl_list, dl_list) {
256 +               BUG_ON(hl->dl_list.next == NULL);
257 +               BUG_ON(hl->dl_list.prev == NULL);
258 +               BUG_ON(hl->dl_magic != DYNLOCK_MAGIC);
259 +               if (hl->dl_value == value && hl->dl_pid == current->pid) {
260 +                       /* lock is found */
261 +                       result = 1;
262 +                       break;
263 +               }
264 +       }
265 +       spin_unlock(&dl->dl_list_lock);
266 +       return result;
267 +}
268 +
269 +EXPORT_SYMBOL(dynlock_init);
270 +EXPORT_SYMBOL(dynlock_lock);
271 +EXPORT_SYMBOL(dynlock_unlock);
272 +EXPORT_SYMBOL(dynlock_is_locked);
273 +