b=20668
fs/lustre-release.git: ldiskfs/kernel_patches/patches/ext3-dynlocks-common.patch
diff -rupN linux-2.6.18-128.1.6_1/fs/ext3/dynlocks.c linux-2.6.18-128.1.6_2/fs/ext3/dynlocks.c
--- linux-2.6.18-128.1.6_1/fs/ext3/dynlocks.c   1970-01-01 05:30:00.000000000 +0530
+++ linux-2.6.18-128.1.6_2/fs/ext3/dynlocks.c   2009-08-13 20:42:59.000000000 +0530
@@ -0,0 +1,236 @@
+/*
+ * Dynamic Locks
+ *
+ * struct dynlock is a lockspace;
+ * one may request a lock (exclusive or shared) for some value
+ * in that lockspace
+ *
+ */
+
+#include <linux/dynlocks.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#define DYNLOCK_HANDLE_MAGIC   0xd19a10c
+#define DYNLOCK_HANDLE_DEAD    0xd1956ee
+#define DYNLOCK_LIST_MAGIC     0x11ee91e6
+
+static kmem_cache_t * dynlock_cachep = NULL;
+
+struct dynlock_handle {
+       unsigned                dh_magic;
+       struct list_head        dh_list;
+       unsigned long           dh_value;       /* lock value */
+       int                     dh_refcount;    /* number of users */
+       int                     dh_readers;
+       int                     dh_writers;
+       int                     dh_pid;         /* holder of the lock */
+       wait_queue_head_t       dh_wait;
+};
+
+int __init dynlock_cache_init(void)
+{
+       int rc = 0;
+
+       printk(KERN_INFO "init dynlocks cache\n");
+       dynlock_cachep = kmem_cache_create("dynlock_cache",
+                                        sizeof(struct dynlock_handle),
+                                        0,
+                                        SLAB_HWCACHE_ALIGN,
+                                        NULL, NULL);
+       if (dynlock_cachep == NULL) {
+               printk(KERN_ERR "Not able to create dynlock cache\n");
+               rc = -ENOMEM;
+       }
+       return rc;
+}
+
+void __exit dynlock_cache_exit(void)
+{
+       printk(KERN_INFO "exit dynlocks cache\n");
+       kmem_cache_destroy(dynlock_cachep);
+}
+
+/*
+ * dynlock_init
+ *
+ * initialize lockspace
+ *
+ */
+void dynlock_init(struct dynlock *dl)
+{
+       spin_lock_init(&dl->dl_list_lock);
+       INIT_LIST_HEAD(&dl->dl_list);
+       dl->dl_magic = DYNLOCK_LIST_MAGIC;
+}
+EXPORT_SYMBOL(dynlock_init);
+
+/*
+ * dynlock_lock
+ *
+ * acquires a lock (exclusive or shared) in the specified lockspace.
+ * each lock in the lockspace is allocated separately, so the caller
+ * has to specify GFP flags.
+ * the routine returns a pointer to the lock; this pointer is intended
+ * to be passed to dynlock_unlock
+ *
+ */
+struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
+                                   enum dynlock_type lt, gfp_t gfp)
+{
+       struct dynlock_handle *nhl = NULL;
+       struct dynlock_handle *hl;
+
+       BUG_ON(dl == NULL);
+       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+
+repeat:
+       /* find requested lock in lockspace */
+       spin_lock(&dl->dl_list_lock);
+       BUG_ON(dl->dl_list.next == NULL);
+       BUG_ON(dl->dl_list.prev == NULL);
+       list_for_each_entry(hl, &dl->dl_list, dh_list) {
+               BUG_ON(hl->dh_list.next == NULL);
+               BUG_ON(hl->dh_list.prev == NULL);
+               BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
+               if (hl->dh_value == value) {
+                       /* lock is found */
+                       if (nhl) {
+                               /* someone else just allocated
+                                * the lock we didn't find and have
+                                * created ourselves, so drop ours
+                                */
+                               kmem_cache_free(dynlock_cachep, nhl);
+                               nhl = NULL;
+                       }
+                       hl->dh_refcount++;
+                       goto found;
+               }
+       }
+       /* lock not found */
+       if (nhl) {
+               /* we have already allocated a lock, use it */
+               hl = nhl;
+               nhl = NULL;
+               list_add(&hl->dh_list, &dl->dl_list);
+               goto found;
+       }
+       spin_unlock(&dl->dl_list_lock);
+
+       /* lock not found and we haven't allocated one yet; allocate it */
+       nhl = kmem_cache_alloc(dynlock_cachep, gfp);
+       if (nhl == NULL)
+               return NULL;
+       nhl->dh_refcount = 1;
+       nhl->dh_value = value;
+       nhl->dh_readers = 0;
+       nhl->dh_writers = 0;
+       nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
+       init_waitqueue_head(&nhl->dh_wait);
+
+       /* while the lock was being allocated, someone else may have
+        * allocated it and put it onto the list; check for this
+        */
+       goto repeat;
+
+found:
+       if (lt == DLT_WRITE) {
+               /* exclusive lock: the user doesn't want to share the lock at all.
+                * NOTE: one process may take the same lock several times;
+                * this functionality is useful for rename operations */
+               while ((hl->dh_writers && hl->dh_pid != current->pid) ||
+                               hl->dh_readers) {
+                       spin_unlock(&dl->dl_list_lock);
+                       wait_event(hl->dh_wait,
+                               hl->dh_writers == 0 && hl->dh_readers == 0);
+                       spin_lock(&dl->dl_list_lock);
+               }
+               hl->dh_writers++;
+       } else {
+               /* shared lock: the user does not want to share the lock with a writer */
+               while (hl->dh_writers) {
+                       spin_unlock(&dl->dl_list_lock);
+                       wait_event(hl->dh_wait, hl->dh_writers == 0);
+                       spin_lock(&dl->dl_list_lock);
+               }
+               hl->dh_readers++;
+       }
+       hl->dh_pid = current->pid;
+       spin_unlock(&dl->dl_list_lock);
+
+       return hl;
+}
+EXPORT_SYMBOL(dynlock_lock);
+
+
+/*
+ * dynlock_unlock
+ *
+ * the user has to specify the lockspace (dl) and the pointer to the
+ * lock structure returned by dynlock_lock()
+ *
+ */
+void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
+{
+       int wakeup = 0;
+
+       BUG_ON(dl == NULL);
+       BUG_ON(hl == NULL);
+       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
+
+       if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
+               printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);
+
+       BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
+       BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);
+
+       spin_lock(&dl->dl_list_lock);
+       if (hl->dh_writers) {
+               BUG_ON(hl->dh_readers != 0);
+               hl->dh_writers--;
+               if (hl->dh_writers == 0)
+                       wakeup = 1;
+       } else if (hl->dh_readers) {
+               hl->dh_readers--;
+               if (hl->dh_readers == 0)
+                       wakeup = 1;
+       } else {
+               BUG();
+       }
+       if (wakeup) {
+               hl->dh_pid = 0;
+               wake_up(&hl->dh_wait);
+       }
+       if (--(hl->dh_refcount) == 0) {
+               hl->dh_magic = DYNLOCK_HANDLE_DEAD;
+               list_del(&hl->dh_list);
+               kmem_cache_free(dynlock_cachep, hl);
+       }
+       spin_unlock(&dl->dl_list_lock);
+}
+EXPORT_SYMBOL(dynlock_unlock);
+
+int dynlock_is_locked(struct dynlock *dl, unsigned long value)
+{
+       struct dynlock_handle *hl;
+       int result = 0;
+
+       /* find requested lock in lockspace */
+       spin_lock(&dl->dl_list_lock);
+       BUG_ON(dl->dl_list.next == NULL);
+       BUG_ON(dl->dl_list.prev == NULL);
+       list_for_each_entry(hl, &dl->dl_list, dh_list) {
+               BUG_ON(hl->dh_list.next == NULL);
+               BUG_ON(hl->dh_list.prev == NULL);
+               BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
+               if (hl->dh_value == value && hl->dh_pid == current->pid) {
+                       /* lock is found */
+                       result = 1;
+                       break;
+               }
+       }
+       spin_unlock(&dl->dl_list_lock);
+       return result;
+}
+EXPORT_SYMBOL(dynlock_is_locked);
diff -rupN linux-2.6.18-128.1.6_1/include/linux/dynlocks.h linux-2.6.18-128.1.6_2/include/linux/dynlocks.h
--- linux-2.6.18-128.1.6_1/include/linux/dynlocks.h     1970-01-01 05:30:00.000000000 +0530
+++ linux-2.6.18-128.1.6_2/include/linux/dynlocks.h     2009-08-13 20:43:18.000000000 +0530
@@ -0,0 +1,34 @@
+#ifndef _LINUX_DYNLOCKS_H
+#define _LINUX_DYNLOCKS_H
+
+#include <linux/list.h>
+#include <linux/wait.h>
+
+struct dynlock_handle;
+
+/*
+ * lock's namespace:
+ *   - list of locks
+ *   - lock to protect this list
+ */
+struct dynlock {
+       unsigned                dl_magic;
+       struct list_head        dl_list;
+       spinlock_t              dl_list_lock;
+};
+
+enum dynlock_type {
+       DLT_WRITE,
+       DLT_READ
+};
+
+int dynlock_cache_init(void);
+void dynlock_cache_exit(void);
+void dynlock_init(struct dynlock *dl);
+struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
+                                   enum dynlock_type lt, gfp_t gfp);
+void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
+int dynlock_is_locked(struct dynlock *dl, unsigned long value);
+
+#endif
+
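
Usage sketch (not part of the patch above): a minimal illustration of how a caller is expected to drive the dynlock API, matching the comments in dynlocks.c. Only the dynlock_* calls, struct dynlock, and DLT_WRITE come from the patch; the my_inode_info container, the function names, and the choice of an htree hash as the lock key are hypothetical assumptions for this example.

/* Hypothetical caller: one lockspace per object, keyed by unsigned long. */
#include <linux/errno.h>
#include <linux/dynlocks.h>

struct my_inode_info {
        struct dynlock          mii_dynlock;    /* lockspace for this object */
};

static void my_info_init(struct my_inode_info *mii)
{
        /* must be called once before any dynlock_lock() on this lockspace */
        dynlock_init(&mii->mii_dynlock);
}

static int my_update_block(struct my_inode_info *mii, unsigned long hash)
{
        struct dynlock_handle *dlh;

        /* take an exclusive lock on the value "hash" in this lockspace;
         * GFP_NOFS because the allocation happens on a filesystem path */
        dlh = dynlock_lock(&mii->mii_dynlock, hash, DLT_WRITE, GFP_NOFS);
        if (dlh == NULL)
                return -ENOMEM;

        /* ... modify the data protected by "hash" ... */

        /* release the handle returned by dynlock_lock(); the last user
         * of a value frees the underlying dynlock_handle */
        dynlock_unlock(&mii->mii_dynlock, dlh);
        return 0;
}

Readers would pass DLT_READ instead; per the comments in dynlock_lock(), a DLT_WRITE holder may re-take the same value from the same process, which is what the rename path relies on.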