Whamcloud - gitweb
b=20668
[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / ext4-dynlocks-common-sles11.patch
1 diff -rupN 2.6.27.21/fs/ext4/dynlocks.c 2.6.27.21_1//fs/ext4/dynlocks.c
2 --- 2.6.27.21/fs/ext4/dynlocks.c        1970-01-01 05:30:00.000000000 +0530
3 +++ 2.6.27.21_1//fs/ext4/dynlocks.c     2009-08-23 10:39:59.000000000 +0530
4 @@ -0,0 +1,238 @@
5 +/*
6 + * Dynamic Locks
7 + *
8 + * struct dynlock is lockspace
9 + * one may request lock (exclusive or shared) for some value
10 + * in that lockspace
11 + *
12 + */
13 +
14 +#include <linux/dynlocks.h>
15 +#include <linux/module.h>
16 +#include <linux/slab_def.h>
17 +#include <linux/sched.h>
18 +
19 +#define DYNLOCK_HANDLE_MAGIC   0xd19a10c
20 +#define DYNLOCK_HANDLE_DEAD    0xd1956ee
21 +#define DYNLOCK_LIST_MAGIC     0x11ee91e6
22 +
23 +typedef struct kmem_cache kmem_cache_t;
24 +
25 +static kmem_cache_t * dynlock_cachep = NULL;
26 +
27 +struct dynlock_handle {
28 +       unsigned                dh_magic;
29 +       struct list_head        dh_list;
30 +       unsigned long           dh_value;       /* lock value */
31 +       int                     dh_refcount;    /* number of users */
32 +       int                     dh_readers;
33 +       int                     dh_writers;
34 +       int                     dh_pid;         /* holder of the lock */
35 +       wait_queue_head_t       dh_wait;
36 +};
37 +
38 +int __init dynlock_cache_init(void)
39 +{
40 +       int rc = 0;
41 +
42 +       printk(KERN_INFO "init dynlocks cache\n");
43 +       dynlock_cachep = kmem_cache_create("dynlock_cache",
44 +                                        sizeof(struct dynlock_handle),
45 +                                        0,
46 +                                        SLAB_HWCACHE_ALIGN,
47 +                                        NULL);
48 +       if (dynlock_cachep == NULL) {
49 +               printk(KERN_ERR "Not able to create dynlock cache");
50 +               rc = -ENOMEM;
51 +       }
52 +       return rc;
53 +}
54 +
55 +void __exit dynlock_cache_exit(void)
56 +{
57 +       printk(KERN_INFO "exit dynlocks cache\n");
58 +       kmem_cache_destroy(dynlock_cachep);
59 +}
60 +
61 +/*
62 + * dynlock_init
63 + *
64 + * initialize lockspace
65 + *
66 + */
67 +void dynlock_init(struct dynlock *dl)
68 +{
69 +       spin_lock_init(&dl->dl_list_lock);
70 +       INIT_LIST_HEAD(&dl->dl_list);
71 +       dl->dl_magic = DYNLOCK_LIST_MAGIC;
72 +}
73 +EXPORT_SYMBOL(dynlock_init);
74 +
75 +/*
76 + * dynlock_lock
77 + *
78 + * acquires lock (exclusive or shared) in specified lockspace
79 + * each lock in lockspace is allocated separately, so the user
80 + * has to specify GFP flags.
81 + * the routine returns a pointer to the lock. This pointer is
82 + * intended to be passed to dynlock_unlock()
83 + *
84 + */
85 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
86 +                                   enum dynlock_type lt, gfp_t gfp)
87 +{
88 +       struct dynlock_handle *nhl = NULL;
89 +       struct dynlock_handle *hl;
90 +
91 +       BUG_ON(dl == NULL);
92 +       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
93 +
94 +repeat:
95 +       /* find requested lock in lockspace */
96 +       spin_lock(&dl->dl_list_lock);
97 +       BUG_ON(dl->dl_list.next == NULL);
98 +       BUG_ON(dl->dl_list.prev == NULL);
99 +       list_for_each_entry(hl, &dl->dl_list, dh_list) {
100 +               BUG_ON(hl->dh_list.next == NULL);
101 +               BUG_ON(hl->dh_list.prev == NULL);
102 +               BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
103 +               if (hl->dh_value == value) {
104 +                       /* lock is found */
105 +                       if (nhl) {
106 +                               /* someone else just allocated the
107 +                                * lock we didn't find, so we drop
108 +                                * the one we created
109 +                                */
110 +                               kmem_cache_free(dynlock_cachep, nhl);
111 +                               nhl = NULL;
112 +                       }
113 +                       hl->dh_refcount++;
114 +                       goto found;
115 +               }
116 +       }
117 +       /* lock not found */
118 +       if (nhl) {
119 +               /* we already have allocated lock. use it */
120 +               hl = nhl;
121 +               nhl = NULL;
122 +               list_add(&hl->dh_list, &dl->dl_list);
123 +               goto found;
124 +       }
125 +       spin_unlock(&dl->dl_list_lock);
126 +       
127 +       /* lock not found and we haven't allocated lock yet. allocate it */
128 +       nhl = kmem_cache_alloc(dynlock_cachep, gfp);
129 +       if (nhl == NULL)
130 +               return NULL;
131 +       nhl->dh_refcount = 1;
132 +       nhl->dh_value = value;
133 +       nhl->dh_readers = 0;
134 +       nhl->dh_writers = 0;
135 +       nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
136 +       init_waitqueue_head(&nhl->dh_wait);
137 +
138 +       /* while lock is being allocated, someone else may allocate it
139 +        * and put it onto the list. check for this situation
140 +        */
141 +       goto repeat;
142 +
143 +found:
144 +       if (lt == DLT_WRITE) {
145 +               /* exclusive lock: the user doesn't want to share the lock
146 +                * at all. NOTE: one process may take the same lock several
147 +                * times; this functionality is useful for rename operations */
148 +               while ((hl->dh_writers && hl->dh_pid != current->pid) ||
149 +                               hl->dh_readers) {
150 +                       spin_unlock(&dl->dl_list_lock);
151 +                       wait_event(hl->dh_wait,
152 +                               hl->dh_writers == 0 && hl->dh_readers == 0);
153 +                       spin_lock(&dl->dl_list_lock);
154 +               }
155 +               hl->dh_writers++;
156 +       } else {
157 +               /* shared lock: the user does not want to share the lock with a writer */
158 +               while (hl->dh_writers) {
159 +                       spin_unlock(&dl->dl_list_lock);
160 +                       wait_event(hl->dh_wait, hl->dh_writers == 0);
161 +                       spin_lock(&dl->dl_list_lock);
162 +               }
163 +               hl->dh_readers++;
164 +       }
165 +       hl->dh_pid = current->pid;
166 +       spin_unlock(&dl->dl_list_lock);
167 +
168 +       return hl;
169 +}
170 +EXPORT_SYMBOL(dynlock_lock);
171 +
172 +
173 +/*
174 + * dynlock_unlock
175 + *
176 + * the user has to specify the lockspace (dl) and a pointer to the lock structure
177 + * returned by dynlock_lock()
178 + *
179 + */
180 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
181 +{
182 +       int wakeup = 0;
183 +       
184 +       BUG_ON(dl == NULL);
185 +       BUG_ON(hl == NULL);
186 +       BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);
187 +
188 +       if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
189 +               printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);
190 +
191 +       BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
192 +       BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);
193 +
194 +       spin_lock(&dl->dl_list_lock);
195 +       if (hl->dh_writers) {
196 +               BUG_ON(hl->dh_readers != 0);
197 +               hl->dh_writers--;
198 +               if (hl->dh_writers == 0)
199 +                       wakeup = 1;
200 +       } else if (hl->dh_readers) {
201 +               hl->dh_readers--;
202 +               if (hl->dh_readers == 0)
203 +                       wakeup = 1;
204 +       } else {
205 +               BUG();
206 +       }
207 +       if (wakeup) {
208 +               hl->dh_pid = 0;
209 +               wake_up(&hl->dh_wait);
210 +       }
211 +       if (--(hl->dh_refcount) == 0) {
212 +               hl->dh_magic = DYNLOCK_HANDLE_DEAD;
213 +               list_del(&hl->dh_list);
214 +               kmem_cache_free(dynlock_cachep, hl);
215 +       }
216 +       spin_unlock(&dl->dl_list_lock);
217 +}
218 +EXPORT_SYMBOL(dynlock_unlock);
219 +
220 +int dynlock_is_locked(struct dynlock *dl, unsigned long value)
221 +{
222 +       struct dynlock_handle *hl;
223 +       int result = 0;
224 +
225 +       /* find requested lock in lockspace */
226 +       spin_lock(&dl->dl_list_lock);
227 +       BUG_ON(dl->dl_list.next == NULL);
228 +       BUG_ON(dl->dl_list.prev == NULL);
229 +       list_for_each_entry(hl, &dl->dl_list, dh_list) {
230 +               BUG_ON(hl->dh_list.next == NULL);
231 +               BUG_ON(hl->dh_list.prev == NULL);
232 +               BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
233 +               if (hl->dh_value == value && hl->dh_pid == current->pid) {
234 +                       /* lock is found */
235 +                       result = 1;
236 +                       break;
237 +               }
238 +       }
239 +       spin_unlock(&dl->dl_list_lock);
240 +       return result;
241 +}
242 +EXPORT_SYMBOL(dynlock_is_locked);
243 diff -rupN 2.6.27.21/include/linux/dynlocks.h 2.6.27.21_1//include/linux/dynlocks.h
244 --- 2.6.27.21/include/linux/dynlocks.h  1970-01-01 05:30:00.000000000 +0530
245 +++ 2.6.27.21_1//include/linux/dynlocks.h       2009-08-23 10:40:07.000000000 +0530
246 @@ -0,0 +1,34 @@
247 +#ifndef _LINUX_DYNLOCKS_H
248 +#define _LINUX_DYNLOCKS_H
249 +
250 +#include <linux/list.h>
251 +#include <linux/wait.h>
252 +
253 +struct dynlock_handle;
254 +
255 +/*
256 + * lock's namespace:
257 + *   - list of locks
258 + *   - lock to protect this list
259 + */
260 +struct dynlock {
261 +       unsigned                dl_magic;
262 +       struct list_head        dl_list;
263 +       spinlock_t              dl_list_lock;
264 +};
265 +
266 +enum dynlock_type {
267 +       DLT_WRITE,
268 +       DLT_READ
269 +};
270 +
271 +int dynlock_cache_init(void);
272 +void dynlock_cache_exit(void);
273 +void dynlock_init(struct dynlock *dl);
274 +struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
275 +                                   enum dynlock_type lt, gfp_t gfp);
276 +void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *lock);
277 +int dynlock_is_locked(struct dynlock *dl, unsigned long value);
278 +
279 +#endif
280 +