b=3119
lustre/kernel_patches/patches/dynamic-locks-2.4.18-chaos.patch
 include/linux/dynlocks.h |   33 ++++++++++
 lib/Makefile             |    4 -
 lib/dynlocks.c           |  152 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 187 insertions(+), 2 deletions(-)

--- /dev/null   2003-01-30 13:24:37.000000000 +0300
+++ linux-2.4.18-alexey/include/linux/dynlocks.h        2003-09-01 16:33:25.000000000 +0400
@@ -0,0 +1,33 @@
+#ifndef _LINUX_DYNLOCKS_H
+#define _LINUX_DYNLOCKS_H
+
+#include <linux/list.h>
+#include <linux/wait.h>
+
+struct dynlock_member {
+       struct list_head        dl_list;
+       unsigned long           dl_value;       /* lock value */
+       int                     dl_refcount;    /* number of users */
+       int                     dl_readers;
+       int                     dl_writers;
+       int                     dl_pid;         /* holder of the lock */
+       wait_queue_head_t       dl_wait;
+};
+
+/*
+ * lock's namespace:
+ *   - list of locks
+ *   - lock to protect this list
+ */
+struct dynlock {
+       struct list_head dl_list;
+       spinlock_t dl_list_lock;
+};
+
+void dynlock_init(struct dynlock *dl);
+void *dynlock_lock(struct dynlock *dl, unsigned long value, int rw, int gfp);
+void dynlock_unlock(struct dynlock *dl, void *lock);
+
+
+#endif
+
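
The interface above can be exercised roughly as follows. This is a minimal illustrative sketch, not part of the patch: the dir_lockspace variable, my_dir_op() and the hash argument are hypothetical stand-ins for a real caller.

    #include <linux/dynlocks.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    static struct dynlock dir_lockspace;             /* hypothetical lockspace */

    static int my_dir_op(unsigned long hash)         /* hypothetical caller */
    {
            void *lock;

            /* take an exclusive (rw != 0) lock on this hash value;
             * GFP_KERNEL because the lock may be allocated here */
            lock = dynlock_lock(&dir_lockspace, hash, 1, GFP_KERNEL);
            if (lock == NULL)
                    return -ENOMEM;

            /* ... work on the object identified by hash ... */

            dynlock_unlock(&dir_lockspace, lock);
            return 0;
    }

dynlock_init(&dir_lockspace) must be called once before the first dynlock_lock() call.
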
--- /dev/null   2003-01-30 13:24:37.000000000 +0300
+++ linux-2.4.18-alexey/lib/dynlocks.c  2003-09-01 16:36:00.000000000 +0400
@@ -0,0 +1,152 @@
+/*
+ * Dynamic Locks
+ *
+ * struct dynlock is a lockspace;
+ * one may request a lock (exclusive or shared) on some value
+ * in that lockspace
+ *
+ */
+
+#include <linux/dynlocks.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+/*
+ * dynlock_init
+ *
+ * initialize lockspace
+ *
+ */
+void dynlock_init(struct dynlock *dl)
+{
+       spin_lock_init(&dl->dl_list_lock);
+       INIT_LIST_HEAD(&dl->dl_list);
+}
+
+/*
+ * dynlock_lock
+ *
+ * acquires a lock (exclusive or shared) in the specified lockspace.
+ * each lock in a lockspace is allocated separately, so the caller
+ * has to specify GFP flags.
+ * the routine returns a pointer to the lock; this pointer is intended
+ * to be passed to dynlock_unlock
+ *
+ */
+void *dynlock_lock(struct dynlock *dl, unsigned long value, int rw, int gfp)
+{
+       struct dynlock_member *nhl = NULL;
+       struct dynlock_member *hl;
+       struct list_head *cur;
+
+repeat:
+       /* find the requested lock in the lockspace */
+       spin_lock(&dl->dl_list_lock);
+       list_for_each(cur, &dl->dl_list) {
+               hl = list_entry(cur, struct dynlock_member, dl_list);
+               if (hl->dl_value == value) {
+                       /* lock is found */
+                       if (nhl) {
+                               /* someone else allocated this lock
+                                * while we were allocating ours,
+                                * so drop our copy
+                                */
+                               kfree(nhl);
+                               nhl = NULL;
+                       }
+                       hl->dl_refcount++;
+                       goto found;
+               }
+       }
+       /* lock not found */
+       if (nhl) {
+               /* we have already allocated a lock; use it */
+               hl = nhl;
+               nhl = NULL;
+               list_add(&hl->dl_list, &dl->dl_list);
+               goto found;
+       }
+       spin_unlock(&dl->dl_list_lock);
+
+       /* lock not found and we haven't allocated one yet; allocate it */
+       nhl = kmalloc(sizeof(struct dynlock_member), gfp);
+       if (nhl == NULL)
+               return NULL;
+       nhl->dl_refcount = 1;
+       nhl->dl_value = value;
+       nhl->dl_readers = 0;
+       nhl->dl_writers = 0;
+       init_waitqueue_head(&nhl->dl_wait);
+
+       /* while the lock was being allocated, someone else may have
+        * allocated it and put it onto the list; check for this
+        */
+       goto repeat;
+
+found:
+       if (rw) {
+               /* exclusive lock: the caller doesn't want to share the lock.
+                * NOTE: one process may take the same lock several times;
+                * this functionality is useful for rename operations */
+               while ((hl->dl_writers && hl->dl_pid != current->pid) ||
+                               hl->dl_readers) {
+                       spin_unlock(&dl->dl_list_lock);
+                       wait_event(hl->dl_wait,
+                               hl->dl_writers == 0 && hl->dl_readers == 0);
+                       spin_lock(&dl->dl_list_lock);
+               }
+               hl->dl_writers++;
+       } else {
+               /* shared lock: the caller doesn't want to share the lock with a writer */
+               while (hl->dl_writers) {
+                       spin_unlock(&dl->dl_list_lock);
+                       wait_event(hl->dl_wait, hl->dl_writers == 0);
+                       spin_lock(&dl->dl_list_lock);
+               }
+               hl->dl_readers++;
+       }
+       hl->dl_pid = current->pid;
+       spin_unlock(&dl->dl_list_lock);
+
+       return hl;
+}
+
+
+/*
+ * dynlock_unlock
+ *
+ * the caller has to specify the lockspace (dl) and the pointer to the
+ * lock structure returned by dynlock_lock()
+ *
+ */
+void dynlock_unlock(struct dynlock *dl, void *lock)
+{
+       struct dynlock_member *hl = lock;
+       int wakeup = 0;
+
+       spin_lock(&dl->dl_list_lock);
+       if (hl->dl_writers) {
+               hl->dl_writers--;
+               if (hl->dl_writers == 0)
+                       wakeup = 1;
+       } else {
+               hl->dl_readers--;
+               if (hl->dl_readers == 0)
+                       wakeup = 1;
+       }
+       if (wakeup) {
+               hl->dl_pid = 0;
+               wake_up(&hl->dl_wait);
+       }
+       if (--(hl->dl_refcount) == 0)
+               list_del(&hl->dl_list);
+       spin_unlock(&dl->dl_list_lock);
+       if (hl->dl_refcount == 0)
+               kfree(hl);
+}
+
+EXPORT_SYMBOL(dynlock_init);
+EXPORT_SYMBOL(dynlock_lock);
+EXPORT_SYMBOL(dynlock_unlock);
+
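
A note on the dl_pid check in dynlock_lock() above: it lets one process take the same exclusive lock more than once, which the in-code comment motivates with rename. A hedged sketch of that pattern, reusing the hypothetical dir_lockspace from the earlier example (src_hash and tgt_hash are also hypothetical, and error handling is omitted):

    void *src_lock, *tgt_lock;

    /* lock both keys exclusively; if src_hash == tgt_hash, the second
     * call does not block on the first because dl_pid == current->pid,
     * it just bumps dl_refcount and dl_writers */
    src_lock = dynlock_lock(&dir_lockspace, src_hash, 1, GFP_KERNEL);
    tgt_lock = dynlock_lock(&dir_lockspace, tgt_hash, 1, GFP_KERNEL);

    /* ... perform the rename-like operation under both locks ... */

    dynlock_unlock(&dir_lockspace, tgt_lock);
    dynlock_unlock(&dir_lockspace, src_lock);
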
--- linux-2.4.18/lib/Makefile~dynamic-locks-2.4.18-chaos        2003-08-29 11:57:40.000000000 +0400
+++ linux-2.4.18-alexey/lib/Makefile    2003-09-01 16:35:23.000000000 +0400
@@ -8,9 +8,9 @@
 
 L_TARGET := lib.a
 
-export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o rbtree.o
+export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o rbtree.o dynlocks.o
 
-obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o bust_spinlocks.o rbtree.o
+obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o bust_spinlocks.o rbtree.o dynlocks.o
 
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o