Whamcloud - gitweb
- configurable stack size for x86_64
[fs/lustre-release.git] / lustre / kernel_patches / patches / dynamic-locks-2.4.20-rh.patch
1  include/linux/dynlocks.h |   33 ++++++++++
2  lib/Makefile             |    4 -
3  lib/dynlocks.c           |  152 +++++++++++++++++++++++++++++++++++++++++++++++
4  3 files changed, 187 insertions(+), 2 deletions(-)
5
6 Index: linux-2.4.20-rh/include/linux/dynlocks.h
7 ===================================================================
8 --- linux-2.4.20-rh.orig/include/linux/dynlocks.h       2003-09-04 18:25:49.000000000 +0800
9 +++ linux-2.4.20-rh/include/linux/dynlocks.h    2003-09-04 18:25:49.000000000 +0800
10 @@ -0,0 +1,33 @@
11 +#ifndef _LINUX_DYNLOCKS_H
12 +#define _LINUX_DYNLOCKS_H
13 +
14 +#include <linux/list.h>
15 +#include <linux/wait.h>
16 +
17 +struct dynlock_member {
18 +       struct list_head        dl_list;        /* entry in dynlock's list */
19 +       unsigned long           dl_value;       /* lock value */
20 +       int                     dl_refcount;    /* number of users */
21 +       int                     dl_readers;     /* active shared holders */
22 +       int                     dl_writers;     /* active exclusive holders */
23 +       int                     dl_pid;         /* holder of the lock */
24 +       wait_queue_head_t       dl_wait;        /* waiters for this lock */
25 +};
26 +
27 +/*
28 + * lock's namespace:
29 + *   - list of locks
30 + *   - lock to protect this list
31 + */
32 +struct dynlock {
33 +       struct list_head dl_list;       /* all locks in this namespace */
34 +       spinlock_t dl_list_lock;        /* protects dl_list above */
35 +};
36 +
37 +void dynlock_init(struct dynlock *dl);
38 +void *dynlock_lock(struct dynlock *dl, unsigned long value, int rw, int gfp);
39 +void dynlock_unlock(struct dynlock *dl, void *lock);
40 +
41 +
42 +#endif
43 +
44 Index: linux-2.4.20-rh/lib/dynlocks.c
45 ===================================================================
46 --- linux-2.4.20-rh.orig/lib/dynlocks.c 2003-09-04 18:25:49.000000000 +0800
47 +++ linux-2.4.20-rh/lib/dynlocks.c      2003-09-04 18:25:49.000000000 +0800
48 @@ -0,0 +1,152 @@
49 +/*
50 + * Dynamic Locks
51 + *
52 + * struct dynlock is lockspace
53 + * one may request lock (exclusive or shared) for some value
54 + * in that lockspace
55 + *
56 + */
57 +
58 +#include <linux/dynlocks.h>
59 +#include <linux/module.h>
60 +#include <linux/slab.h>
61 +#include <linux/sched.h>
62 +
63 +/*
64 + * dynlock_init
65 + *
66 + * initialize lockspace
67 + *
68 + */
69 +void dynlock_init(struct dynlock *dl)
70 +{
71 +       spin_lock_init(&dl->dl_list_lock);      /* guards dl_list */
72 +       INIT_LIST_HEAD(&dl->dl_list);           /* empty lockspace: no locks yet */
73 +}
74 +
75 +/*
76 + * dynlock_lock
77 + *
78 + * acquires lock (exclusive or shared) in specified lockspace
79 + * each lock in lockspace is allocated separately, so user have
80 + * to specify GFP flags.
81 + * routine returns pointer to lock. this pointer is intended to
82 + * be passed to dynlock_unlock
83 + *
84 + */
85 +void *dynlock_lock(struct dynlock *dl, unsigned long value, int rw, int gfp)
86 +{
87 +       struct dynlock_member *nhl = NULL; 
88 +       struct dynlock_member *hl; 
89 +       struct list_head *cur;
90 +
91 +repeat:
92 +       /* find requested lock in lockspace */
93 +       spin_lock(&dl->dl_list_lock);
94 +       list_for_each(cur, &dl->dl_list) {
95 +               hl = list_entry(cur, struct dynlock_member, dl_list);
96 +               if (hl->dl_value == value) {
97 +                       /* lock is found */
98 +                       if (nhl) {
99 +                               /* someone else inserted this lock
100 +                                * while we were allocating ours,
101 +                                * so drop our copy
102 +                                */
103 +                               kfree(nhl);
104 +                               nhl = NULL;
105 +                       }
106 +                       hl->dl_refcount++;
107 +                       goto found;
108 +               }
109 +       }
110 +       /* lock not found */
111 +       if (nhl) {
112 +               /* use the lock we pre-allocated on the previous pass */
113 +               hl = nhl;
114 +               nhl = NULL;
115 +               list_add(&hl->dl_list, &dl->dl_list);
116 +               goto found;
117 +       }
118 +       spin_unlock(&dl->dl_list_lock);
119 +       
120 +       /* lock not found and nothing pre-allocated yet; allocate one */
121 +       nhl = kmalloc(sizeof(struct dynlock_member), gfp);
122 +       if (nhl == NULL)
123 +               return NULL;
124 +       nhl->dl_refcount = 1;
125 +       nhl->dl_value = value;
126 +       nhl->dl_readers = 0;
127 +       nhl->dl_writers = 0;
128 +       init_waitqueue_head(&nhl->dl_wait);
129 +
130 +       /* the allocation ran outside the spinlock, so someone else may
131 +        * have inserted the same lock meanwhile; rescan the list
132 +        */
133 +       goto repeat;
134 +
135 +found:
136 +       if (rw) {
137 +               /* exclusive lock: the caller will not share the lock at all.
138 +                * NOTE: one process may take the same lock several times;
139 +                * this functionality is useful for rename operations */
140 +               while ((hl->dl_writers && hl->dl_pid != current->pid) ||
141 +                               hl->dl_readers) {
142 +                       spin_unlock(&dl->dl_list_lock);
143 +                       wait_event(hl->dl_wait,
144 +                               hl->dl_writers == 0 && hl->dl_readers == 0);
145 +                       spin_lock(&dl->dl_list_lock);
146 +               }
147 +               hl->dl_writers++;
148 +       } else {
149 +               /* shared lock: the caller will not share the lock with a writer */
150 +               while (hl->dl_writers) {
151 +                       spin_unlock(&dl->dl_list_lock);
152 +                       wait_event(hl->dl_wait, hl->dl_writers == 0);
153 +                       spin_lock(&dl->dl_list_lock);
154 +               }
155 +               hl->dl_readers++;
156 +       }
157 +       hl->dl_pid = current->pid;
158 +       spin_unlock(&dl->dl_list_lock);
159 +
160 +       return hl;
161 +}
162 +
163 +
164 +/*
165 + * dynlock_unlock
166 + *
167 + * user have to specify lockspace (dl) and pointer to lock structure
168 + * returned by dynlock_lock()
169 + *
170 + */
171 +void dynlock_unlock(struct dynlock *dl, void *lock)
172 +{
173 +       struct dynlock_member *hl = lock;
174 +       int wakeup = 0, free_it = 0;
175 +       
176 +       spin_lock(&dl->dl_list_lock);
177 +       if (hl->dl_writers) {
178 +               hl->dl_writers--;
179 +               if (hl->dl_writers == 0)
180 +                       wakeup = 1;
181 +       } else {
182 +               hl->dl_readers--;
183 +               if (hl->dl_readers == 0)
184 +                       wakeup = 1;
185 +       }
186 +       if (wakeup) {
187 +               hl->dl_pid = 0;
188 +               wake_up(&hl->dl_wait);
189 +       }
190 +       if (--(hl->dl_refcount) == 0)   /* last user: unlink now, free below */
191 +               list_del(&hl->dl_list), free_it = 1;
192 +       spin_unlock(&dl->dl_list_lock);
193 +       if (free_it)    /* decided under the lock: hl may be freed by another
194 +                        * thread after the unlock, so do not re-read it */
195 +               kfree(hl);
196 +
197 +EXPORT_SYMBOL(dynlock_init);
198 +EXPORT_SYMBOL(dynlock_lock);
199 +EXPORT_SYMBOL(dynlock_unlock);
200 +
201 Index: linux-2.4.20-rh/lib/Makefile
202 ===================================================================
203 --- linux-2.4.20-rh.orig/lib/Makefile   2002-11-29 07:53:15.000000000 +0800
204 +++ linux-2.4.20-rh/lib/Makefile        2003-09-04 18:27:26.000000000 +0800
205 @@ -8,10 +8,10 @@
206  
207  L_TARGET := lib.a
208  
209 -export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o rbtree.o
210 +export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o rbtree.o dynlocks.o
211  
212  obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o \
213 -        bust_spinlocks.o rbtree.o dump_stack.o
214 +        bust_spinlocks.o rbtree.o dump_stack.o dynlocks.o
215  
216  obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
217  obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o