/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 *
 * Dynamic Locks
 *
 * lustre/osd-ldiskfs/osd_dynlocks.c
 *
 * A struct dynlock is a lockspace; one may request a lock (exclusive
 * or shared) for some value in that lockspace.
 *
 */
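
/*
 * A minimal usage sketch, illustrative only ("key" is a hypothetical
 * value and GFP_NOFS just one possible choice of flags): a lockspace
 * is initialized once, then callers serialize on a value with
 * dynlock_lock() and release it with dynlock_unlock().  DLT_WRITE
 * requests the exclusive path; any other dynlock_type value takes the
 * shared path.
 *
 *        struct dynlock space;
 *        struct dynlock_handle *dlh;
 *
 *        dynlock_init(&space);
 *        dlh = dynlock_lock(&space, key, DLT_WRITE, GFP_NOFS);
 *        if (dlh == NULL)
 *                return -ENOMEM;
 *        ... operate on whatever object "key" identifies ...
 *        dynlock_unlock(&space, dlh);
 */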

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <libcfs/libcfs.h>

#include <obd_support.h>
#include "osd_dynlocks.h"

extern struct kmem_cache *dynlock_cachep;

#define DYNLOCK_HANDLE_MAGIC    0xd19a10c
#define DYNLOCK_HANDLE_DEAD     0xd1956ee
#define DYNLOCK_LIST_MAGIC      0x11ee91e6

/*
 * dynlock_init
 *
 * initialize lockspace
 *
 */
void dynlock_init(struct dynlock *dl)
{
        spin_lock_init(&dl->dl_list_lock);
        INIT_LIST_HEAD(&dl->dl_list);
        dl->dl_magic = DYNLOCK_LIST_MAGIC;
}

/*
 * dynlock_lock
 *
 * acquires a lock (exclusive or shared) in the specified lockspace.
 * each lock in a lockspace is allocated separately, so the caller has
 * to specify GFP flags.
 * the routine returns a pointer to the lock; this pointer is intended
 * to be passed to dynlock_unlock()
 *
 */
struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
                                    enum dynlock_type lt, gfp_t gfp)
{
        struct dynlock_handle *nhl = NULL;
        struct dynlock_handle *hl;

        BUG_ON(dl == NULL);
        BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);

repeat:
        /* find requested lock in lockspace */
        spin_lock(&dl->dl_list_lock);
        BUG_ON(dl->dl_list.next == NULL);
        BUG_ON(dl->dl_list.prev == NULL);
        list_for_each_entry(hl, &dl->dl_list, dh_list) {
                BUG_ON(hl->dh_list.next == NULL);
                BUG_ON(hl->dh_list.prev == NULL);
                BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
                if (hl->dh_value == value) {
                        /* lock is found */
                        if (nhl) {
                                /* someone else allocated and inserted the
                                 * lock we were looking for while we were
                                 * allocating ours, so drop our copy
                                 */
                                OBD_SLAB_FREE(nhl, dynlock_cachep, sizeof(*nhl));
                        }
                        hl->dh_refcount++;
                        goto found;
                }
        }
        /* lock not found */
        if (nhl) {
                /* we have already allocated a lock, use it */
                hl = nhl;
                nhl = NULL;
                list_add(&hl->dh_list, &dl->dl_list);
                goto found;
        }
        spin_unlock(&dl->dl_list_lock);

        /* lock not found and we haven't allocated one yet, allocate it */
        OBD_SLAB_ALLOC_GFP(nhl, dynlock_cachep, sizeof(*nhl), gfp);
        if (nhl == NULL)
                return NULL;
        nhl->dh_refcount = 1;
        nhl->dh_value = value;
        nhl->dh_readers = 0;
        nhl->dh_writers = 0;
        nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
        init_waitqueue_head(&nhl->dh_wait);

        /* while the lock was being allocated, someone else may have
         * allocated it and put it onto the list; check for this
         * situation by searching again
         */
        goto repeat;

found:
        if (lt == DLT_WRITE) {
                /* exclusive lock: the caller does not want to share the
                 * lock at all.
                 * NOTE: one process may take the same lock several times;
                 * this functionality is useful for rename operations */
                while ((hl->dh_writers && hl->dh_pid != current->pid) ||
                                hl->dh_readers) {
                        spin_unlock(&dl->dl_list_lock);
                        wait_event(hl->dh_wait,
                                hl->dh_writers == 0 && hl->dh_readers == 0);
                        spin_lock(&dl->dl_list_lock);
                }
                hl->dh_writers++;
        } else {
                /* shared lock: the caller does not want to share the lock
                 * with a writer */
                while (hl->dh_writers) {
                        spin_unlock(&dl->dl_list_lock);
                        wait_event(hl->dh_wait, hl->dh_writers == 0);
                        spin_lock(&dl->dl_list_lock);
                }
                hl->dh_readers++;
        }
        hl->dh_pid = current->pid;
        spin_unlock(&dl->dl_list_lock);

        return hl;
}
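
/*
 * Note on re-entrancy (a sketch reusing the hypothetical names from the
 * usage comment at the top of the file): because the exclusive path
 * above only blocks on writers whose dh_pid differs from current->pid,
 * the same task may take the same DLT_WRITE lock more than once, which
 * the NOTE above relies on for rename operations.  Each dynlock_lock()
 * call must still be paired with its own dynlock_unlock():
 *
 *        h1 = dynlock_lock(&space, key, DLT_WRITE, GFP_NOFS);
 *        h2 = dynlock_lock(&space, key, DLT_WRITE, GFP_NOFS);
 *        ...
 *        dynlock_unlock(&space, h2);
 *        dynlock_unlock(&space, h1);
 */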

/*
 * dynlock_unlock
 *
 * the caller has to specify the lockspace (dl) and the pointer to the
 * lock handle returned by dynlock_lock()
 *
 */
void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
{
        int wakeup = 0;

        BUG_ON(dl == NULL);
        BUG_ON(hl == NULL);
        BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);

        if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
                printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);

        BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
        BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);

        spin_lock(&dl->dl_list_lock);
        if (hl->dh_writers) {
                BUG_ON(hl->dh_readers != 0);
                hl->dh_writers--;
                if (hl->dh_writers == 0)
                        wakeup = 1;
        } else if (hl->dh_readers) {
                hl->dh_readers--;
                if (hl->dh_readers == 0)
                        wakeup = 1;
        } else {
                BUG();
        }
        if (wakeup) {
                hl->dh_pid = 0;
                wake_up(&hl->dh_wait);
        }
        if (--(hl->dh_refcount) == 0) {
                hl->dh_magic = DYNLOCK_HANDLE_DEAD;
                list_del(&hl->dh_list);
                OBD_SLAB_FREE(hl, dynlock_cachep, sizeof(*hl));
        }
        spin_unlock(&dl->dl_list_lock);
}

int dynlock_is_locked(struct dynlock *dl, unsigned long value)
{
        struct dynlock_handle *hl;
        int result = 0;

        /* find requested lock in lockspace */
        spin_lock(&dl->dl_list_lock);
        BUG_ON(dl->dl_list.next == NULL);
        BUG_ON(dl->dl_list.prev == NULL);
        list_for_each_entry(hl, &dl->dl_list, dh_list) {
                BUG_ON(hl->dh_list.next == NULL);
                BUG_ON(hl->dh_list.prev == NULL);
                BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
                if (hl->dh_value == value && hl->dh_pid == current->pid) {
                        /* lock is found */
                        result = 1;
                        break;
                }
        }
        spin_unlock(&dl->dl_list_lock);
        return result;
}
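
/*
 * dynlock_is_locked() only reports whether a handle for the given value
 * exists whose dh_pid matches the current task, so it is mainly useful
 * for sanity checks.  A hypothetical example, again reusing the names
 * from the usage comment at the top of the file (LASSERT comes from
 * libcfs):
 *
 *        LASSERT(dynlock_is_locked(&space, key));
 *        ... modify state that must be covered by that lock ...
 */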