Whamcloud - gitweb
0e01a67690364795577b034ae9cb6c82df3d03f7
[fs/lustre-release.git] / lustre / osd-ldiskfs / osd_dynlocks.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  *
22  * Dynamic Locks
23  *
24  * struct dynlock is lockspace
25  * one may request lock (exclusive or shared) for some value
26  * in that lockspace
27  *
28  */
29 #ifdef __KERNEL__
30
31 #include <linux/module.h>
32 #include <linux/slab.h>
33 #include <linux/sched.h>
34
35 #include <libcfs/libcfs.h>
36
37 #include <obd_support.h>
38 #include "osd_dynlocks.h"
39
40 extern struct kmem_cache *dynlock_cachep;
41
42 #define DYNLOCK_HANDLE_MAGIC    0xd19a10c
43 #define DYNLOCK_HANDLE_DEAD     0xd1956ee
44 #define DYNLOCK_LIST_MAGIC      0x11ee91e6
45
46 /*
47  * dynlock_init
48  *
49  * initialize lockspace
50  *
51  */
52 void dynlock_init(struct dynlock *dl)
53 {
54         spin_lock_init(&dl->dl_list_lock);
55         INIT_LIST_HEAD(&dl->dl_list);
56         dl->dl_magic = DYNLOCK_LIST_MAGIC;
57 }
58
/*
 * dynlock_lock
 *
 * Acquire a lock (exclusive or shared) on @value in lockspace @dl.
 *
 * Each lock in the lockspace is allocated separately, so the caller
 * has to supply GFP flags for the allocation.
 *
 * \param dl    lockspace, previously set up with dynlock_init()
 * \param value key the lock is taken on within the lockspace
 * \param lt    DLT_WRITE for an exclusive lock, otherwise shared
 * \param gfp   allocation flags used if a new handle must be created
 *
 * \retval pointer to the held lock handle; pass it to dynlock_unlock()
 *         to release the lock
 * \retval NULL if a new handle was needed and its allocation failed
 */
struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
                                    enum dynlock_type lt, gfp_t gfp)
{
        struct dynlock_handle *nhl = NULL; /* pre-allocated handle, if any */
        struct dynlock_handle *hl;

        BUG_ON(dl == NULL);
        BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);

repeat:
        /* find requested lock in lockspace */
        spin_lock(&dl->dl_list_lock);
        BUG_ON(dl->dl_list.next == NULL);
        BUG_ON(dl->dl_list.prev == NULL);
        list_for_each_entry(hl, &dl->dl_list, dh_list) {
                /* sanity: each listed handle must be linked and alive */
                BUG_ON(hl->dh_list.next == NULL);
                BUG_ON(hl->dh_list.prev == NULL);
                BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
                if (hl->dh_value == value) {
                        /* lock is found */
                        if (nhl) {
                                /* someone else allocated and listed this
                                 * lock while we were allocating ours,
                                 * so drop our now-redundant copy
                                 */
                                OBD_SLAB_FREE(nhl, dynlock_cachep, sizeof(*nhl));
                        }
                        hl->dh_refcount++;
                        goto found;
                }
        }
        /* lock not found */
        if (nhl) {
                /* we already have allocated lock. use it */
                hl = nhl;
                nhl = NULL;
                list_add(&hl->dh_list, &dl->dl_list);
                goto found;
        }
        spin_unlock(&dl->dl_list_lock);

        /* lock not found and we haven't allocated lock yet. allocate it */
        OBD_SLAB_ALLOC_GFP(nhl, dynlock_cachep, sizeof(*nhl), gfp);
        if (nhl == NULL)
                return NULL;
        nhl->dh_refcount = 1;
        nhl->dh_value = value;
        nhl->dh_readers = 0;
        nhl->dh_writers = 0;
        nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
        init_waitqueue_head(&nhl->dh_wait);

        /* while the lock was being allocated, someone else may have
         * allocated it and put it onto the list; re-scan to check
         */
        goto repeat;

found:
        /* reached with dl_list_lock held and a reference taken on hl */
        if (lt == DLT_WRITE) {
                /* exclusive lock: the user doesn't want to share the lock
                 * at all.
                 * NOTE: one process may take the same lock several times;
                 * this functionality is useful for rename operations */
                while ((hl->dh_writers && hl->dh_pid != current->pid) ||
                                hl->dh_readers) {
                        /* drop the spinlock while sleeping; the condition
                         * is re-checked under the lock after wakeup */
                        spin_unlock(&dl->dl_list_lock);
                        wait_event(hl->dh_wait,
                                hl->dh_writers == 0 && hl->dh_readers == 0);
                        spin_lock(&dl->dl_list_lock);
                }
                hl->dh_writers++;
        } else {
                /* shared lock: the user doesn't want to share the lock
                 * with a writer */
                while (hl->dh_writers) {
                        spin_unlock(&dl->dl_list_lock);
                        wait_event(hl->dh_wait, hl->dh_writers == 0);
                        spin_lock(&dl->dl_list_lock);
                }
                hl->dh_readers++;
        }
        hl->dh_pid = current->pid;
        spin_unlock(&dl->dl_list_lock);

        return hl;
}
153
/*
 * dynlock_unlock
 *
 * Release a lock previously acquired with dynlock_lock() and drop the
 * handle reference; the handle is freed when the last reference goes.
 *
 * \param dl the lockspace the lock belongs to
 * \param hl the handle returned by dynlock_lock()
 */
void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
{
        int wakeup = 0;

        BUG_ON(dl == NULL);
        BUG_ON(hl == NULL);
        BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);

        /* report a bad handle loudly before the BUG_ON below fires */
        if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
                printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);

        BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
        /* only the owning process may release a write lock */
        BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);

        spin_lock(&dl->dl_list_lock);
        if (hl->dh_writers) {
                /* writers and readers are mutually exclusive */
                BUG_ON(hl->dh_readers != 0);
                hl->dh_writers--;
                if (hl->dh_writers == 0)
                        wakeup = 1;
        } else if (hl->dh_readers) {
                hl->dh_readers--;
                if (hl->dh_readers == 0)
                        wakeup = 1;
        } else {
                /* unlocking a handle that is not held at all */
                BUG();
        }
        if (wakeup) {
                /* lock is fully released: clear owner and wake waiters */
                hl->dh_pid = 0;
                wake_up(&hl->dh_wait);
        }
        if (--(hl->dh_refcount) == 0) {
                /* last reference: poison the magic, unlink and free */
                hl->dh_magic = DYNLOCK_HANDLE_DEAD;
                list_del(&hl->dh_list);
                OBD_SLAB_FREE(hl, dynlock_cachep, sizeof(*hl));
        }
        spin_unlock(&dl->dl_list_lock);
}
199
200 int dynlock_is_locked(struct dynlock *dl, unsigned long value)
201 {
202         struct dynlock_handle *hl;
203         int result = 0;
204
205         /* find requested lock in lockspace */
206         spin_lock(&dl->dl_list_lock);
207         BUG_ON(dl->dl_list.next == NULL);
208         BUG_ON(dl->dl_list.prev == NULL);
209         list_for_each_entry(hl, &dl->dl_list, dh_list) {
210                 BUG_ON(hl->dh_list.next == NULL);
211                 BUG_ON(hl->dh_list.prev == NULL);
212                 BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
213                 if (hl->dh_value == value && hl->dh_pid == current->pid) {
214                         /* lock is found */
215                         result = 1;
216                         break;
217                 }
218         }
219         spin_unlock(&dl->dl_list_lock);
220         return result;
221 }
222 #endif