/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 *
 * Dynamic Locks
 *
 * struct dynlock is a lockspace; a caller may request a lock
 * (exclusive or shared) on some value in that lockspace.
 *
 */
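
/*
 * Illustrative usage sketch only, not taken from this tree: the lockspace
 * "dl" and the value "ino" below are hypothetical placeholders, and error
 * handling is reduced to a NULL check.
 *
 *      struct dynlock dl;
 *      struct dynlock_handle *dlh;
 *
 *      dynlock_init(&dl);
 *      dlh = dynlock_lock(&dl, ino, DLT_WRITE, GFP_NOFS);
 *      if (dlh != NULL) {
 *              ... serialized work keyed on "ino" ...
 *              dynlock_unlock(&dl, dlh);
 *      }
 */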

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <libcfs/libcfs.h>

#include <obd_support.h>
#include "osd_dynlocks.h"
#include "osd_internal.h"

#define DYNLOCK_HANDLE_MAGIC    0xd19a10c
#define DYNLOCK_HANDLE_DEAD     0xd1956ee
#define DYNLOCK_LIST_MAGIC      0x11ee91e6

/*
 * dynlock_init
 *
 * initialize lockspace
 *
 */
void dynlock_init(struct dynlock *dl)
{
        spin_lock_init(&dl->dl_list_lock);
        INIT_LIST_HEAD(&dl->dl_list);
        dl->dl_magic = DYNLOCK_LIST_MAGIC;
}

/*
 * dynlock_lock
 *
 * acquire a lock (exclusive or shared) on a value in the specified
 * lockspace. Each lock in the lockspace is allocated separately, so the
 * caller has to supply GFP flags.
 * The routine returns a pointer to the lock handle; this pointer is
 * intended to be passed to dynlock_unlock().
 *
 */
struct dynlock_handle *dynlock_lock(struct dynlock *dl, unsigned long value,
                                    enum dynlock_type lt, gfp_t gfp)
{
        struct dynlock_handle *nhl = NULL;
        struct dynlock_handle *hl;

        BUG_ON(dl == NULL);
        BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);

repeat:
        /* find requested lock in lockspace */
        spin_lock(&dl->dl_list_lock);
        BUG_ON(dl->dl_list.next == NULL);
        BUG_ON(dl->dl_list.prev == NULL);
        list_for_each_entry(hl, &dl->dl_list, dh_list) {
                BUG_ON(hl->dh_list.next == NULL);
                BUG_ON(hl->dh_list.prev == NULL);
                BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
                if (hl->dh_value == value) {
                        /* lock is found */
                        if (nhl) {
                                /* someone else allocated the lock we were
                                 * looking for while we were creating ours,
                                 * so drop the one we created
                                 */
                                OBD_SLAB_FREE(nhl, dynlock_cachep, sizeof(*nhl));
                        }
                        hl->dh_refcount++;
                        goto found;
                }
        }
        /* lock not found */
        if (nhl) {
                /* we have already allocated a lock, use it */
                hl = nhl;
                nhl = NULL;
                list_add(&hl->dh_list, &dl->dl_list);
                goto found;
        }
        spin_unlock(&dl->dl_list_lock);

        /* lock not found and we haven't allocated one yet, so allocate it */
        OBD_SLAB_ALLOC_GFP(nhl, dynlock_cachep, sizeof(*nhl), gfp);
        if (nhl == NULL)
                return NULL;
        nhl->dh_refcount = 1;
        nhl->dh_value = value;
        nhl->dh_readers = 0;
        nhl->dh_writers = 0;
        nhl->dh_magic = DYNLOCK_HANDLE_MAGIC;
        init_waitqueue_head(&nhl->dh_wait);

        /* while the lock was being allocated, someone else may have
         * allocated it and put it onto the list; check for that situation
         */
        goto repeat;

found:
        if (lt == DLT_WRITE) {
                /* exclusive lock: the caller does not want to share the
                 * lock at all.
                 * NOTE: one process may take the same lock several times;
                 * this functionality is useful for rename operations */
                while ((hl->dh_writers && hl->dh_pid != current->pid) ||
                                hl->dh_readers) {
                        spin_unlock(&dl->dl_list_lock);
                        wait_event(hl->dh_wait,
                                hl->dh_writers == 0 && hl->dh_readers == 0);
                        spin_lock(&dl->dl_list_lock);
                }
                hl->dh_writers++;
        } else {
                /* shared lock: the caller does not want to share the lock
                 * with a writer */
                while (hl->dh_writers) {
                        spin_unlock(&dl->dl_list_lock);
                        wait_event(hl->dh_wait, hl->dh_writers == 0);
                        spin_lock(&dl->dl_list_lock);
                }
                hl->dh_readers++;
        }
        hl->dh_pid = current->pid;
        spin_unlock(&dl->dl_list_lock);

        return hl;
}
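
/*
 * Illustrative sketch of the lock/unlock pairing (hypothetical caller code;
 * the shared lock type is assumed to be DLT_READ from osd_dynlocks.h, and
 * the lockspace "dl" and value "hash" are placeholders). A NULL return means
 * the per-value lock could not be allocated with the given GFP flags:
 *
 *      struct dynlock_handle *dlh;
 *
 *      dlh = dynlock_lock(&dl, hash, DLT_READ, GFP_NOFS);
 *      if (dlh == NULL)
 *              return -ENOMEM;
 *      ... read-side work keyed on "hash" ...
 *      dynlock_unlock(&dl, dlh);
 */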

/*
 * dynlock_unlock
 *
 * the caller has to pass the lockspace (dl) and the pointer to the lock
 * handle returned by dynlock_lock()
 *
 */
void dynlock_unlock(struct dynlock *dl, struct dynlock_handle *hl)
{
        int wakeup = 0;

        BUG_ON(dl == NULL);
        BUG_ON(hl == NULL);
        BUG_ON(dl->dl_magic != DYNLOCK_LIST_MAGIC);

        if (hl->dh_magic != DYNLOCK_HANDLE_MAGIC)
                printk(KERN_EMERG "wrong lock magic: %#x\n", hl->dh_magic);

        BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
        BUG_ON(hl->dh_writers != 0 && current->pid != hl->dh_pid);

        spin_lock(&dl->dl_list_lock);
        if (hl->dh_writers) {
                BUG_ON(hl->dh_readers != 0);
                hl->dh_writers--;
                if (hl->dh_writers == 0)
                        wakeup = 1;
        } else if (hl->dh_readers) {
                hl->dh_readers--;
                if (hl->dh_readers == 0)
                        wakeup = 1;
        } else {
                BUG();
        }
        if (wakeup) {
                hl->dh_pid = 0;
                wake_up(&hl->dh_wait);
        }
        if (--(hl->dh_refcount) == 0) {
                hl->dh_magic = DYNLOCK_HANDLE_DEAD;
                list_del(&hl->dh_list);
                OBD_SLAB_FREE(hl, dynlock_cachep, sizeof(*hl));
        }
        spin_unlock(&dl->dl_list_lock);
}

int dynlock_is_locked(struct dynlock *dl, unsigned long value)
{
        struct dynlock_handle *hl;
        int result = 0;

        /* find requested lock in lockspace */
        spin_lock(&dl->dl_list_lock);
        BUG_ON(dl->dl_list.next == NULL);
        BUG_ON(dl->dl_list.prev == NULL);
        list_for_each_entry(hl, &dl->dl_list, dh_list) {
                BUG_ON(hl->dh_list.next == NULL);
                BUG_ON(hl->dh_list.prev == NULL);
                BUG_ON(hl->dh_magic != DYNLOCK_HANDLE_MAGIC);
                if (hl->dh_value == value && hl->dh_pid == current->pid) {
                        /* lock is found */
                        result = 1;
                        break;
                }
        }
        spin_unlock(&dl->dl_list_lock);
        return result;
}
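
/*
 * Illustrative sketch: dynlock_is_locked() reports whether the current
 * thread holds a lock on "value" in the given lockspace, so a caller may
 * use it for sanity checks. Hypothetical example (the lockspace "dl" and
 * value "ino" are placeholders):
 *
 *      LASSERT(dynlock_is_locked(&dl, ino));
 */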