lustre/ldlm/ldlm_extent.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_support.h>
#include <linux/lustre_lib.h>

#include "ldlm_internal.h"

/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
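/* Illustrative example (added commentary, not in the original source): with
 * new_ex initialized to [0, ~0] and a PW request for [100, 199]:
 *   - an incompatible granted lock on [0, 49] limits downward growth, so
 *     new_ex->start becomes min(49 + 1, 100) = 50;
 *   - an incompatible granted lock on [300, 399] limits upward growth, so
 *     new_ex->end becomes max(300 - 1, 199) = 299;
 *   - a lock whose requested extent overlaps [100, 199] is ignored entirely,
 *     since that conflict exists no matter what extent we grant.
 * The result, [50, 299], contains the request and avoids the conflicting
 * granted extents. */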
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* If lock doesn't overlap new_ex, skip it. */
                if (l_extent->end < new_ex->start ||
                    l_extent->start > new_ex->end)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode))
                        continue;

                /* The locks conflict in their requested extents and we can't
                 * satisfy both, so ignore this one.  Either we will ping-pong
                 * this extent (we would regardless of what extent we granted)
                 * or the lock is unused and shouldn't limit our extent
                 * growth. */
                if (lock->l_req_extent.end >= req_start &&
                    lock->l_req_extent.start <= req_end)
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * We don't grow downwards if there are lots of lockers. */
                if (l_extent->start < req_start) {
                        if (atomic_read(&req->l_resource->lr_refcount) > 20)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end+1, req_start);
                }

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }
        EXIT;
}

/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
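/* Added note (not in the original source): new_ex starts out covering the
 * whole file, [0, ~0], and each queue scan can only shrink it.  On an
 * otherwise idle resource nothing shrinks it, so the request is grown to
 * cover the entire file, and LDLM_FL_LOCK_CHANGED tells the caller that the
 * granted extent differs from the one the client asked for. */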
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = ~0};

        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}

/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
 *
 * Returns:
 * 0 if the lock is not compatible
 * 1 if the lock is compatible
 * 2 if this group lock is compatible and requires no further checking
 * negative error, such as EWOULDBLOCK for group locks
 */
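/* Illustrative summary of the group-lock (LCK_CW) handling below (added
 * commentary, not in the original source):
 *   - two CW requests with the same gid are compatible: return 2 if the
 *     matching lock is already granted, otherwise queue the new request
 *     directly behind it and return 0;
 *   - a CW lock with a different gid, or a CW lock blocking a PR/PW request,
 *     conflicts regardless of extent overlap; with LDLM_FL_BLOCK_NOWAIT set
 *     the request fails immediately with -EWOULDBLOCK, otherwise it waits
 *     with LDLM_FL_NO_TIMEOUT set. */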
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int send_cbs, int *flags, ldlm_error_t *err)
{
        struct list_head *tmp;
        struct ldlm_lock *lock;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int compat = 1;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock)
                        RETURN(compat);

                /* locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode)) {
                        /* non-CW locks are compatible, overlap doesn't matter */
                        if (req_mode != LCK_CW)
                                continue;

                        /* If we are trying to get a CW lock and there is
                           another one of this kind, we need to compare gid */
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                if (lock->l_req_mode == lock->l_granted_mode)
                                        RETURN(2);

                                /* If we are in nonblocking mode - return
                                   immediately */
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }
                                /* If this group lock is compatible with another
                                 * group lock on the waiting list, they must be
                                 * together in the list, so they can be granted
                                 * at the same time.  Otherwise the later lock
                                 * can get stuck behind another, incompatible,
                                 * lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                /* Because 'lock' is not granted, we can stop
                                 * processing this queue and return immediately.
                                 * There is no need to check the rest of the
                                 * list. */
                                RETURN(0);
                        }
                }

                if (lock->l_req_mode == LCK_CW) {
                        /* If the compared lock is CW, then the requested lock
                         * is PR/PW => not compatible; the extent range does
                         * not matter */
                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                compat = -EWOULDBLOCK;
                                goto destroylock;
                        } else {
                                *flags |= LDLM_FL_NO_TIMEOUT;
                        }
                } else if (lock->l_policy_data.l_extent.end < req_start ||
                           lock->l_policy_data.l_extent.start > req_end) {
                        /* if a non-CW lock doesn't overlap, skip it */
                        continue;
                }

                if (!send_cbs)
                        RETURN(0);

                compat = 0;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, req, NULL, 0);
        }

        return(compat);
destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy(req);
        *err = compat;
        RETURN(compat);
}

/* If first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - the caller has already initialized req->lr_tmp
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - the caller has NOT initialized req->lr_tmp, so we must do so here
 *   - must call this function with the ns lock held once */
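/* Added summary of return values (derived from the code below, not in the
 * original source): with first_enq == 0 this returns LDLM_ITER_STOP if the
 * lock is still blocked and LDLM_ITER_CONTINUE once it has been granted;
 * with first_enq == 1 it returns 0 on success (the lock is granted or left
 * on the waiting list with LDLM_FL_BLOCK_GRANTED) and a negative error if
 * the lock was destroyed, e.g. -EWOULDBLOCK for an LDLM_FL_BLOCK_NOWAIT
 * request. */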
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        int rc, rc2;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        *err = ELDLM_OK;

        if (!first_enq) {
                /* Careful observers will note that we don't handle -EWOULDBLOCK
                 * here, but it's ok for a non-obvious reason -- compat_queue
                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
                 * flags should always be zero here, and if that ever stops
                 * being true, we want to find out. */
                LASSERT(*flags == 0);
                LASSERT(res->lr_tmp != NULL);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 0, flags,
                                              err);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock, 0,
                                                      flags, err);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, NULL, 0, 1);
                RETURN(LDLM_ITER_CONTINUE);
        }

 restart:
        LASSERT(res->lr_tmp == NULL);
        res->lr_tmp = &rpc_list;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 1, flags, err);
        if (rc < 0)
                RETURN(rc); /* lock was destroyed */
        if (rc == 2)
                goto grant;

        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, 1, flags, err);
        if (rc2 < 0)
                RETURN(rc2); /* lock was destroyed */
        res->lr_tmp = NULL;

        if (rc + rc2 == 2) {
        grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL, 0, 0);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                l_unlock(&res->lr_namespace->ns_lock);
                rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list);
                l_lock(&res->lr_namespace->ns_lock);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
        }
        RETURN(0);
}

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
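/* Worked example (added commentary, not in the original source): with
 * granted extent locks on [0, 1023] and [1024, 4095] and old_kms == 4096,
 * cancelling the [1024, 4095] lock leaves only [0, 1023], so the new KMS is
 * 1023 + 1 = 1024.  If any remaining lock ends at or beyond old_kms, the
 * KMS is left unchanged. */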
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        l_lock(&res->lr_namespace->ns_lock);
        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lock == lck)
                        continue;
                if (lck->l_policy_data.l_extent.end >= old_kms)
                        GOTO(out, kms = old_kms);
                kms = lck->l_policy_data.l_extent.end + 1;
        }

        GOTO(out, kms);
 out:
        l_unlock(&res->lr_namespace->ns_lock);
        return kms;
}