/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_support.h>
#include <linux/lustre_lib.h>

#include "ldlm_internal.h"

/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
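/* Illustrative example (hypothetical values): with new_ex initially covering
 * the whole file, a PW request for [2097152, 3145727] and a single conflicting
 * granted PW lock with extent [0, 1048575] would shrink new_ex to
 * [1048576, ~0]: the largest extent that still contains the request but does
 * not overlap the conflicting lock. */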
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;

                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;

                /* If lock doesn't overlap new_ex, skip it. */
                if (l_extent->end < new_ex->start ||
                    l_extent->start > new_ex->end)
                        continue;
                /* The lock's requested extent conflicts with ours and we
                 * can't satisfy both, so ignore it.  Either we will ping-pong
                 * this extent (we would regardless of what extent we granted)
                 * or the lock is unused and shouldn't limit our extent
                 * growth. */
                if (lock->l_req_extent.end >= req_start &&
                    lock->l_req_extent.start <= req_end)
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * l_req_extent->end strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end+1, req_start);
                }

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }

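        /* With many (> 32) conflicting locks on a PW or CW request, cap how
         * far the extent is grown upwards: no more than LDLM_MAX_GROWN_EXTENT
         * (32MB - 1) past the requested start. */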
#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }
        EXIT;
}

/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
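/* Starting from the whole file [0, ~0], both the granted and waiting queues
 * are used to shrink the extent; LCK_GROUP requests are never grown.  If the
 * extent was changed, LDLM_FL_LOCK_CHANGED is set so the caller knows the
 * granted extent differs from the requested one. */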
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = ~0};

        if (lock->l_req_mode == LCK_GROUP)
                return;

        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}

/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
 *
 * Returns:
 *   0 if the lock is not compatible
 *   1 if the lock is compatible
 *   2 if this group lock is compatible and requires no further checking
 *   negative error, such as -EWOULDBLOCK for group locks
 */
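/* When insertp is non-NULL, on return it points at the list position to hand
 * to ldlm_resource_add_lock() should the request end up on the waiting queue
 * (see ldlm_process_extent_lock() below). */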
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int *flags, struct list_head *work_list,
                         struct list_head **insertp)
{
        struct list_head *tmp;
        struct list_head *save = NULL;
        struct ldlm_lock *lock = NULL;
        ldlm_mode_t req_mode = req->l_req_mode;
        int compat = 1;
        int found = 0;
        ENTRY;

        lockmode_verify(req_mode);

        /* Extent locks are only queued once. We can get back here with
         * insertp != NULL if the blocking ASTs returned -ERESTART. */
        if (!list_empty(&req->l_res_link))
                insertp = NULL;

        if (req->l_req_mode != LCK_GROUP) {
                __u64 req_start = req->l_req_extent.start;
                __u64 req_end = req->l_req_extent.end;

                list_for_each(tmp, queue) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (req == lock)
                                break;

                        if (lock->l_req_mode == LCK_GROUP) {
                                if (*flags & LDLM_FL_BLOCK_NOWAIT)
                                        RETURN(-EWOULDBLOCK);

                                /* No blocking ASTs are sent for group locks. */
                                compat = 0;

                                /* there's a blocking group lock in front
                                 * of us on the queue.  It can be held
                                 * indefinitely, so don't timeout. */
                                if (insertp) {
                                        *flags |= LDLM_FL_NO_TIMEOUT;
                                        /* lock_bitlock(req) is held here. */
                                        req->l_flags |= LDLM_FL_NO_TIMEOUT;
                                }

                                if (work_list)
                                        continue;
                                else
                                        break;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_req_mode, req_mode))
                                continue;

                        if (lock->l_policy_data.l_extent.end < req_start ||
                            lock->l_policy_data.l_extent.start > req_end)
                                continue;

                        compat = 0;

                        if (!work_list)
                                break;

                        if (lock->l_blocking_ast)
                                ldlm_add_ast_work_item(lock, req, work_list);
                }

                if (insertp)
                        *insertp = queue;

                RETURN(compat);
        }

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                if (req == lock)
                        break;

                if (lock->l_req_mode != LCK_GROUP) {
                        if (lock->l_req_mode != lock->l_granted_mode) {
                                /* we must be traversing the waitq. */

                                /* If a group lock was already found then
                                 * req can be queued before any extent locks
                                 * that come after the found group lock. */
                                if (found)
                                        break;

                                if (!insertp) {
                                        /* We've hit a conflicting extent lock
                                         * on the waitq before hitting the req
                                         * group lock. See comments below. */
                                        compat = 0;
                                        break;
                                }

                                /* Group locks are not normally blocked by
                                 * waiting PR|PW locks. */

                                /* If NO_TIMEOUT was sent back to the client
                                 * we can queue the group lock in front of
                                 * this extent lock. */
                                if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
                                        if (save == NULL)
                                                save = tmp;
                                        continue;
                                }

                                /* If we did NOT send NO_TIMEOUT back to the
                                 * client for this extent lock then the client
                                 * could possibly timeout if we queue this
                                 * group lock before it, so don't. This is the
                                 * only way to get a conflicting extent lock
                                 * in front of a group lock on the waitq. */
                        }

                        compat = 0;
                        if (!work_list) {
                                LASSERT(save == NULL);
                                break;
                        }

                        /* If we previously skipped over some extent locks
                         * because we thought we were going to queue the
                         * group lock in front of them then we need to go back
                         * and send blocking ASTs for the locks we skipped. */
                        if (save != NULL) {
                                struct ldlm_lock *lck2;

                                for (; save != tmp; save = save->next) {
                                        lck2 = list_entry(save,
                                                          struct ldlm_lock,
                                                          l_res_link);

                                        /* If there was a group lock after save
                                         * then we would have exited this loop
                                         * above. */
                                        LASSERT(lck2->l_req_mode!=LCK_GROUP);

                                        if (lck2->l_blocking_ast) {
                                                ldlm_add_ast_work_item(lck2,req,
                                                                     work_list);
                                        }
                                }
                                save = NULL;
                        }

                        if (lock->l_blocking_ast)
                                ldlm_add_ast_work_item(lock, req, work_list);
                        continue;
                }

                /* If it was safe to insert a group lock at save,
                 * i.e. save != NULL, then this group lock already
                 * on the queue would have been inserted before save. */
                LASSERT(save == NULL);

                /* Note: no blocking ASTs are sent for group locks. */

                if (lock->l_policy_data.l_extent.gid ==
                    req->l_policy_data.l_extent.gid) {
                        /* group locks with this gid already on the waitq. */
                        found = 2;

                        if (lock->l_req_mode == lock->l_granted_mode) {
                                /* if a group lock with this gid has already
                                 * been granted then grant this one. */
                                compat = 2;
                                break;
                        }
                } else {
                        if (found == 2)
                                break;

                        /* group locks already exist on the queue. */
                        found = 1;

                        if (*flags & LDLM_FL_BLOCK_NOWAIT)
                                RETURN(-EWOULDBLOCK);

                        compat = 0;

                        /* there's a blocking group lock in front
                         * of us on the queue.  It can be held
                         * indefinitely, so don't timeout. */
                        *flags |= LDLM_FL_NO_TIMEOUT;

                        /* the only reason to continue traversing the
                         * list at this point is to find the proper
                         * place to insert the lock in the waitq. */
                        if (!insertp)
                                break;
                }
        }

        if (insertp != NULL) {
                if (save != NULL)
                        *insertp = save;
                else
                        *insertp = tmp;
        }

        RETURN(compat);
}

/* If first_enq is 0 (i.e. called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - the caller has already initialized req->lr_tmp
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (i.e. called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - the caller has NOT initialized req->lr_tmp, so we must do so here
 *   - must call this function with the ns lock held once */
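/* Returns LDLM_ITER_STOP or LDLM_ITER_CONTINUE when called from
 * ldlm_reprocess_queue() (first_enq == 0), and 0 or a negative error
 * when called from ldlm_lock_enqueue() (first_enq != 0). */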
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct list_head *insertp = NULL;
        int rc, rc2;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        *err = ELDLM_OK;

        if (!first_enq) {
                /* -EWOULDBLOCK can't occur here since (flags & BLOCK_NOWAIT)
                 * lock requests would either be granted or fail on their
                 * first_enq. flags should always be zero here, and if that
                 * ever changes we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock,
                                              flags, NULL, NULL);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, NULL, NULL);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);
                ldlm_extent_policy(res, lock, flags);
                lock_bitlock(lock);
                lock->l_flags &= ~LDLM_FL_NO_TIMEOUT;
                unlock_bitlock(lock);
                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }

 restart:
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, &rpc_list,
                                      NULL);
        if (rc < 0)
                GOTO(destroylock, rc);
        if (rc == 2)
                goto grant;

        /* Traverse the waiting list in case there are other conflicting
         * lock requests ahead of us in the queue and send blocking ASTs */
        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, &rpc_list,
                                       &insertp);
        if (rc2 < 0)
                GOTO(destroylock, rc = rc2);
        if (rc + rc2 == 2) {
 grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                lock->l_flags &= ~LDLM_FL_NO_TIMEOUT;
                ldlm_grant_lock(lock, NULL);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, insertp, lock);
                unlock_res(res);
                rc = ldlm_run_bl_ast_work(&rpc_list);
                lock_res(res);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
        }

        RETURN(0);

 destroylock:
        list_del_init(&lock->l_res_link);
        unlock_res(res);
        ldlm_lock_destroy(lock);
        lock_res(res);
        *err = rc;
        RETURN(rc);
}

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold ns_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
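/* Example (hypothetical values): with granted locks covering [0, 4095] and
 * [4096, 8191] and old_kms == 8192, cancelling the second lock shifts the
 * KMS down to 4096, while cancelling the first leaves it at 8192 because the
 * remaining lock still covers data up to old_kms - 1. */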
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        lock->l_flags |= LDLM_FL_KMS_IGNORE;

        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;

                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked above)
                 * so kms can only ever be smaller or the same as old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);

        RETURN(kms);
}