/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
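/* Note: the caller (ldlm_extent_policy() below) seeds *new_ex with the whole
 * object, [0, OBD_OBJECT_EOF].  Each conflicting lock found on the queue can
 * only shrink that candidate extent back toward the requested range
 * [req_start, req_end]; the result is never smaller than the request itself,
 * as the LASSERTFs at the end of this function verify. */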
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        __u64 req_align, mask;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                /* We already hit the minimum requested size, search no more */
                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;

                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;

                /* If lock doesn't overlap new_ex, skip it. */
                if (l_extent->end < new_ex->start ||
                    l_extent->start > new_ex->end)
                        continue;

                /* The locks conflict in their requested extents and we can't
                 * satisfy both, so ignore this one.  Either we will ping-pong
                 * this extent (we would regardless of what extent we granted)
                 * or the lock is unused and shouldn't limit our extent
                 * growth. */
                if (lock->l_req_extent.end >= req_start &&
                    lock->l_req_extent.start <= req_end)
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * lock->l_req_extent.end strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end+1, req_start);
                }

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }

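        /* Under heavy write contention, cap how far the extent may grow
         * upward: with more than 32 conflicting locks, a PW/CW grant is
         * trimmed to at most LDLM_MAX_GROWN_EXTENT bytes past the requested
         * start (unless the request itself is already larger than that),
         * presumably so a single writer does not walk away with a huge extent
         * it will soon be asked to cancel again. */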
#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }

        if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
                EXIT;
                return;
        }

        /* We need to ensure that the lock extent is properly aligned to what
         * the client requested.  We align it to the lowest common denominator
         * of the client's requested lock start and end alignment. */
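        /* Illustrative example: for a request of [0x6000, 0x7fff] both
         * boundaries are 8KB-aligned, so req_align = 0x8000 | 0x6000 = 0xe000,
         * the scan below stops at the 0x2000 bit and mask becomes 0x1fff.  A
         * grown candidate of, say, [0x4100, 0xa2ff] is then trimmed inward to
         * [0x6000, 0x9fff], which still covers the requested range. */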
        mask = 0x1000ULL;
        req_align = (req_end + 1) | req_start;
        if (req_align != 0) {
                while ((req_align & mask) == 0)
                        mask <<= 1;
        }
        mask -= 1;
        /* We can only shrink the lock, not grow it.
         * This should never cause the lock to be smaller than requested,
         * since the requested lock was already aligned on these boundaries. */
        new_ex->start = ((new_ex->start - 1) | mask) + 1;
        new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
        LASSERTF(new_ex->start <= req_start,
                 "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
                 mask, new_ex->start, req_start);
        LASSERTF(new_ex->end >= req_end,
                 "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
                 mask, new_ex->end, req_end);

        EXIT;
}

/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

        if (lock->l_export == NULL)
                /*
                 * this is a local lock taken by the server (e.g., as part of
                 * OST-side locking, or unlink handling). Expansion doesn't
                 * make a lot of sense for local locks, because they are
                 * dropped immediately on operation completion and would only
                 * conflict with other threads.
                 */
                return;

        if (lock->l_policy_data.l_extent.start == 0 &&
            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                /* fast-path whole file locks */
                return;

        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}

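/* Track and test contention on this resource.  If the number of conflicting
 * locks seen while scanning the queues exceeds the namespace's
 * ns_contended_locks threshold, the resource is stamped as contended now;
 * the function returns true as long as we are still within
 * ns_contention_time seconds of the last such stamp. */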
static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
        struct ldlm_resource *res = lock->l_resource;
        cfs_time_t now = cfs_time_current();

        CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
        if (contended_locks > res->lr_namespace->ns_contended_locks)
                res->lr_contention_time = now;
        return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
                cfs_time_seconds(res->lr_namespace->ns_contention_time)));
}

/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
 *
 * 0 if the lock is not compatible
 * 1 if the lock is compatible
 * 2 if this group lock is compatible and requires no further checking
 * negative error, such as EWOULDBLOCK for group locks
 */
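/* When work_list is non-NULL, every conflicting granted lock that has a
 * blocking AST gets an AST work item queued on it and scanning continues;
 * when work_list is NULL the function only tests compatibility and returns
 * 0 at the first conflict found. */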
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int *flags, ldlm_error_t *err,
                         struct list_head *work_list, int *contended_locks)
{
        struct list_head *tmp;
        struct ldlm_lock *lock;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int compat = 1;
        int scan = 0;
        int check_contention;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                check_contention = 1;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock)
                        break;

                if (unlikely(scan)) {
                        /* We only get here if we are queuing a GROUP lock
                           and met some incompatible one.  The main idea of
                           this code is to insert the GROUP lock after a
                           compatible GROUP lock in the waiting queue or, if
                           there is none, in front of the first non-GROUP
                           lock. */
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* OK, we hit a non-GROUP lock; there should be
                                 * no more GROUP locks later on, so queue in
                                 * front of the first non-GROUP lock */

                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                compat = 0;
                                break;
                        }
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                compat = 0;
                                break;
                        }
                        continue;
                }

                /* locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode)) {
                        /* non-group locks are compatible, overlap doesn't
                           matter */
                        if (likely(req_mode != LCK_GROUP))
                                continue;

                        /* If we are trying to get a GROUP lock and there is
                           another one of this kind, we need to compare gid */
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* If the existing lock with matching gid is
                                 * granted, we grant the new one too. */
                                if (lock->l_req_mode == lock->l_granted_mode)
                                        RETURN(2);

                                /* Otherwise we are scanning the queue of
                                 * waiting locks and it means the current
                                 * request would block along with the existing
                                 * lock (which is already blocked).
                                 * If we are in nonblocking mode, return
                                 * immediately. */
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }
                                /* If this group lock is compatible with another
                                 * group lock on the waiting list, they must be
                                 * together in the list, so they can be granted
                                 * at the same time.  Otherwise the later lock
                                 * can get stuck behind another, incompatible,
                                 * lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                /* Because 'lock' is not granted, we can stop
                                 * processing this queue and return immediately.
                                 * There is no need to check the rest of the
                                 * list. */
                                RETURN(0);
                        }
                }

                if (unlikely(req_mode == LCK_GROUP &&
                             (lock->l_req_mode != lock->l_granted_mode))) {
                        scan = 1;
                        compat = 0;
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* OK, we hit a non-GROUP lock; there should
                                 * be no more GROUP locks later on, so queue in
                                 * front of the first non-GROUP lock */

                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                break;
                        }
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                break;
                        }
                        continue;
                }

                if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                        /* If the compared lock is a GROUP lock, then the
                         * requested one is PR/PW, so this is not compatible;
                         * extent range does not matter */
                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                compat = -EWOULDBLOCK;
                                goto destroylock;
                        } else {
                                *flags |= LDLM_FL_NO_TIMEOUT;
                        }
                } else if (lock->l_policy_data.l_extent.end < req_start ||
                           lock->l_policy_data.l_extent.start > req_end) {
                        /* if a non-group lock doesn't overlap, skip it */
                        continue;
                } else if (lock->l_req_extent.end < req_start ||
                           lock->l_req_extent.start > req_end)
                        /* false contention, the requests don't really overlap */
                        check_contention = 0;

                if (!work_list)
                        RETURN(0);

                /* don't count conflicting glimpse locks */
                if (lock->l_req_mode == LCK_PR &&
                    lock->l_policy_data.l_extent.start == 0 &&
                    lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                        check_contention = 0;

                *contended_locks += check_contention;

                compat = 0;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, req, work_list);
        }

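        /* Deny-on-contention: if the resource is contended, the caller allowed
         * denial (LDLM_FL_DENY_ON_CONTENTION), the request is not a GROUP lock
         * and covers no more than ns_max_nolock_size bytes, refuse it with
         * -EUSERS instead of queueing it, presumably so the client can fall
         * back to lockless I/O on small, hotly contended ranges. */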
        if (ldlm_check_contention(req, *contended_locks) &&
            compat == 0 &&
            (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
            req->l_req_mode != LCK_GROUP &&
            req_end - req_start <=
            req->l_resource->lr_namespace->ns_max_nolock_size)
                GOTO(destroylock, compat = -EUSERS);

        RETURN(compat);
destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy_nolock(req);
        *err = compat;
        RETURN(compat);
}

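/* Drop the blocking-AST work items collected on a bl_list when the request
 * lock was destroyed before the ASTs could be sent: clear the AST_SENT flag,
 * release the reference on each lock's blocking lock, and drop the list's
 * reference on the lock itself. */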
static void discard_bl_list(struct list_head *bl_list)
{
        struct list_head *tmp, *pos;
        ENTRY;

        list_for_each_safe(pos, tmp, bl_list) {
                struct ldlm_lock *lock =
                        list_entry(pos, struct ldlm_lock, l_bl_ast);

                list_del_init(&lock->l_bl_ast);
                LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                lock->l_flags &= ~LDLM_FL_AST_SENT;
                LASSERT(lock->l_bl_ast_run == 0);
                LASSERT(lock->l_blocking_lock);
                LDLM_LOCK_PUT(lock->l_blocking_lock);
                lock->l_blocking_lock = NULL;
                LDLM_LOCK_PUT(lock);
        }
        EXIT;
}

/* If first_enq is 0 (i.e., called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (i.e., called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - must call this function with the ns lock held once */
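/* On the reprocess path (first_enq == 0) this returns LDLM_ITER_STOP if the
 * lock cannot be granted yet, or LDLM_ITER_CONTINUE after granting it.  On
 * the enqueue path it returns 0, or a negative error (also stored in *err)
 * if the request was destroyed. */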
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = CFS_LIST_HEAD_INIT(rpc_list);
        int rc, rc2;
        int contended_locks = 0;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !(lock->l_flags & LDLM_AST_DISCARD_DATA));
        check_res_locked(res);
        *err = ELDLM_OK;

        if (!first_enq) {
                /* Careful observers will note that we don't handle -EWOULDBLOCK
                 * here, but it's ok for a non-obvious reason -- compat_queue
                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
                 * flags should always be zero here, and if that ever stops
                 * being true, we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                              err, NULL, &contended_locks);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, err, NULL,
                                                      &contended_locks);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
                        ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }

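        /* First-time enqueue: check both queues, collecting blocking-AST work
         * items for conflicting granted locks in rpc_list.  If the lock cannot
         * be granted immediately, it goes on the waiting list and the
         * collected ASTs are sent with the resource unlocked; if that run
         * reports -ERESTART the queues may have changed underneath us, so we
         * start the scan over. */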
 restart:
        contended_locks = 0;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
                                      &rpc_list, &contended_locks);
        if (rc < 0)
                GOTO(out, rc); /* lock was destroyed */
        if (rc == 2)
                goto grant;

        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
                                       &rpc_list, &contended_locks);
        if (rc2 < 0)
                GOTO(out, rc = rc2); /* lock was destroyed */

        if (rc + rc2 == 2) {
        grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                unlock_res(res);
                rc = ldlm_run_bl_ast_work(&rpc_list);
                lock_res(res);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
                /* this way we force the client to wait for the lock
                 * endlessly once the lock is enqueued -bzzz */
                *flags |= LDLM_FL_NO_TIMEOUT;
        }
        RETURN(0);
out:
        if (!list_empty(&rpc_list)) {
                LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
                discard_bl_list(&rpc_list);
        }
        RETURN(rc);
}

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold ns_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
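/* Illustrative example: with granted extents [0, 4095] and [8192, 12287],
 * old_kms = 12288 and the lock being cancelled covering [8192, 12287], the
 * remaining lock ends at 4095 < old_kms, so the new KMS becomes 4096. */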
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        lock->l_flags |= LDLM_FL_KMS_IGNORE;

        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;

                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked above)
                 * so kms can only ever be smaller or the same as old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);

        RETURN(kms);
}