/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software.  If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you.  See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_support.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        __u64 req_align, mask;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                /* We already hit the minimum requested size, search no more */
                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter.
                 * Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel). */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;

                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much. */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;

                /* If the lock doesn't overlap new_ex, skip it. */
                if (l_extent->end < new_ex->start ||
                    l_extent->start > new_ex->end)
                        continue;

                /* The locks conflict in their requested extents and we can't
                 * satisfy both, so ignore this one.  Either we will ping-pong
                 * this extent (we would regardless of what extent we granted)
                 * or the lock is unused and shouldn't limit our extent
                 * growth. */
                if (lock->l_req_extent.end >= req_start &&
                    lock->l_req_extent.start <= req_end)
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * l_req_extent->end is strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end + 1,
                                                    req_start);
                }

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }

#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }

        if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
                EXIT;
                return;
        }

        /* We need to ensure that the lock extent is properly aligned to what
         * the client requested.  We align it to the lowest common denominator
         * of the client's requested lock start and end alignment. */
        mask = 0x1000ULL;
        req_align = (req_end + 1) | req_start;
        /* Only look for a coarser alignment if the request is already
         * page-aligned, otherwise the loop below would never terminate. */
        if (req_align != 0 && (req_align & (mask - 1)) == 0) {
                while ((req_align & mask) == 0)
                        mask <<= 1;
        }
        mask -= 1;
        /* We can only shrink the lock, not grow it.
         * This should never cause the lock to be smaller than requested,
         * since the requested lock was already aligned on these boundaries. */
        new_ex->start = ((new_ex->start - 1) | mask) + 1;
        new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
        LASSERTF(new_ex->start <= req_start,
                 "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
                 mask, new_ex->start, req_start);
        LASSERTF(new_ex->end >= req_end,
                 "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
                 mask, new_ex->end, req_end);
        EXIT;
}
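
/*
 * Editor's worked example for the alignment step above (a sketch, not part
 * of the original file).  For a request [req_start, req_end] =
 * [8192, 16383]: req_align = 16384 | 8192 = 0x6000, whose lowest set bit at
 * or above 0x1000 is 0x2000, so mask becomes 0x1fff and the grown extent is
 * rounded inward to 8 KiB boundaries.  The user-space trace below shows the
 * same arithmetic; it is illustrative only and never compiled.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned long long req_start = 8192, req_end = 16383;
        unsigned long long start = 0, end = 20000;      /* grown extent */
        unsigned long long mask = 0x1000ULL;
        unsigned long long req_align = (req_end + 1) | req_start;

        /* find the lowest set bit of req_align at or above 0x1000 */
        if (req_align != 0 && (req_align & (mask - 1)) == 0)
                while ((req_align & mask) == 0)
                        mask <<= 1;
        mask -= 1;

        /* shrink the grown extent inward to those boundaries */
        start = ((start - 1) | mask) + 1;       /* 0 stays 0 (wraps) */
        end = ((end + 1) & ~mask) - 1;          /* 20000 becomes 16383 */
        printf("mask %#llx extent [%llu, %llu]\n", mask, start, end);
        return 0;
}
#endif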

/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

        if (lock->l_export == NULL)
                /*
                 * This is a local lock taken by the server (e.g., as part of
                 * OST-side locking, or unlink handling).  Expansion doesn't
                 * make a lot of sense for local locks, because they are
                 * dropped immediately on operation completion and would only
                 * conflict with other threads.
                 */
                return;

        if (lock->l_policy_data.l_extent.start == 0 &&
            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                /* fast-path whole file locks */
                return;

        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}
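
/*
 * Editor's worked example (a sketch, not from the original file): suppose a
 * PW request on [0, 4095] arrives while another client holds a granted lock
 * whose extent was grown to [16384, OBD_OBJECT_EOF] from a request on
 * [1048576, 1052671].  Starting from new_ex = [0, OBD_OBJECT_EOF], the scan
 * above clips the upper end to 16384 - 1, so the request is granted on
 * [0, 16383] and LDLM_FL_LOCK_CHANGED is set.  With no conflicting locks at
 * all, new_ex would stay [0, OBD_OBJECT_EOF] and the request would be
 * granted on the whole file.
 */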

static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
        struct ldlm_resource *res = lock->l_resource;
        cfs_time_t now = cfs_time_current();

        CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
        if (contended_locks > res->lr_namespace->ns_contended_locks)
                res->lr_contention_time = now;
        return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
                cfs_time_seconds(res->lr_namespace->ns_contention_time)));
}
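
/*
 * Editor's sketch of the contention window (illustrative, with hypothetical
 * tunable values): if ns_contended_locks were 4 and ns_contention_time were
 * 2 seconds, any enqueue that encounters more than 4 conflicting locks
 * refreshes lr_contention_time, and every call during the following 2
 * seconds reports the resource as contended.  A user-space analogue:
 */
#if 0
#include <time.h>

static time_t contention_time;  /* stands in for res->lr_contention_time */

static int check_contention(int contended, int contended_limit,
                            int window_sec)
{
        time_t now = time(NULL);

        if (contended > contended_limit)
                contention_time = now;  /* open/refresh the window */
        return now < contention_time + window_sec;
}
#endif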

/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
 *
 * Returns:
 *  0 if the lock is not compatible
 *  1 if the lock is compatible
 *  2 if this group lock is compatible and requires no further checking
 *  negative error, such as EWOULDBLOCK for group locks
 */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int *flags, ldlm_error_t *err,
                         struct list_head *work_list, int *contended_locks)
{
        struct list_head *tmp;
        struct ldlm_lock *lock;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int compat = 1;
        int scan = 0;
        int check_contention;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock)
                        break;

                if (unlikely(scan)) {
                        /* We only get here if we are queuing a GROUP lock
                         * and met some incompatible one.  The main idea of
                         * this code is to insert a GROUP lock past compatible
                         * GROUP locks in the waiting queue, or, if there are
                         * none, in front of the first non-GROUP lock. */
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* Ok, we hit a non-GROUP lock; there should
                                 * be no more GROUP locks later on, queue in
                                 * front of the first non-GROUP lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                break;
                        }
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                break;
                        }
                        continue;
                }

                /* locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode)) {
                        /* non-group locks are compatible, overlap doesn't
                         * matter */
                        if (likely(req_mode != LCK_GROUP))
                                continue;

                        /* If we are trying to get a GROUP lock and there is
                         * another one of this kind, we need to compare gids */
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* If the existing lock with matching gid is
                                 * granted, we grant the new one too. */
                                if (lock->l_req_mode == lock->l_granted_mode)
                                        RETURN(2);

                                /* Otherwise we are scanning the queue of
                                 * waiting locks and it means the current
                                 * request would block along with the existing
                                 * lock (that is already blocked).
                                 * If we are in nonblocking mode, return
                                 * immediately. */
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }
                                /* If this group lock is compatible with
                                 * another group lock on the waiting list,
                                 * they must be together in the list, so they
                                 * can be granted at the same time.  Otherwise
                                 * the later lock can get stuck behind
                                 * another, incompatible, lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                /* Because 'lock' is not granted, we can stop
                                 * processing this queue and return
                                 * immediately.  There is no need to check
                                 * the rest of the list. */
                                RETURN(0);
                        }
                }

                if (unlikely(req_mode == LCK_GROUP &&
                             (lock->l_req_mode != lock->l_granted_mode))) {
                        scan = 1;
                        compat = 0;
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* Ok, we hit a non-GROUP lock; there should
                                 * be no more GROUP locks later on, queue in
                                 * front of the first non-GROUP lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                break;
                        }
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                break;
                        }
                        continue;
                }

                if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                        /* If the compared lock is GROUP, then the requested
                         * one is PR/PW, so this is not compatible; the extent
                         * range does not matter. */
                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                compat = -EWOULDBLOCK;
                                goto destroylock;
                        } else {
                                *flags |= LDLM_FL_NO_TIMEOUT;
                        }
                } else if (lock->l_policy_data.l_extent.end < req_start ||
                           lock->l_policy_data.l_extent.start > req_end) {
                        /* if a non-group lock doesn't overlap, skip it */
                        continue;
                }

                if (!work_list)
                        RETURN(0);

                /* don't count conflicting glimpse locks */
                check_contention =
                        !(lock->l_req_mode == LCK_PR &&
                          lock->l_policy_data.l_extent.start == 0 &&
                          lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF);
                *contended_locks += check_contention;
                compat = 0;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, req, work_list);
        }

        if (ldlm_check_contention(req, *contended_locks) &&
            compat == 0 &&
            (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
            req->l_req_mode != LCK_GROUP &&
            req_end - req_start <=
            req->l_resource->lr_namespace->ns_max_nolock_size)
                GOTO(destroylock, compat = -EBUSY);

        RETURN(compat);

 destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy_nolock(req);
        *err = compat;
        RETURN(compat);
}
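
/*
 * Editor's illustration of the GROUP-lock ordering above (not from the
 * original file): if the waiting queue is [GROUP gid=1, GROUP gid=1, PW],
 * a new GROUP gid=1 request is inserted immediately after a waiting lock
 * with the same gid, so that all gid=1 locks can be granted together; a
 * new GROUP gid=2 request scans past the gid=1 locks and is inserted in
 * front of the PW lock, after all existing GROUP locks.
 */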

static void discard_bl_list(struct list_head *bl_list)
{
        struct list_head *tmp, *pos;
        ENTRY;

        list_for_each_safe(pos, tmp, bl_list) {
                struct ldlm_lock *lock =
                        list_entry(pos, struct ldlm_lock, l_bl_ast);

                list_del_init(&lock->l_bl_ast);
                LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                lock->l_flags &= ~LDLM_FL_AST_SENT;
                LASSERT(lock->l_bl_ast_run == 0);
                LASSERT(lock->l_blocking_lock);
                LDLM_LOCK_PUT(lock->l_blocking_lock);
                lock->l_blocking_lock = NULL;
                /* drop the reference taken when the lock was added to the
                 * blocking-AST list */
                LDLM_LOCK_PUT(lock);
        }
        EXIT;
}

/* If first_enq is 0 (i.e., called from ldlm_reprocess_queue):
 *  - blocking ASTs have already been sent
 *  - must call this function with the ns lock held
 *
 * If first_enq is 1 (i.e., called from ldlm_lock_enqueue):
 *  - blocking ASTs have not been sent
 *  - must call this function with the ns lock held once */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = CFS_LIST_HEAD_INIT(rpc_list);
        int rc, rc2;
        int contended_locks = 0;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !(lock->l_flags & LDLM_AST_DISCARD_DATA));
        check_res_locked(res);
        *err = ELDLM_OK;

        if (!first_enq) {
                /* Careful observers will note that we don't handle
                 * -EWOULDBLOCK here, but it's ok for a non-obvious reason:
                 * compat_queue can only return -EWOULDBLOCK if
                 * (flags & BLOCK_NOWAIT).  flags should always be zero here,
                 * and if that ever stops being true, we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                              err, NULL, &contended_locks);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, err, NULL,
                                                      &contended_locks);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
                        ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }

 restart:
        contended_locks = 0;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
                                      &rpc_list, &contended_locks);
        if (rc < 0)
                GOTO(out, rc); /* lock was destroyed */
        if (rc == 2)
                goto grant;

        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
                                       &rpc_list, &contended_locks);
        if (rc2 < 0)
                GOTO(out, rc = rc2); /* lock was destroyed */

        if (rc + rc2 == 2) {
 grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                unlock_res(res);
                rc = ldlm_run_bl_ast_work(&rpc_list);
                lock_res(res);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
                /* this way we force the client to wait for the lock
                 * endlessly once the lock is enqueued -bzzz */
                *flags |= LDLM_FL_NO_TIMEOUT;
        }
        RETURN(0);

 out:
        if (!list_empty(&rpc_list)) {
                LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
                discard_bl_list(&rpc_list);
        }
        RETURN(rc);
}
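
/*
 * Editor's note (not from the original file): in the enqueue path above,
 * rc and rc2 are each 1 when the new lock is compatible with the granted
 * and waiting queues respectively, so "rc + rc2 == 2" means the lock can
 * be granted immediately; rc == 2 is the group-lock shortcut that skips
 * the waiting-queue check entirely.  Anything else queues the lock and
 * sends the collected blocking ASTs, restarting from scratch if the
 * queues changed while the resource was unlocked.
 */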

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold ns_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        lock->l_flags |= LDLM_FL_KMS_IGNORE;

        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;

                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked above)
                 * so kms can only ever be smaller or the same as old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);

        RETURN(kms);
}
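
/*
 * Editor's worked example (not from the original file): if the granted list
 * holds locks on [0, 4095] and [4096, 8191] with old_kms = 8192, and the
 * lock being cancelled is the one on [4096, 8191], that lock is marked
 * LDLM_FL_KMS_IGNORE and skipped, so the scan sees only [0, 4095] and
 * returns a new KMS of 4095 + 1 = 4096.  If any remaining lock ended at or
 * beyond old_kms, old_kms itself would be returned unchanged.
 */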