/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_support.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
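/* For example (one possible outcome, assuming few conflicts): if a client
 * requests [8192, 12287] and the only conflicting granted lock covers
 * [0, 4095], the extent may be grown to [4096, OBD_OBJECT_EOF], subject to
 * the conflict counters and the alignment step at the end of this function. */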
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        __u64 req_align, mask;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                /* We already hit the minimum requested size, search no more */
                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;

                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;

                /* If lock doesn't overlap new_ex, skip it. */
                if (l_extent->end < new_ex->start ||
                    l_extent->start > new_ex->end)
                        continue;

                /* Locks conflicting in requested extents and we can't satisfy
                 * both locks, so ignore it. Either we will ping-pong this
                 * extent (we would regardless of what extent we granted) or
                 * lock is unused and it shouldn't limit our extent growth. */
                if (lock->l_req_extent.end >= req_start &&
                    lock->l_req_extent.start <= req_end)
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * l_req_extent->end strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end + 1,
                                                    req_start);
                }

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }

#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }

        if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
                EXIT;
                return;
        }

        /* We need to ensure that the lock extent is properly aligned to what
         * the client requested. We align it to the lowest-common denominator
         * of the client's requested lock start and end alignment. */
        mask = 0x1000ULL;
        req_align = (req_end + 1) | req_start;
        if (req_align != 0) {
                while ((req_align & mask) == 0)
                        mask <<= 1;
        }
        mask -= 1;
        /* We can only shrink the lock, not grow it.
         * This should never cause the lock to be smaller than requested,
         * since the requested lock was already aligned on these boundaries. */
        new_ex->start = ((new_ex->start - 1) | mask) + 1;
        new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
        LASSERTF(new_ex->start <= req_start,
                 "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
                 mask, new_ex->start, req_start);
        LASSERTF(new_ex->end >= req_end,
                 "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
                 mask, new_ex->end, req_end);

        EXIT;
}
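/* Worked example for the alignment step (assuming the 0x1000 starting mask
 * above): a request for [4096, 8191] yields req_align = 8192 | 4096 = 0x3000,
 * so the loop exits immediately and mask becomes 0xfff. A grown extent of
 * [100, 10000] is then shrunk to [4096, 8191]:
 * ((100 - 1) | 0xfff) + 1 = 4096 rounds the start up, and
 * ((10000 + 1) & ~0xfff) - 1 = 8191 rounds the end down, which still
 * contains the requested extent. */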

/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

        if (lock->l_export == NULL)
                /*
                 * This is a local lock taken by the server (e.g., as a part
                 * of OST-side locking, or unlink handling). Expansion doesn't
                 * make a lot of sense for local locks, because they are
                 * dropped immediately on operation completion and would only
                 * conflict with other threads.
                 */
                return;

        if (lock->l_policy_data.l_extent.start == 0 &&
            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                /* fast-path whole file locks */
                return;

        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}

/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
 *
 * Returns:
 * 0 if the lock is not compatible
 * 1 if the lock is compatible
 * 2 if this group lock is compatible and requires no further checking
 * negative error, such as EWOULDBLOCK for group locks
 */
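/* For reference (derived from the caller below): ldlm_process_extent_lock()
 * grants the lock immediately on 2, sends blocking ASTs and waits on 0, and
 * treats a negative return as "req was destroyed". */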
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int *flags, ldlm_error_t *err,
                         struct list_head *work_list)
{
        struct list_head *tmp;
        struct ldlm_lock *lock;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int compat = 1;
        int scan = 0;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock)
                        RETURN(compat);

                if (unlikely(scan)) {
                        /* We only get here if we are queuing a GROUP lock
                         * and met some incompatible one. The main idea of
                         * this code is to insert the GROUP lock past a
                         * compatible GROUP lock in the waiting queue, or, if
                         * there is none, then in front of the first
                         * non-GROUP lock. */
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* Ok, we hit a non-GROUP lock; there should
                                 * be no more GROUP locks later on, queue in
                                 * front of the first non-GROUP lock */
                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                RETURN(0);
                        }
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                RETURN(0);
                        }
                        continue;
                }

                /* locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode)) {
                        /* non-group locks are compatible, overlap doesn't
                         * matter */
                        if (likely(req_mode != LCK_GROUP))
                                continue;

                        /* If we are trying to get a GROUP lock and there is
                         * another one of this kind, we need to compare gids */
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* If the existing lock with matching gid is
                                 * granted, we grant the new one too. */
                                if (lock->l_req_mode == lock->l_granted_mode)
                                        RETURN(2);

                                /* Otherwise we are scanning the queue of
                                 * waiting locks and it means the current
                                 * request would block along with the existing
                                 * lock (which is already blocked).
                                 * If we are in nonblocking mode, return
                                 * immediately. */
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }
                                /* If this group lock is compatible with
                                 * another group lock on the waiting list,
                                 * they must be together in the list, so they
                                 * can be granted at the same time. Otherwise
                                 * the later lock can get stuck behind
                                 * another, incompatible, lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                /* Because 'lock' is not granted, we can stop
                                 * processing this queue and return
                                 * immediately. There is no need to check the
                                 * rest of the list. */
                                RETURN(0);
                        }
                }

                if (unlikely(req_mode == LCK_GROUP &&
                             (lock->l_req_mode != lock->l_granted_mode))) {
                        scan = 1;
                        compat = 0;
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* Ok, we hit a non-GROUP lock; there should
                                 * be no more GROUP locks later on, queue in
                                 * front of the first non-GROUP lock */
                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                RETURN(0);
                        }
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                RETURN(0);
                        }
                        continue;
                }

                if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                        /* If the compared lock is GROUP, then the requested
                         * lock is PR/PW, so this is not compatible; the
                         * extent range does not matter */
                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                compat = -EWOULDBLOCK;
                                goto destroylock;
                        } else {
                                *flags |= LDLM_FL_NO_TIMEOUT;
                        }
                } else if (lock->l_policy_data.l_extent.end < req_start ||
                           lock->l_policy_data.l_extent.start > req_end) {
                        /* if a non-group lock doesn't overlap, skip it */
                        continue;
                }

                if (!work_list)
                        RETURN(0);

                compat = 0;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, req, work_list);
        }

        RETURN(compat);
destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy_nolock(req);
        *err = compat;
        RETURN(compat);
}

/* If first_enq is 0 (i.e., called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - the caller has already initialized req->lr_tmp
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (i.e., called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - the caller has NOT initialized req->lr_tmp, so we must do so here
 *   - must call this function with the ns lock held once */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = CFS_LIST_HEAD_INIT(rpc_list);
        int rc, rc2;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        check_res_locked(res);
        *err = ELDLM_OK;

        if (!first_enq) {
                /* Careful observers will note that we don't handle
                 * -EWOULDBLOCK here, but it's OK for a non-obvious reason:
                 * compat_queue can only return -EWOULDBLOCK if
                 * (flags & BLOCK_NOWAIT). flags should always be zero here,
                 * and if that ever stops being true, we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                              err, NULL);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, err, NULL);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }

 restart:
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
                                      &rpc_list);
        if (rc < 0)
                GOTO(out, rc); /* lock was destroyed */
        if (rc == 2)
                goto grant;

        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
                                       &rpc_list);
        if (rc2 < 0)
                GOTO(out, rc = rc2); /* lock was destroyed */

        if (rc + rc2 == 2) {
 grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered! Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                unlock_res(res);
                rc = ldlm_run_bl_ast_work(&rpc_list);
                lock_res(res);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
                /* this way we force the client to wait for the lock
                 * endlessly once the lock is enqueued -bzzz */
                *flags |= LDLM_FL_NO_TIMEOUT;
        }
        rc = 0;
out:
        RETURN(rc);
}

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock". This function returns the new KMS value.
 * Caller must hold ns_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
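/* For example, if the client cancels its lock on [8192, 12287] while a lock
 * on [0, 4095] remains granted, the KMS drops from 12288 to 4096, since the
 * remaining lock only protects bytes up to offset 4095. */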
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        lock->l_flags |= LDLM_FL_KMS_IGNORE;

        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;

                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked
                 * above) so kms can only ever be smaller or the same as
                 * old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n",
                 kms, old_kms);

        RETURN(kms);
}