/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */
#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_support.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"
/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing, conflicting extents outside the requested one
 */
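/* Illustration only, with hypothetical values: suppose the request covers
 * [100, 199] and the only conflicting granted lock covers (and requested)
 * [0, 49].  new_ex arrives as [0, OBD_OBJECT_EOF]; the downward-growth pass
 * below clips new_ex->start to min(49 + 1, 100) = 50, nothing limits the
 * end, so the grant becomes [50, OBD_OBJECT_EOF]. */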
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);
        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;
                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;

                /* If lock doesn't overlap new_ex, skip it. */
                if (l_extent->end < new_ex->start ||
                    l_extent->start > new_ex->end)
                        continue;
                /* The locks conflict in their requested extents and we can't
                 * satisfy both, so ignore this one.  Either we will ping-pong
                 * this extent (we would regardless of what extent we granted)
                 * or the lock is unused and shouldn't limit our extent
                 * growth. */
                if (lock->l_req_extent.end >= req_start &&
                    lock->l_req_extent.start <= req_end)
                        continue;
                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * l_req_extent->end strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end + 1,
                                                    req_start);
                }

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1,
                                                  req_end);
                }
        }
#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }
        EXIT;
}
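/* A minimal sketch of the downward-growth rule used above, kept out of the
 * build with #if 0.  The function name is illustrative only (not part of
 * Lustre); the mode, export and high-traffic checks from the real loop are
 * deliberately omitted. */
#if 0
static void example_grow_down(struct ldlm_extent *new_ex,
                              const struct ldlm_extent *granted,
                              __u64 req_start)
{
        /* Grow new_ex downwards only to granted->end + 1, as the loop in
         * ldlm_extent_internal_policy() does; e.g. granted [0, 49] with
         * req_start 100 moves new_ex->start up to 50. */
        if (granted->start < req_start && new_ex->start != req_start) {
                if (granted->end >= req_start)
                        new_ex->start = req_start;
                else
                        new_ex->start = min(granted->end + 1, req_start);
        }
}
#endif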
/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
        if (lock->l_export == NULL)
                /*
                 * this is a local lock taken by the server (e.g., as a part
                 * of OST-side locking, or unlink handling).  Expansion
                 * doesn't make a lot of sense for local locks, because they
                 * are dropped immediately on operation completion and would
                 * only conflict with other threads.
                 */
                return;

        if (lock->l_policy_data.l_extent.start == 0 &&
            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                /* fast-path whole file locks */
                return;
        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}
/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
 *
 * Returns:
 *   0 if the lock is not compatible
 *   1 if the lock is compatible
 *   2 if this group lock is compatible and requires no further checking
 *   negative error, such as -EWOULDBLOCK for group locks
 */
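/* The caller pattern can be seen in ldlm_process_extent_lock() below: the
 * request is checked once against lr_granted and once against lr_waiting,
 * and is granted only when both passes report compatible (or a single pass
 * returns 2 for an already-granted group lock). */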
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int *flags, ldlm_error_t *err,
                         struct list_head *work_list)
{
        struct list_head *tmp;
        struct ldlm_lock *lock;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int compat = 1;
        int scan = 0;
        ENTRY;

        lockmode_verify(req_mode);
        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock)
                        RETURN(compat);

                if (unlikely(scan)) {
                        /* We only get here if we are queuing a GROUP lock
                         * and met some incompatible one.  The main idea of
                         * this code is to insert the GROUP lock past any
                         * compatible GROUP lock in the waiting queue, or if
                         * there is none, then in front of the first
                         * non-GROUP lock. */
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* Ok, we hit a non-GROUP lock; there should
                                 * be no more GROUP locks later on, so queue
                                 * in front of the first non-GROUP lock.
                                 * (Insert req after lock, then move lock to
                                 * after req, leaving req in front.) */
                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                RETURN(0);
                        }
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                RETURN(0);
                        }
                        continue;
                }
                /* locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode)) {
                        /* non-group locks are compatible, overlap doesn't
                         * matter */
                        if (likely(req_mode != LCK_GROUP))
                                continue;

                        /* If we are trying to get a GROUP lock and there is
                         * another one of this kind, we need to compare gids */
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* If the existing lock with the matching gid
                                 * is granted, we grant the new one too. */
                                if (lock->l_req_mode == lock->l_granted_mode)
                                        RETURN(2);

                                /* Otherwise we are scanning the queue of
                                 * waiting locks and it means the current
                                 * request would block along with the existing
                                 * lock (which is already blocked).
                                 * If we are in nonblocking mode, return
                                 * immediately. */
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }
                                /* If this group lock is compatible with another
                                 * group lock on the waiting list, they must be
                                 * together in the list, so they can be granted
                                 * at the same time.  Otherwise the later lock
                                 * can get stuck behind another, incompatible,
                                 * lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                /* Because 'lock' is not granted, we can stop
                                 * processing this queue and return immediately.
                                 * There is no need to check the rest of the
                                 * list. */
                                RETURN(0);
                        }
                        continue;
                }
                if (unlikely(req_mode == LCK_GROUP &&
                             (lock->l_req_mode != lock->l_granted_mode))) {
                        scan = 1;
                        compat = 0;
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* Ok, we hit a non-GROUP lock; there should
                                 * be no more GROUP locks later on, so queue
                                 * in front of the first non-GROUP lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                RETURN(0);
                        }
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                RETURN(0);
                        }
                        continue;
                }

                if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                        /* If the compared lock is GROUP, then the request is
                         * PR/PW and therefore not compatible; the extent
                         * range does not matter. */
                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                compat = -EWOULDBLOCK;
                                goto destroylock;
                        } else {
                                *flags |= LDLM_FL_NO_TIMEOUT;
                        }
                } else if (lock->l_policy_data.l_extent.end < req_start ||
                           lock->l_policy_data.l_extent.start > req_end) {
                        /* if a non-group lock doesn't overlap, skip it */
                        continue;
                }

                if (!work_list)
                        RETURN(0);

                compat = 0;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, req, work_list);
        }

        RETURN(compat);
destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy_nolock(req);
        *err = compat;
        RETURN(compat);
}
/* If first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - the caller has already initialized req->lr_tmp
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - the caller has NOT initialized req->lr_tmp, so we must do so
 *   - must call this function with the ns lock held once */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = CFS_LIST_HEAD_INIT(rpc_list);
        int rc, rc2;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        check_res_locked(res);
        *err = ELDLM_OK;

        if (!first_enq) {
                /* Careful observers will note that we don't handle -EWOULDBLOCK
                 * here, but it's ok for a non-obvious reason -- compat_queue
                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
                 * flags should always be zero here, and if that ever stops
                 * being true, we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                              err, NULL);
                if (rc == 1)
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, err, NULL);
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }

 restart:
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err, &rpc_list);
        if (rc < 0)
                GOTO(out, rc); /* lock was destroyed */
        if (rc == 2)
                goto grant;

        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err, &rpc_list);
        if (rc2 < 0)
                GOTO(out, rc = rc2); /* lock was destroyed */

        if (rc + rc2 == 2) {
 grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                unlock_res(res);
                rc = ldlm_run_bl_ast_work(&rpc_list);
                lock_res(res);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
                /* this way we force the client to wait for the lock
                 * endlessly once the lock is enqueued -bzzz */
                *flags |= LDLM_FL_NO_TIMEOUT;
        }
        rc = 0;
out:
        RETURN(rc);
}
/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold ns_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
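/* Worked example with hypothetical values: if old_kms is 10000 and the
 * remaining granted locks (neither flagged LDLM_FL_KMS_IGNORE) cover
 * [0, 4095] and [0, 999], every extent end is below old_kms, so the loop
 * below yields kms = 4095 + 1 = 4096. */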
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        lock->l_flags |= LDLM_FL_KMS_IGNORE;
        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;

                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked above)
                 * so kms can only ever be smaller or the same as old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);

        RETURN(kms);
}
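/* Hypothetical usage sketch (illustrative only -- the real call sites live
 * outside this file, in client lock-cancellation paths):
 *
 *      __u64 new_kms;
 *
 *      new_kms = ldlm_extent_shift_kms(lock, old_kms);
 *
 * 'old_kms' stands in for however the caller tracks the current known
 * minimum size, and the required lock (see the comment above this function)
 * must already be held. */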