/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_support.h>
#include <linux/lustre_lib.h>

#include "ldlm_internal.h"
/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
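/* For example, a PW request for [4096, 8191] whose only conflict is another
 * client's granted PW lock on [0, 1023] (which requested exactly [0, 1023])
 * would be grown to [1024, ~0]: downward growth stops just above the
 * conflicting extent, while upward growth is unrestricted here. */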
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);
        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;
                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;
                /* If lock doesn't overlap new_ex, skip it. */
                if (l_extent->end < new_ex->start ||
                    l_extent->start > new_ex->end)
                        continue;

                /* The locks conflict in their requested extents and we can't
                 * satisfy both, so ignore this one.  Either we will ping-pong
                 * this extent (we would regardless of what extent we granted)
                 * or the lock is unused and shouldn't limit our extent
                 * growth. */
                if (lock->l_req_extent.end >= req_start &&
                    lock->l_req_extent.start <= req_end)
                        continue;
                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * l_req_extent->end strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end + 1,
                                                    req_start);
                }
                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1,
                                                  req_end);
                }
        }
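        /* For example, a busy PW request for [0, 4095] that has conflicted
         * with more than 32 other locks is capped by the check below at
         * [0, 32MB - 1] rather than being grown all the way to [0, ~0]. */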
#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }
        EXIT;
}
/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = ~0 };

        if (lock->l_req_mode == LCK_GROUP)
                return;

        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}
/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
 *
 * Returns:
 *  0 if the lock is not compatible
 *  1 if the lock is compatible
 *  2 if this group lock is compatible and requires no further checking
 *  negative error, such as -EWOULDBLOCK for group locks
 */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int *flags, struct list_head *work_list,
                         struct list_head **insertp)
{
        struct list_head *tmp;
        struct list_head *save = NULL;
        struct ldlm_lock *lock = NULL;
        ldlm_mode_t req_mode = req->l_req_mode;
        int compat = 1;
        ENTRY;

        lockmode_verify(req_mode);
        /* Extent locks are only queued once.  We can get back here with
         * insertp != NULL if the blocking ASTs returned -ERESTART. */
        if (!list_empty(&req->l_res_link))
                insertp = NULL;
        if (req->l_req_mode != LCK_GROUP) {
                __u64 req_start = req->l_req_extent.start;
                __u64 req_end = req->l_req_extent.end;

                list_for_each(tmp, queue) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (req == lock)
                                break;

                        if (lock->l_req_mode == LCK_GROUP) {
                                if (*flags & LDLM_FL_BLOCK_NOWAIT)
                                        RETURN(-EWOULDBLOCK);
                                /* No blocking ASTs are sent for group locks. */

                                /* there's a blocking group lock in front
                                 * of us on the queue.  It can be held
                                 * indefinitely, so don't timeout. */
                                *flags |= LDLM_FL_NO_TIMEOUT;

                                /* lock_bitlock(req) is held here. */
                                req->l_flags |= LDLM_FL_NO_TIMEOUT;
                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_req_mode, req_mode))
                                continue;

                        if (lock->l_policy_data.l_extent.end < req_start ||
                            lock->l_policy_data.l_extent.start > req_end)
                                continue;

                        compat = 0;
                        if (lock->l_blocking_ast)
                                ldlm_add_ast_work_item(lock, req, work_list);
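        /* Below, the request itself is a group lock (LCK_GROUP): besides
         * checking compatibility against each queued lock, we also have to
         * work out where on the waitq the group lock may be inserted. */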
        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                if (req == lock)
                        break;

                if (lock->l_req_mode != LCK_GROUP) {
                        if (lock->l_req_mode != lock->l_granted_mode) {
                                /* we must be traversing the waitq. */

                                /* If a group lock was already found then
                                 * req can be queued before any extent locks
                                 * that come after the found group lock. */
                                /* We've hit a conflicting extent lock
                                 * on the waitq before hitting the req
                                 * group lock.  See comments below. */

                                /* Group locks are not normally blocked by
                                 * waiting PR|PW locks. */

                                /* If NO_TIMEOUT was sent back to the client
                                 * we can queue the group lock in front of
                                 * this extent lock. */
                                if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {

                                /* If we did NOT send NO_TIMEOUT back to the
                                 * client for this extent lock then the client
                                 * could possibly timeout if we queue this
                                 * group lock before it, so don't.  This is the
                                 * only way to get a conflicting extent lock
                                 * in front of a group lock on the waitq. */

                                LASSERT(save == NULL);
                        /* If we previously skipped over some extent locks
                         * because we thought we were going to queue the
                         * group lock in front of them then we need to go back
                         * and send blocking ASTs for the locks we skipped. */
                        if (save != NULL) {
                                struct ldlm_lock *lck2;

                                for (; save != tmp; save = save->next) {
                                        lck2 = list_entry(save,
                                                          struct ldlm_lock,
                                                          l_res_link);

                                        /* If there was a group lock after save
                                         * then we would have exited this loop
                                         * already. */
                                        LASSERT(lck2->l_req_mode != LCK_GROUP);

                                        if (lck2->l_blocking_ast) {
                                                ldlm_add_ast_work_item(lck2,
                                                                       req,
                                                                       work_list);
                                        }
                                }
                        }

                        if (lock->l_blocking_ast)
                                ldlm_add_ast_work_item(lock, req, work_list);
                /* If it was safe to insert a group lock at save,
                 * i.e. save != NULL, then this group lock already
                 * on the queue would have been inserted before save. */
                LASSERT(save == NULL);

                /* Note: no blocking ASTs are sent for group locks. */

                if (lock->l_policy_data.l_extent.gid ==
                    req->l_policy_data.l_extent.gid) {
                        /* group locks with this gid already on the waitq. */

                        if (lock->l_req_mode == lock->l_granted_mode) {
                                /* if a group lock with this gid has already
                                 * been granted then grant this one. */

                /* group locks already exist on the queue. */
                if (*flags & LDLM_FL_BLOCK_NOWAIT)
                        RETURN(-EWOULDBLOCK);

                /* there's a blocking group lock in front
                 * of us on the queue.  It can be held
                 * indefinitely, so don't timeout. */
                *flags |= LDLM_FL_NO_TIMEOUT;

                /* the only reason to continue traversing the
                 * list at this point is to find the proper
                 * place to insert the lock in the waitq. */

        if (insertp != NULL) {
/* If first_enq is 0 (i.e. called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - the caller has already initialized req->lr_tmp
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (i.e. called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - the caller has NOT initialized req->lr_tmp, so we must do so here
 *   - must call this function with the ns lock held once */
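/* ldlm_reprocess_queue() retries waiting locks after a cancel or convert has
 * freed up conflicting extents, while ldlm_lock_enqueue() calls this when a
 * new request first reaches the server. */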
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        struct list_head *insertp = NULL;
        int rc, rc2;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        if (!first_enq) {
                /* -EWOULDBLOCK can't occur here since (flags & BLOCK_NOWAIT)
                 * lock requests would either be granted or fail on their
                 * first_enq.  flags should always be zero here, and if that
                 * ever changes we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock,
                                              flags, NULL, NULL);
                if (rc == 1)
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, NULL, NULL);
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);
                ldlm_extent_policy(res, lock, flags);

                lock_bitlock(lock);
                lock->l_flags &= ~LDLM_FL_NO_TIMEOUT;
                unlock_bitlock(lock);

                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }
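        /* First enqueue: scan both queues, queue blocking ASTs for every
         * conflicting lock found, and grant immediately only if nothing
         * conflicts. */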
 restart:
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, &rpc_list,
                                      NULL);
        if (rc < 0)
                GOTO(destroylock, rc);
        if (rc == 2)
                goto grant;

        /* Traverse the waiting list in case there are other conflicting
         * lock requests ahead of us in the queue and send blocking ASTs */
        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, &rpc_list,
                                       &insertp);
        if (rc2 < 0)
                GOTO(destroylock, rc);

        if (rc + rc2 == 2) {
 grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                lock->l_flags &= ~LDLM_FL_NO_TIMEOUT;
                ldlm_grant_lock(lock, NULL);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, insertp, lock);

                rc = ldlm_run_bl_ast_work(&rpc_list);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
        }
        RETURN(0);

 destroylock:
        list_del_init(&lock->l_res_link);
        ldlm_lock_destroy(lock);
        *err = rc;
        RETURN(rc);
}
/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold ns_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
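/* For example, if PW locks on [0, 1023] and [0, 4095] are granted and the
 * [0, 4095] lock is being cancelled with old_kms == 4096, the remaining
 * [0, 1023] lock is the highest extent left, so the new KMS becomes 1024. */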
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        lock->l_flags |= LDLM_FL_KMS_IGNORE;
        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;

                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked above)
                 * so kms can only ever be smaller or the same as old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);

        RETURN(kms);
}