/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/*
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
/**
 * This file contains the implementation of the EXTENT lock type.
 *
 * EXTENT lock type is for locking a contiguous range of values, represented
 * by 64-bit starting and ending offsets (inclusive). There are several extent
 * lock modes, some of which may be mutually incompatible. Extent locks are
 * considered incompatible if their modes are incompatible and their extents
 * intersect. See the lock mode compatibility matrix in lustre_dlm.h.
 */
#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"
#ifdef HAVE_SERVER_SUPPORT
# define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
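/* Note: on a contended resource (more than 32 conflicting locks, see
 * ldlm_extent_internal_policy_fixup() below) a grown PW/CW extent is
 * capped at LDLM_MAX_GROWN_EXTENT (32MB - 1) past the requested start. */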
/**
 * Fix up the ldlm_extent after expanding it.
 *
 * After expansion has been done, we might still want to do certain adjusting
 * based on overall contention of the resource and the like to avoid granting
 * overly wide locks.
 */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
					      struct ldlm_extent *new_ex,
					      int conflicting)
{
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	__u64 req_align, mask;
	if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
		if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
			new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
					  new_ex->end);
	}

	if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
		EXIT;
		return;
	}
	/* We need to ensure that the lock extent is properly aligned to what
	 * the client requested. Also we need to make sure it's also server
	 * page size aligned otherwise a server page can be covered by two
	 * write locks. */
	mask = PAGE_CACHE_SIZE;
	req_align = (req_end + 1) | req_start;
	if (req_align != 0 && (req_align & (mask - 1)) == 0) {
		while ((req_align & mask) == 0)
			mask <<= 1;
	}
	mask -= 1;
	/* We can only shrink the lock, not grow it.
	 * This should never cause lock to be smaller than requested,
	 * since requested lock was already aligned on these boundaries. */
	new_ex->start = ((new_ex->start - 1) | mask) + 1;
	new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
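	/* Worked example (illustrative, assuming 4096-byte pages): for a
	 * request of [4096, 8191] expanded to [1, 16383], req_align is
	 * 8192 | 4096, which is page aligned, so mask ends up as 0xfff and
	 * the grant shrinks to the page-aligned range [4096, 16383]. That
	 * still covers the request, satisfying the assertions below. */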
	LASSERTF(new_ex->start <= req_start,
		 "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
		 mask, new_ex->start, req_start);
	LASSERTF(new_ex->end >= req_end,
		 "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
		 mask, new_ex->end, req_end);
}
/**
 * Return the maximum extent that:
 * - contains the requested extent
 * - does not overlap existing conflicting extents outside the requested one
 *
 * This allows clients to request a small required extent range, but if there
 * is no contention on the lock the full lock can be granted to the client.
 * This avoids the need for many smaller lock requests to be granted in the
 * common (uncontended) case.
 *
 * Use the interval tree to expand the lock extent for granted locks.
 */
static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
						struct ldlm_extent *new_ex)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_interval_tree *tree;
	struct interval_node_extent limiter = { new_ex->start, new_ex->end };
	int conflicting = 0;
	int idx;
	ENTRY;

	lockmode_verify(req_mode);
	/* Using interval tree to handle the LDLM extent granted locks. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		struct interval_node_extent ext = { req_start, req_end };

		tree = &res->lr_itree[idx];
		if (lockmode_compat(tree->lit_mode, req_mode))
			continue;

		conflicting += tree->lit_size;
		if (conflicting > 4)
			limiter.start = req_start;

		if (interval_is_overlapped(tree->lit_root, &ext))
			CDEBUG(D_INFO,
			       "req_mode = %d, tree->lit_mode = %d, "
			       "tree->lit_size = %d\n",
			       req_mode, tree->lit_mode, tree->lit_size);
		interval_expand(tree->lit_root, &ext, &limiter);
		limiter.start = max(limiter.start, ext.start);
		limiter.end = min(limiter.end, ext.end);
		if (limiter.start == req_start && limiter.end == req_end)
			break;
	}
	new_ex->start = limiter.start;
	new_ex->end = limiter.end;
	LASSERT(new_ex->start <= req_start);
	LASSERT(new_ex->end >= req_end);

	ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
	EXIT;
}
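/* Note: granted locks are indexed by the per-mode interval trees above, so
 * expansion against them is a tree lookup; waiting locks live only on the
 * resource's lr_waiting list, hence the linear walk in the function below. */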
/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
static void
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
				    struct ldlm_extent *new_ex)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_lock *lock;
	int conflicting = 0;
	ENTRY;

	lockmode_verify(req_mode);
	/* for waiting locks */
	list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
		struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;

		/* We already hit the minimum requested size, search no more */
		if (new_ex->start == req_start && new_ex->end == req_end) {
			EXIT;
			return;
		}

		/* Don't conflict with ourselves */
		if (req == lock)
			continue;

		/* Locks are compatible, overlap doesn't matter */
		/* Until bug 20 is fixed, try to avoid granting overlapping
		 * locks on one client (they take a long time to cancel) */
		if (lockmode_compat(lock->l_req_mode, req_mode) &&
		    lock->l_export != req->l_export)
			continue;

		/* If this is a high-traffic lock, don't grow downwards at all
		 * or grow upwards too much */
		++conflicting;
		if (conflicting > 4)
			new_ex->start = req_start;

		/* If lock doesn't overlap new_ex, skip it. */
		if (!ldlm_extent_overlap(l_extent, new_ex))
			continue;
		/* Locks conflicting in requested extents and we can't satisfy
		 * both locks, so ignore it. Either we will ping-pong this
		 * extent (we would regardless of what extent we granted) or
		 * lock is unused and it shouldn't limit our extent growth. */
		if (ldlm_extent_overlap(&lock->l_req_extent, &req->l_req_extent))
			continue;
		/* We grow extents downwards only as far as they don't overlap
		 * with already-granted locks, on the assumption that clients
		 * will be writing beyond the initial requested end and would
		 * then need to enqueue a new lock beyond the previous request.
		 * l_req_extent->end strictly < req_start, checked above. */
		if (l_extent->start < req_start && new_ex->start != req_start) {
			if (l_extent->end >= req_start)
				new_ex->start = req_start;
			else
				new_ex->start = min(l_extent->end + 1,
						    req_start);
		}
		/* If we need to cancel this lock anyways because our request
		 * overlaps the granted lock, we grow up to its requested
		 * extent start instead of limiting this extent, assuming that
		 * clients are writing forwards and the lock had overgrown
		 * its extent downwards before we enqueued our request. */
		if (l_extent->end > req_end) {
			if (l_extent->start <= req_end)
				new_ex->end = max(lock->l_req_extent.start - 1,
						  req_end);
			else
				new_ex->end = max(l_extent->start - 1, req_end);
		}
	}

	ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
	EXIT;
}
/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
			       struct ldlm_lock *lock, __u64 *flags)
{
	struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
	if (lock->l_export == NULL)
		/*
		 * This is a local lock taken by the server (e.g., as a part
		 * of OST-side locking, or unlink handling). Expansion doesn't
		 * make a lot of sense for local locks, because they are
		 * dropped immediately on operation completion and would only
		 * conflict with other threads.
		 */
		return;

	if (lock->l_policy_data.l_extent.start == 0 &&
	    lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
		/* fast-path whole file locks */
		return;
	ldlm_extent_internal_policy_granted(lock, &new_ex);
	ldlm_extent_internal_policy_waiting(lock, &new_ex);

	if (new_ex.start != lock->l_policy_data.l_extent.start ||
	    new_ex.end != lock->l_policy_data.l_extent.end) {
		*flags |= LDLM_FL_LOCK_CHANGED;
		lock->l_policy_data.l_extent.start = new_ex.start;
		lock->l_policy_data.l_extent.end = new_ex.end;
	}
}
static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
	struct ldlm_resource *res = lock->l_resource;
	cfs_time_t now = cfs_time_current();

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
		return 1;

	CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
	if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
		res->lr_contention_time = now;
	return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
		cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
}
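/* In other words: once more than ns_contended_locks conflicting locks are
 * seen on a resource, the resource is treated as contended for the next
 * ns_contention_time seconds (both are per-namespace tunables). */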
struct ldlm_extent_compat_args {
	struct list_head *work_list;
	struct ldlm_lock *lock;
	enum ldlm_mode mode;
	int *locks;
	int *compat;
};
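/* Callback for interval_iterate()/interval_search() on a granted-lock
 * interval tree: every interval node groups locks with an identical extent
 * (node->li_group), so we queue blocking ASTs for the whole group and count
 * it towards contention (except whole-file PR glimpse locks). */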
static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
						void *data)
{
	struct ldlm_extent_compat_args *priv = data;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_extent *extent;
	struct list_head *work_list = priv->work_list;
	struct ldlm_lock *lock, *enq = priv->lock;
	enum ldlm_mode mode = priv->mode;
	int count = 0;
	ENTRY;

	LASSERT(!list_empty(&node->li_group));
	list_for_each_entry(lock, &node->li_group, l_sl_policy) {
		/* interval tree is for granted locks */
		LASSERTF(mode == lock->l_granted_mode,
			 "mode = %s, lock->l_granted_mode = %s\n",
			 ldlm_lockname[mode],
			 ldlm_lockname[lock->l_granted_mode]);
		count++;
		if (lock->l_blocking_ast &&
		    lock->l_granted_mode != LCK_GROUP)
			ldlm_add_ast_work_item(lock, enq, work_list);
	}
	/* don't count conflicting glimpse locks */
	extent = ldlm_interval_extent(node);
	if (!(mode == LCK_PR &&
	      extent->start == 0 && extent->end == OBD_OBJECT_EOF))
		*priv->locks += count;

	if (priv->compat)
		*priv->compat = 0;

	RETURN(INTERVAL_ITER_CONT);
}
/**
 * Determine if the lock is compatible with all locks on the queue.
 *
 * If \a work_list is provided, conflicting locks are linked there.
 * If \a work_list is not provided, we exit this function on first conflict.
 *
 * \retval 0 if the lock is not compatible
 * \retval 1 if the lock is compatible
 * \retval 2 if \a req is a group lock and it is compatible and requires
 *           no further checking
 * \retval negative error, such as EWOULDBLOCK for group locks
 */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
			 __u64 *flags, enum ldlm_error *err,
			 struct list_head *work_list, int *contended_locks)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_lock *lock;
	int check_contention;
	int compat = 1;
	int scan = 0;
	ENTRY;

	lockmode_verify(req_mode);
	/* Using interval tree for granted lock */
	if (queue == &res->lr_granted) {
		struct ldlm_interval_tree *tree;
		struct ldlm_extent_compat_args data = {.work_list = work_list,
						       .lock = req,
						       .locks = contended_locks,
						       .compat = &compat };
		struct interval_node_extent ex = { .start = req_start,
						   .end = req_end };
		int idx, rc;
		for (idx = 0; idx < LCK_MODE_NUM; idx++) {
			tree = &res->lr_itree[idx];
			if (tree->lit_root == NULL) /* empty tree, skipped */
				continue;

			data.mode = tree->lit_mode;
			if (lockmode_compat(req_mode, tree->lit_mode)) {
				struct ldlm_interval *node;
				struct ldlm_extent *extent;

				if (req_mode != LCK_GROUP)
					continue;

				/* group lock, grant it immediately if
				 * compatible */
				node = to_ldlm_interval(tree->lit_root);
				extent = ldlm_interval_extent(node);
				if (req->l_policy_data.l_extent.gid ==
				    extent->gid)
					RETURN(2);
			}
			if (tree->lit_mode == LCK_GROUP) {
				if (*flags & LDLM_FL_BLOCK_NOWAIT) {
					compat = -EWOULDBLOCK;
					goto destroylock;
				}

				*flags |= LDLM_FL_NO_TIMEOUT;
				if (!work_list)
					RETURN(0);

				/* if work list is not NULL, add all
				 * locks in the tree to work list */
				compat = 0;
				interval_iterate(tree->lit_root,
						 ldlm_extent_compat_cb, &data);
				continue;
			}
			if (!work_list) {
				rc = interval_is_overlapped(tree->lit_root,
							    &ex);
				if (rc)
					RETURN(0);
			} else {
				interval_search(tree->lit_root, &ex,
						ldlm_extent_compat_cb, &data);
				if (!list_empty(work_list) && compat)
					compat = 0;
			}
		}
	} else { /* for waiting queue */
		list_for_each_entry(lock, queue, l_res_link) {
			check_contention = 1;

			/* We stop walking the queue if we hit ourselves so
			 * we don't take conflicting locks enqueued after us
			 * into account, or we'd wait forever. */
			if (req == lock)
				break;
			if (unlikely(scan)) {
				/* We only get here if we are queuing a GROUP
				 * lock and met some incompatible one. The
				 * main idea of this code is to insert the
				 * GROUP lock past compatible GROUP locks in
				 * the waiting queue or, if there are none,
				 * then in front of the first non-GROUP lock */
				if (lock->l_req_mode != LCK_GROUP) {
					/* Ok, we hit a non-GROUP lock; there
					 * should be no more GROUP locks later
					 * on, queue in front of the first
					 * non-GROUP lock */
					ldlm_resource_insert_lock_after(lock, req);
					list_del_init(&lock->l_res_link);
					ldlm_resource_insert_lock_after(req, lock);
					break;
				}
				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* found it */
					ldlm_resource_insert_lock_after(lock, req);
					break;
				}
				continue;
			}
			/* locks are compatible, overlap doesn't matter */
			if (lockmode_compat(lock->l_req_mode, req_mode)) {
				if (req_mode == LCK_PR &&
				    ((lock->l_policy_data.l_extent.start <=
				      req->l_policy_data.l_extent.start) &&
				     (lock->l_policy_data.l_extent.end >=
				      req->l_policy_data.l_extent.end))) {
					/* If we met a PR lock just like us or
					 * wider, and nobody down the list
					 * conflicted with it, that means we
					 * can skip processing of the rest of
					 * the list and safely place ourselves
					 * at the end of the list, or grant
					 * (depending on whether we met
					 * conflicting locks before in the
					 * list). In case of first enqueue
					 * only, we continue traversing if
					 * there is something conflicting down
					 * the list because we need to make
					 * sure that something is marked as
					 * AST_SENT as well; in case of an
					 * empty worklist we would exit on the
					 * first conflict met. */
					/* There IS a case where such flag is
					 * not set for a lock, yet it blocks
					 * something. Luckily for us this is
					 * only during destroy, so lock is
					 * exclusive. So here we are safe */
					if (!ldlm_is_ast_sent(lock))
						RETURN(compat);
				}
				/* non-group locks are compatible, overlap
				 * doesn't matter */
				if (likely(req_mode != LCK_GROUP))
					continue;

				/* If we are trying to get a GROUP lock and
				 * there is another one of this kind, we need
				 * to compare gid */
				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* If existing lock with matched gid
					 * is granted, we grant new one too. */
					if (lock->l_req_mode == lock->l_granted_mode)
						RETURN(2);
					/* Otherwise we are scanning the queue
					 * of waiting locks and it means the
					 * current request would block along
					 * with the existing lock (that is
					 * already blocked).
					 * If we are in nonblocking mode,
					 * return immediately. */
					if (*flags & LDLM_FL_BLOCK_NOWAIT) {
						compat = -EWOULDBLOCK;
						goto destroylock;
					}
					/* If this group lock is compatible
					 * with another group lock on the
					 * waiting list, they must be together
					 * in the list, so they can be granted
					 * at the same time. Otherwise the
					 * later lock can get stuck behind
					 * another, incompatible, lock. */
					ldlm_resource_insert_lock_after(lock, req);
					/* Because 'lock' is not granted, we
					 * can stop processing this queue and
					 * return immediately. There is no
					 * need to check the rest of the
					 * list. */
					RETURN(0);
				}
			}
			if (unlikely(req_mode == LCK_GROUP &&
				     (lock->l_req_mode != lock->l_granted_mode))) {
				scan = 1;
				compat = 0;
				if (lock->l_req_mode != LCK_GROUP) {
					/* Ok, we hit a non-GROUP lock; there
					 * should be no more GROUP locks later
					 * on, queue in front of the first
					 * non-GROUP lock */
					ldlm_resource_insert_lock_after(lock, req);
					list_del_init(&lock->l_res_link);
					ldlm_resource_insert_lock_after(req, lock);
					break;
				}

				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* found it */
					ldlm_resource_insert_lock_after(lock, req);
					break;
				}
				continue;
			}
			if (unlikely(lock->l_req_mode == LCK_GROUP)) {
				/* If the compared lock is GROUP, then the
				 * requested one is PR/PW so this is not
				 * compatible; extent range does not
				 * matter */
				if (*flags & LDLM_FL_BLOCK_NOWAIT) {
					compat = -EWOULDBLOCK;
					goto destroylock;
				} else {
					*flags |= LDLM_FL_NO_TIMEOUT;
				}
			} else if (lock->l_policy_data.l_extent.end < req_start ||
				   lock->l_policy_data.l_extent.start > req_end) {
				/* if a non-group lock doesn't overlap, skip it */
				continue;
			} else if (lock->l_req_extent.end < req_start ||
				   lock->l_req_extent.start > req_end) {
				/* false contention, the requests don't really
				 * overlap */
				check_contention = 0;
			}

			if (!work_list)
				RETURN(0);
			/* don't count conflicting glimpse locks */
			if (lock->l_req_mode == LCK_PR &&
			    lock->l_policy_data.l_extent.start == 0 &&
			    lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
				check_contention = 0;
			*contended_locks += check_contention;

			compat = 0;
			if (lock->l_blocking_ast &&
			    lock->l_req_mode != LCK_GROUP)
				ldlm_add_ast_work_item(lock, req, work_list);
		}
	}
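	/* If the resource is contended, the request is small (no larger than
	 * ns_max_nolock_size) and the client set LDLM_FL_DENY_ON_CONTENTION,
	 * refuse the lock with -EUSERS rather than queueing it; the client
	 * is then expected to fall back to lockless I/O for this range. */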
	if (ldlm_check_contention(req, *contended_locks) &&
	    compat == 0 &&
	    (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
	    req->l_req_mode != LCK_GROUP &&
	    req_end - req_start <=
	    ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
		GOTO(destroylock, compat = -EUSERS);

	RETURN(compat);

destroylock:
	list_del_init(&req->l_res_link);
	ldlm_lock_destroy_nolock(req);
	*err = compat;
	RETURN(compat);
}
/**
 * Discard all AST work items from list.
 *
 * If for whatever reason we do not want to send ASTs to conflicting locks
 * anymore, disassemble the list with this function.
 */
static void discard_bl_list(struct list_head *bl_list)
{
	struct list_head *tmp, *pos;
	ENTRY;

	list_for_each_safe(pos, tmp, bl_list) {
		struct ldlm_lock *lock =
			list_entry(pos, struct ldlm_lock, l_bl_ast);

		list_del_init(&lock->l_bl_ast);
		LASSERT(ldlm_is_ast_sent(lock));
		ldlm_clear_ast_sent(lock);
		LASSERT(lock->l_bl_ast_run == 0);
		LASSERT(lock->l_blocking_lock);
		LDLM_LOCK_RELEASE(lock->l_blocking_lock);
		lock->l_blocking_lock = NULL;
		LDLM_LOCK_RELEASE(lock);
	}
	EXIT;
}
/**
 * Process a granting attempt for extent lock.
 * Must be called with ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues. The lock is granted if no conflicts are found in
 * either queue.
 *
 * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *
 * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent yet, so the list of conflicting
 *     locks would be collected and ASTs sent.
 */
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
			     int first_enq, enum ldlm_error *err,
			     struct list_head *work_list)
{
	struct ldlm_resource *res = lock->l_resource;
	struct list_head rpc_list;
	int rc, rc2;
	int contended_locks = 0;
	ENTRY;

	LASSERT(lock->l_granted_mode != lock->l_req_mode);
	LASSERT(list_empty(&res->lr_converting));
	LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
		!ldlm_is_ast_discard_data(lock));
	INIT_LIST_HEAD(&rpc_list);
	check_res_locked(res);
	if (!first_enq) {
		/* Careful observers will note that we don't handle -EWOULDBLOCK
		 * here, but it's ok for a non-obvious reason -- compat_queue
		 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
		 * flags should always be zero here, and if that ever stops
		 * being true, we want to find out. */
		LASSERT(*flags == 0);
		rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
					      err, NULL, &contended_locks);
		if (rc == 1) {
			rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
						      flags, err, NULL,
						      &contended_locks);
		}
		if (rc == 0)
			RETURN(LDLM_ITER_STOP);

		ldlm_resource_unlink_lock(lock);

		if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
			ldlm_extent_policy(res, lock, flags);
		ldlm_grant_lock(lock, work_list);
		RETURN(LDLM_ITER_CONTINUE);
	}
restart:
	contended_locks = 0;
	rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
				      &rpc_list, &contended_locks);
	if (rc < 0)
		GOTO(out, rc); /* lock was destroyed */
	if (rc == 2)
		goto grant;

	rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
				       &rpc_list, &contended_locks);
	if (rc2 < 0)
		GOTO(out, rc = rc2); /* lock was destroyed */

	if (rc + rc2 == 2) {
grant:
		ldlm_extent_policy(res, lock, flags);
		ldlm_resource_unlink_lock(lock);
		ldlm_grant_lock(lock, NULL);
	} else {
		/* If either of the compat_queue()s returned failure, then we
		 * have ASTs to send and must go onto the waiting list.
		 *
		 * bug 2322: we used to unlink and re-add here, which was a
		 * terrible folly -- if we goto restart, we could get
		 * re-ordered! Causes deadlock, because ASTs aren't sent! */
		if (list_empty(&lock->l_res_link))
			ldlm_resource_add_lock(res, &res->lr_waiting, lock);
		unlock_res(res);
		rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
				       LDLM_WORK_BL_AST);

		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
		    !ns_is_client(ldlm_res_to_ns(res)))
			class_fail_export(lock->l_export);

		lock_res(res);
		if (rc == -ERESTART) {
			/* 15715: The lock was granted and destroyed after
			 * resource lock was dropped. Interval node was freed
			 * in ldlm_lock_destroy. Anyway, this always happens
			 * when a client is being evicted. So it would be
			 * ok to return an error. -jay */
			if (ldlm_is_destroyed(lock)) {
				*err = -EAGAIN;
				GOTO(out, rc = -EAGAIN);
			}
			/* lock was granted while resource was unlocked. */
			if (lock->l_granted_mode == lock->l_req_mode) {
				/* bug 11300: if the lock has been granted,
				 * break earlier because otherwise, we will go
				 * to restart and ldlm_resource_unlink will be
				 * called and it causes the interval node to be
				 * freed. Then we will fail at
				 * ldlm_extent_add_lock() */
				*flags &= ~LDLM_FL_BLOCKED_MASK;
				GOTO(out, rc = 0);
			}

			GOTO(restart, rc);
		}
		/* this way we force client to wait for the lock
		 * endlessly once the lock is enqueued -bzzz */
		*flags |= LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT;
	}
	RETURN(0);
out:
	if (!list_empty(&rpc_list)) {
		LASSERT(!ldlm_is_ast_discard_data(lock));
		discard_bl_list(&rpc_list);
	}
	RETURN(rc);
}
#endif /* HAVE_SERVER_SUPPORT */
/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock". This function returns the new KMS value.
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
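/* Illustrative example: if the cancelled lock covered [0, 8191] and the
 * widest remaining granted extent ends at 4095, the KMS drops from 8192 to
 * 4096; if any remaining granted extent still ends at or past old_kms, the
 * old value is kept. */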
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
	struct ldlm_resource *res = lock->l_resource;
	struct list_head *tmp;
	struct ldlm_lock *lck;
	__u64 kms = 0;
	ENTRY;

	/* don't let another thread in ldlm_extent_shift_kms race in
	 * just after we finish and take our lock into account in its
	 * calculation of the kms */
	ldlm_set_kms_ignore(lock);
	list_for_each(tmp, &res->lr_granted) {
		lck = list_entry(tmp, struct ldlm_lock, l_res_link);

		if (ldlm_is_kms_ignore(lck))
			continue;

		if (lck->l_policy_data.l_extent.end >= old_kms)
			RETURN(old_kms);

		/* This extent _has_ to be smaller than old_kms (checked above)
		 * so kms can only ever be smaller or the same as old_kms. */
		if (lck->l_policy_data.l_extent.end + 1 > kms)
			kms = lck->l_policy_data.l_extent.end + 1;
	}
	LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);
	RETURN(kms);
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
struct kmem_cache *ldlm_interval_slab;

struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
	struct ldlm_interval *node;
	ENTRY;

	LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
	OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
	if (node == NULL)
		RETURN(NULL);

	INIT_LIST_HEAD(&node->li_group);
	ldlm_interval_attach(node, lock);
	RETURN(node);
}
void ldlm_interval_free(struct ldlm_interval *node)
{
	if (node) {
		LASSERT(list_empty(&node->li_group));
		LASSERT(!interval_is_intree(&node->li_node));
		OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
	}
}
/* interval tree, for LDLM_EXTENT. */
void ldlm_interval_attach(struct ldlm_interval *n,
			  struct ldlm_lock *l)
{
	LASSERT(l->l_tree_node == NULL);
	LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

	list_add_tail(&l->l_sl_policy, &n->li_group);
	l->l_tree_node = n;
}
struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
	struct ldlm_interval *n = l->l_tree_node;

	if (n == NULL)
		return NULL;

	LASSERT(!list_empty(&n->li_group));
	l->l_tree_node = NULL;
	list_del_init(&l->l_sl_policy);

	return list_empty(&n->li_group) ? n : NULL;
}
static inline int ldlm_mode_to_index(enum ldlm_mode mode)
{
	int index;

	LASSERT(mode != 0);
	LASSERT(IS_PO2(mode));
	for (index = -1; mode != 0; index++, mode >>= 1)
		;
	LASSERT(index < LCK_MODE_NUM);
	return index;
}
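/* For example (assuming the standard power-of-two mode values from
 * lustre_dlm.h, e.g. LCK_EX = 1, LCK_PW = 2, LCK_PR = 4), LCK_PW maps to
 * index 1 and LCK_PR to index 2 of the resource's lr_itree[] array. */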
/** Add newly granted lock into interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
			  struct ldlm_lock *lock)
{
	struct interval_node *found, **root;
	struct ldlm_interval *node;
	struct ldlm_extent *extent;
	int idx;

	LASSERT(lock->l_granted_mode == lock->l_req_mode);
	node = lock->l_tree_node;
	LASSERT(node != NULL);
	LASSERT(!interval_is_intree(&node->li_node));

	idx = ldlm_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

	/* node extent initialize */
	extent = &lock->l_policy_data.l_extent;
	interval_set(&node->li_node, extent->start, extent->end);

	root = &res->lr_itree[idx].lit_root;
	found = interval_insert(&node->li_node, root);
	if (found) { /* The policy group found. */
		struct ldlm_interval *tmp = ldlm_interval_detach(lock);

		LASSERT(tmp != NULL);
		ldlm_interval_free(tmp);
		ldlm_interval_attach(to_ldlm_interval(found), lock);
	}
	res->lr_itree[idx].lit_size++;
	/* even though we use the interval tree to manage the extent locks,
	 * we also add the locks into the granted list, for debugging
	 * purposes */
	ldlm_resource_add_lock(res, &res->lr_granted, lock);
}
/** Remove cancelled lock from resource interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval *node = lock->l_tree_node;
	struct ldlm_interval_tree *tree;
	int idx;

	if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
		return;

	idx = ldlm_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	tree = &res->lr_itree[idx];

	LASSERT(tree->lit_root != NULL); /* assure the tree is not null */

	tree->lit_size--;
	node = ldlm_interval_detach(lock);
	if (node) {
		interval_erase(&node->li_node, &tree->lit_root);
		ldlm_interval_free(node);
	}
}
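/* Translate the extent policy between its on-the-wire and local in-memory
 * representations. The memset() ensures that any fields of the union not
 * used by extent locks are zeroed rather than left uninitialized. */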
void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				      union ldlm_policy_data *lpolicy)
{
	memset(lpolicy, 0, sizeof(*lpolicy));
	lpolicy->l_extent.start = wpolicy->l_extent.start;
	lpolicy->l_extent.end = wpolicy->l_extent.end;
	lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}
void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				      union ldlm_wire_policy_data *wpolicy)
{
	memset(wpolicy, 0, sizeof(*wpolicy));
	wpolicy->l_extent.start = lpolicy->l_extent.start;
	wpolicy->l_extent.end = lpolicy->l_extent.end;
	wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}