/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
/**
 * This file contains implementation of EXTENT lock type
 *
 * EXTENT lock type is for locking a contiguous range of values, represented
 * by 64-bit starting and ending offsets (inclusive). There are several extent
 * lock modes, some of which may be mutually incompatible. Extent locks are
 * considered incompatible if their modes are incompatible and their extents
 * intersect. See the lock mode compatibility matrix in lustre_dlm.h.
 */
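/*
 * Illustrative sketch (added for exposition, not from the original source):
 * two inclusive [start, end] extents intersect iff neither one ends before
 * the other begins, which is the test that ldlm_extent_overlap() performs
 * for the code below:
 *
 *	static inline int extent_overlap_example(const struct ldlm_extent *a,
 *						 const struct ldlm_extent *b)
 *	{
 *		return a->start <= b->end && b->start <= a->end;
 *	}
 *
 * For example, [0, 4095] and [4096, 8191] do not overlap, while [0, 4096]
 * and [4096, 8191] do; two such overlapping locks conflict only if their
 * modes are also incompatible.
 */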
#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

#ifdef HAVE_SERVER_SUPPORT
# define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
/**
 * Fix up the ldlm_extent after expanding it.
 *
 * After expansion has been done, we might still want to do certain adjusting
 * based on overall contention of the resource and the like to avoid granting
 * overly wide locks.
 */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
                                              struct ldlm_extent *new_ex,
                                              int conflicting)
{
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        __u64 req_align, mask;

        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }

        if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
                EXIT;
                return;
        }

        /* we need to ensure that the lock extent is properly aligned to what
         * the client requested. Also we need to make sure it's also server
         * page size aligned otherwise a server page can be covered by two
         * write locks. */
        mask = PAGE_CACHE_SIZE;
        req_align = (req_end + 1) | req_start;
        if (req_align != 0 && (req_align & (mask - 1)) == 0) {
                while ((req_align & mask) == 0)
                        mask <<= 1;
        }
        mask -= 1;
        /* We can only shrink the lock, not grow it.
         * This should never cause lock to be smaller than requested,
         * since requested lock was already aligned on these boundaries. */
        new_ex->start = ((new_ex->start - 1) | mask) + 1;
        new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
        LASSERTF(new_ex->start <= req_start,
                 "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
                 mask, new_ex->start, req_start);
        LASSERTF(new_ex->end >= req_end,
                 "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
                 mask, new_ex->end, req_end);
}
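/*
 * Worked example of the rounding above (added for exposition, values are
 * hypothetical): suppose the client requested [4096, 8191] and the server
 * page size is 4096. Then req_align = 8192 | 4096 = 12288 is page aligned,
 * so the loop leaves mask at 4096 and "mask -= 1" makes it 4095. If
 * expansion produced new_ex = [1000, 20000], the extent is shrunk inward
 * to page boundaries without cutting into the request:
 *
 *	new_ex->start = ((1000 - 1)  | 4095) + 1  =  4096
 *	new_ex->end   = ((20000 + 1) & ~4095) - 1 = 16383
 *
 * and the granted extent [4096, 16383] still covers [4096, 8191].
 */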
/**
 * Return the maximum extent that:
 * - contains the requested extent
 * - does not overlap existing conflicting extents outside the requested one
 *
 * This allows clients to request a small required extent range, but if there
 * is no contention on the lock the full lock can be granted to the client.
 * This avoids the need for many smaller lock requests to be granted in the
 * common (uncontended) case.
 *
 * Use interval tree to expand the lock extent for granted lock.
 */
static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
                                                struct ldlm_extent *new_ex)
{
        struct ldlm_resource *res = req->l_resource;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_interval_tree *tree;
        struct interval_node_extent limiter = { new_ex->start, new_ex->end };
        int conflicting = 0;
        int idx;
        ENTRY;

        lockmode_verify(req_mode);

        /* Using interval tree to handle the LDLM extent granted locks. */
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                struct interval_node_extent ext = { req_start, req_end };

                tree = &res->lr_itree[idx];
                if (lockmode_compat(tree->lit_mode, req_mode))
                        continue;

                conflicting += tree->lit_size;
                if (conflicting > 4)
                        limiter.start = req_start;

                if (interval_is_overlapped(tree->lit_root, &ext))
                        CDEBUG(D_INFO,
                               "req_mode = %d, tree->lit_mode = %d, "
                               "tree->lit_size = %d\n",
                               req_mode, tree->lit_mode, tree->lit_size);
                interval_expand(tree->lit_root, &ext, &limiter);
                limiter.start = max(limiter.start, ext.start);
                limiter.end = min(limiter.end, ext.end);
                if (limiter.start == req_start && limiter.end == req_end)
                        break;
        }

        new_ex->start = limiter.start;
        new_ex->end = limiter.end;
        LASSERT(new_ex->start <= req_start);
        LASSERT(new_ex->end >= req_end);

        ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
        EXIT;
}
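/*
 * Expansion example (a sketch of the intended behaviour, values are
 * hypothetical): if the only conflicting granted locks sit at [0, 999]
 * and [50000, 59999] and the request is [10000, 19999], interval_expand()
 * widens ext to the hole between the two conflicting nodes, so limiter
 * ends up as [1000, 49999] and the whole uncontended gap can be granted
 * instead of just the range the client asked for.
 */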
/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
static void
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
                                    struct ldlm_extent *new_ex)
{
        struct ldlm_resource *res = req->l_resource;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_lock *lock;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);

        /* for waiting locks */
        list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
                struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;

                /* We already hit the minimum requested size, search no more */
                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;

                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;

                /* If lock doesn't overlap new_ex, skip it. */
                if (!ldlm_extent_overlap(l_extent, new_ex))
                        continue;

                /* Locks conflicting in requested extents and we can't satisfy
                 * both locks, so ignore it. Either we will ping-pong this
                 * extent (we would regardless of what extent we granted) or
                 * lock is unused and it shouldn't limit our extent growth. */
                if (ldlm_extent_overlap(&lock->l_req_extent,
                                        &req->l_req_extent))
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond previous request.
                 * l_req_extent->end strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end + 1,
                                                    req_start);
                }

                /* If we need to cancel this lock anyways because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had over grown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1,
                                                  req_end);
                }
        }

        ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
        EXIT;
}
/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, __u64 *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

        if (lock->l_export == NULL)
                /*
                 * this is a local lock taken by server (e.g., as a part of
                 * OST-side locking, or unlink handling). Expansion doesn't
                 * make a lot of sense for local locks, because they are
                 * dropped immediately on operation completion and would only
                 * conflict with other threads.
                 */
                return;

        if (lock->l_policy_data.l_extent.start == 0 &&
            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                /* fast-path whole file locks */
                return;

        ldlm_extent_internal_policy_granted(lock, &new_ex);
        ldlm_extent_internal_policy_waiting(lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}
static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
        struct ldlm_resource *res = lock->l_resource;
        cfs_time_t now = cfs_time_current();

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
                return 1;

        CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
        if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
                res->lr_contention_time = now;
        return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
                cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
}
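/*
 * Summary of the logic above (added for exposition, tunable values are
 * hypothetical): once more than ns_contended_locks conflicting locks are
 * seen at once, the resource is stamped as contended and stays so for the
 * next ns_contention_time seconds. E.g. with ns_contended_locks = 32 and
 * ns_contention_time = 2, a burst of 33 conflicting locks makes
 * ldlm_check_contention() return true for 2 seconds afterwards.
 */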
struct ldlm_extent_compat_args {
        struct list_head *work_list;
        struct ldlm_lock *lock;
        ldlm_mode_t mode;
        int *locks;
        int *compat;
};

static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
                                                void *data)
{
        struct ldlm_extent_compat_args *priv = data;
        struct ldlm_interval *node = to_ldlm_interval(n);
        struct ldlm_extent *extent;
        struct list_head *work_list = priv->work_list;
        struct ldlm_lock *lock, *enq = priv->lock;
        ldlm_mode_t mode = priv->mode;
        int count = 0;
        ENTRY;

        LASSERT(!list_empty(&node->li_group));

        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                /* interval tree is for granted lock */
                LASSERTF(mode == lock->l_granted_mode,
                         "mode = %s, lock->l_granted_mode = %s\n",
                         ldlm_lockname[mode],
                         ldlm_lockname[lock->l_granted_mode]);
                count++;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, enq, work_list);
        }

        /* don't count conflicting glimpse locks */
        extent = ldlm_interval_extent(node);
        if (!(mode == LCK_PR &&
              extent->start == 0 && extent->end == OBD_OBJECT_EOF))
                *priv->locks += count;

        if (priv->compat)
                *priv->compat = 0;

        RETURN(INTERVAL_ITER_CONT);
}
/**
 * Determine if the lock is compatible with all locks on the queue.
 *
 * If \a work_list is provided, conflicting locks are linked there.
 * If \a work_list is not provided, we exit this function on first conflict.
 *
 * \retval 0 if the lock is not compatible
 * \retval 1 if the lock is compatible
 * \retval 2 if \a req is a group lock and it is compatible and requires
 *           no further checking
 * \retval negative error, such as EWOULDBLOCK for group locks
 */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         __u64 *flags, ldlm_error_t *err,
                         struct list_head *work_list, int *contended_locks)
{
        struct ldlm_resource *res = req->l_resource;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_lock *lock;
        int check_contention;
        int compat = 1;
        int scan = 0;
        ENTRY;

        lockmode_verify(req_mode);

        /* Using interval tree for granted lock */
        if (queue == &res->lr_granted) {
                struct ldlm_interval_tree *tree;
                struct ldlm_extent_compat_args data = {.work_list = work_list,
                                                .lock = req,
                                                .locks = contended_locks,
                                                .compat = &compat };
                struct interval_node_extent ex = { .start = req_start,
                                                   .end = req_end };
                int idx, rc;

                for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                        tree = &res->lr_itree[idx];
                        if (tree->lit_root == NULL) /* empty tree, skipped */
                                continue;

                        data.mode = tree->lit_mode;
                        if (lockmode_compat(req_mode, tree->lit_mode)) {
                                struct ldlm_interval *node;
                                struct ldlm_extent *extent;

                                if (req_mode != LCK_GROUP)
                                        continue;

                                /* group lock, grant it immediately if
                                 * compatible */
                                node = to_ldlm_interval(tree->lit_root);
                                extent = ldlm_interval_extent(node);
                                if (req->l_policy_data.l_extent.gid ==
                                    extent->gid)
                                        RETURN(2);
                        }

                        if (tree->lit_mode == LCK_GROUP) {
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }

                                *flags |= LDLM_FL_NO_TIMEOUT;
                                if (!work_list)
                                        RETURN(0);

                                /* if work list is not NULL, add all
                                 * locks in the tree to work list */
                                compat = 0;
                                interval_iterate(tree->lit_root,
                                                 ldlm_extent_compat_cb, &data);
                                continue;
                        }

                        if (!work_list) {
                                rc = interval_is_overlapped(tree->lit_root,
                                                            &ex);
                                if (rc)
                                        RETURN(0);
                        } else {
                                interval_search(tree->lit_root, &ex,
                                                ldlm_extent_compat_cb, &data);
                                if (!list_empty(work_list) && compat)
                                        compat = 0;
                        }
                }
        } else { /* for waiting queue */
                list_for_each_entry(lock, queue, l_res_link) {
                        check_contention = 1;

                        /* We stop walking the queue if we hit ourselves so
                         * we don't take conflicting locks enqueued after us
                         * into account, or we'd wait forever. */
                        if (req == lock)
                                break;

                        if (unlikely(scan)) {
                                /* We only get here if we are queuing GROUP lock
                                   and met some incompatible one. The main idea of this
                                   code is to insert GROUP lock past compatible GROUP
                                   lock in the waiting queue or if there is not any,
                                   then in front of first non-GROUP lock */
                                if (lock->l_req_mode != LCK_GROUP) {
                                        /* Ok, we hit non-GROUP lock, there should
                                         * be no more GROUP locks later on, queue in
                                         * front of first non-GROUP lock */

                                        ldlm_resource_insert_lock_after(lock, req);
                                        list_del_init(&lock->l_res_link);
                                        ldlm_resource_insert_lock_after(req, lock);
                                        break;
                                }
                                if (req->l_policy_data.l_extent.gid ==
                                    lock->l_policy_data.l_extent.gid) {
                                        /* found it */
                                        ldlm_resource_insert_lock_after(lock, req);
                                        break;
                                }
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_req_mode, req_mode)) {
                                if (req_mode == LCK_PR &&
                                    ((lock->l_policy_data.l_extent.start <=
                                      req->l_policy_data.l_extent.start) &&
                                     (lock->l_policy_data.l_extent.end >=
                                      req->l_policy_data.l_extent.end))) {
                                        /* If we met a PR lock just like us or
                                           wider, and nobody down the list
                                           conflicted with it, that means we
                                           can skip processing of the rest of
                                           the list and safely place ourselves
                                           at the end of the list, or grant
                                           (depending on whether we met
                                           conflicting locks before in the
                                           list). In case of 1st enqueue only
                                           we continue traversing if there is
                                           something conflicting down the list
                                           because we need to make sure that
                                           something is marked as AST_SENT as
                                           well, in case of an empty worklist
                                           we would exit on first conflict
                                           met. */
                                        /* There IS a case where such flag is
                                           not set for a lock, yet it blocks
                                           something. Luckily for us this is
                                           only during destroy, so lock is
                                           exclusive. So here we are safe */
                                        if (!ldlm_is_ast_sent(lock))
                                                RETURN(compat);
                                }

                                /* non-group locks are compatible, overlap doesn't
                                   matter */
                                if (likely(req_mode != LCK_GROUP))
                                        continue;

                                /* If we are trying to get a GROUP lock and there is
                                   another one of this kind, we need to compare gid */
                                if (req->l_policy_data.l_extent.gid ==
                                    lock->l_policy_data.l_extent.gid) {
                                        /* If existing lock with matched gid is granted,
                                           we grant new one too. */
                                        if (lock->l_req_mode == lock->l_granted_mode)
                                                RETURN(2);

                                        /* Otherwise we are scanning queue of waiting
                                         * locks and it means current request would
                                         * block along with existing lock (that is
                                         * already blocked).
                                         * If we are in nonblocking mode - return
                                         * immediately */
                                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                                compat = -EWOULDBLOCK;
                                                goto destroylock;
                                        }
                                        /* If this group lock is compatible with another
                                         * group lock on the waiting list, they must be
                                         * together in the list, so they can be granted
                                         * at the same time. Otherwise the later lock
                                         * can get stuck behind another, incompatible,
                                         * lock. */
                                        ldlm_resource_insert_lock_after(lock, req);
                                        /* Because 'lock' is not granted, we can stop
                                         * processing this queue and return immediately.
                                         * There is no need to check the rest of the
                                         * list. */
                                        RETURN(0);
                                }
                        }

                        if (unlikely(req_mode == LCK_GROUP &&
                                     (lock->l_req_mode != lock->l_granted_mode))) {
                                scan = 1;
                                compat = 0;
                                if (lock->l_req_mode != LCK_GROUP) {
                                        /* Ok, we hit non-GROUP lock, there should be no
                                           more GROUP locks later on, queue in front of
                                           first non-GROUP lock */

                                        ldlm_resource_insert_lock_after(lock, req);
                                        list_del_init(&lock->l_res_link);
                                        ldlm_resource_insert_lock_after(req, lock);
                                        break;
                                }
                                if (req->l_policy_data.l_extent.gid ==
                                    lock->l_policy_data.l_extent.gid) {
                                        /* found it */
                                        ldlm_resource_insert_lock_after(lock, req);
                                        break;
                                }
                                continue;
                        }

                        if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                                /* If compared lock is GROUP, then requested is
                                 * PR/PW, so this is not compatible; extent
                                 * range does not matter */
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                } else {
                                        *flags |= LDLM_FL_NO_TIMEOUT;
                                }
                        } else if (lock->l_policy_data.l_extent.end < req_start ||
                                   lock->l_policy_data.l_extent.start > req_end) {
                                /* if a non group lock doesn't overlap skip it */
                                continue;
                        } else if (lock->l_req_extent.end < req_start ||
                                   lock->l_req_extent.start > req_end) {
                                /* false contention, the requests don't really
                                 * overlap */
                                check_contention = 0;
                        }

                        if (!work_list)
                                RETURN(0);

                        /* don't count conflicting glimpse locks */
                        if (lock->l_req_mode == LCK_PR &&
                            lock->l_policy_data.l_extent.start == 0 &&
                            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                                check_contention = 0;

                        *contended_locks += check_contention;

                        compat = 0;
                        if (lock->l_blocking_ast)
                                ldlm_add_ast_work_item(lock, req, work_list);
                }
        }

        if (ldlm_check_contention(req, *contended_locks) &&
            compat == 0 &&
            (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
            req->l_req_mode != LCK_GROUP &&
            req_end - req_start <=
            ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
                GOTO(destroylock, compat = -EUSERS);

        RETURN(compat);
destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy_nolock(req);
        *err = compat;
        RETURN(compat);
}
/**
 * Discard all AST work items from list.
 *
 * If for whatever reason we do not want to send ASTs to conflicting locks
 * anymore, disassemble the list with this function.
 */
static void discard_bl_list(struct list_head *bl_list)
{
        struct list_head *tmp, *pos;
        ENTRY;

        list_for_each_safe(pos, tmp, bl_list) {
                struct ldlm_lock *lock =
                        list_entry(pos, struct ldlm_lock, l_bl_ast);

                list_del_init(&lock->l_bl_ast);
                LASSERT(ldlm_is_ast_sent(lock));
                ldlm_clear_ast_sent(lock);
                LASSERT(lock->l_bl_ast_run == 0);
                LASSERT(lock->l_blocking_lock);
                LDLM_LOCK_RELEASE(lock->l_blocking_lock);
                lock->l_blocking_lock = NULL;
                LDLM_LOCK_RELEASE(lock);
        }
        EXIT;
}
/**
 * Process a granting attempt for extent lock.
 * Must be called with ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues. The lock is granted if no conflicts are found in
 * either queue.
 *
 * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *
 * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent yet, so list of conflicting locks
 *     would be collected and ASTs sent.
 */
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
                             int first_enq, ldlm_error_t *err,
                             struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list;
        int rc, rc2;
        int contended_locks = 0;
        ENTRY;

        LASSERT(lock->l_granted_mode != lock->l_req_mode);
        LASSERT(list_empty(&res->lr_converting));
        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !ldlm_is_ast_discard_data(lock));
        INIT_LIST_HEAD(&rpc_list);
        check_res_locked(res);
        *err = ELDLM_OK;

        if (!first_enq) {
                /* Careful observers will note that we don't handle -EWOULDBLOCK
                 * here, but it's ok for a non-obvious reason -- compat_queue
                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
                 * flags should always be zero here, and if that ever stops
                 * being true, we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                              err, NULL, &contended_locks);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, err, NULL,
                                                      &contended_locks);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
                        ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }

restart:
        contended_locks = 0;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
                                      &rpc_list, &contended_locks);
        if (rc < 0)
                GOTO(out, rc); /* lock was destroyed */
        if (rc == 2)
                goto grant;

        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
                                       &rpc_list, &contended_locks);
        if (rc2 < 0)
                GOTO(out, rc = rc2); /* lock was destroyed */

        if (rc + rc2 == 2) {
grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered! Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                unlock_res(res);
                rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
                                       LDLM_WORK_BL_AST);

                if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
                    !ns_is_client(ldlm_res_to_ns(res)))
                        class_fail_export(lock->l_export);

                lock_res(res);
                if (rc == -ERESTART) {
                        /* 15715: The lock was granted and destroyed after
                         * resource lock was dropped. Interval node was freed
                         * in ldlm_lock_destroy. Anyway, this always happens
                         * when a client is being evicted. So it would be
                         * ok to return an error. -jay */
                        if (ldlm_is_destroyed(lock)) {
                                *err = -EAGAIN;
                                GOTO(out, rc = -EAGAIN);
                        }

                        /* lock was granted while resource was unlocked. */
                        if (lock->l_granted_mode == lock->l_req_mode) {
                                /* bug 11300: if the lock has been granted,
                                 * break earlier because otherwise, we will go
                                 * to restart and ldlm_resource_unlink will be
                                 * called and it causes the interval node to be
                                 * freed. Then we will fail at
                                 * ldlm_extent_add_lock() */
                                *flags &= ~LDLM_FL_BLOCKED_MASK;
                                GOTO(out, rc = 0);
                        }

                        GOTO(restart, rc);
                }

                /* this way we force client to wait for the lock
                 * endlessly once the lock is enqueued -bzzz */
                *flags |= LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT;
        }
        RETURN(0);
out:
        if (!list_empty(&rpc_list)) {
                LASSERT(!ldlm_is_ast_discard_data(lock));
                discard_bl_list(&rpc_list);
        }
        RETURN(rc);
}
#endif /* HAVE_SERVER_SUPPORT */
/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock". This function returns the new KMS value.
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        ldlm_set_kms_ignore(lock);

        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (ldlm_is_kms_ignore(lck))
                        continue;

                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked above)
                 * so kms can only ever be smaller or the same as old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);

        RETURN(kms);
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
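/*
 * KMS example (added for exposition, values are hypothetical): with
 * granted locks on [0, 4095] and [4096, 8191] and old_kms = 8192,
 * cancelling the [4096, 8191] lock leaves [0, 4095] as the highest
 * surviving extent, so the new KMS is 4095 + 1 = 4096. Any surviving
 * lock whose extent ends at or beyond old_kms short-circuits the scan
 * and old_kms is kept unchanged.
 */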
struct kmem_cache *ldlm_interval_slab;
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
        struct ldlm_interval *node;
        ENTRY;

        LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
        OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
        if (node == NULL)
                RETURN(NULL);

        INIT_LIST_HEAD(&node->li_group);
        ldlm_interval_attach(node, lock);
        RETURN(node);
}

void ldlm_interval_free(struct ldlm_interval *node)
{
        if (node) {
                LASSERT(list_empty(&node->li_group));
                LASSERT(!interval_is_intree(&node->li_node));
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        }
}

/* interval tree, for LDLM_EXTENT. */
void ldlm_interval_attach(struct ldlm_interval *n,
                          struct ldlm_lock *l)
{
        LASSERT(l->l_tree_node == NULL);
        LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

        list_add_tail(&l->l_sl_policy, &n->li_group);
        l->l_tree_node = n;
}

struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
        struct ldlm_interval *n = l->l_tree_node;

        if (n == NULL)
                return NULL;

        LASSERT(!list_empty(&n->li_group));
        l->l_tree_node = NULL;
        list_del_init(&l->l_sl_policy);

        return list_empty(&n->li_group) ? n : NULL;
}
static inline int lock_mode_to_index(ldlm_mode_t mode)
{
        int index;

        LASSERT(mode != 0);
        LASSERT(IS_PO2(mode));
        for (index = -1; mode != 0; index++, mode >>= 1)
                ;
        LASSERT(index < LCK_MODE_NUM);
        return index;
}
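/*
 * Since lock modes are single bits (LCK_EX == 1, LCK_PW == 2, LCK_PR == 4,
 * ... per the LCK_* definitions in lustre_dlm.h), the loop above is
 * effectively log2(mode), e.g.:
 *
 *	lock_mode_to_index(LCK_EX) == 0
 *	lock_mode_to_index(LCK_PW) == 1
 *	lock_mode_to_index(LCK_PR) == 2
 *
 * which selects the per-mode interval tree slot in res->lr_itree[].
 */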
/** Add newly granted lock into interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
                          struct ldlm_lock *lock)
{
        struct interval_node *found, **root;
        struct ldlm_interval *node;
        struct ldlm_extent *extent;
        int idx;

        LASSERT(lock->l_granted_mode == lock->l_req_mode);

        node = lock->l_tree_node;
        LASSERT(node != NULL);
        LASSERT(!interval_is_intree(&node->li_node));

        idx = lock_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == 1 << idx);
        LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

        /* node extent initialize */
        extent = &lock->l_policy_data.l_extent;
        interval_set(&node->li_node, extent->start, extent->end);

        root = &res->lr_itree[idx].lit_root;
        found = interval_insert(&node->li_node, root);
        if (found) { /* The policy group found. */
                struct ldlm_interval *tmp = ldlm_interval_detach(lock);

                LASSERT(tmp != NULL);
                ldlm_interval_free(tmp);
                ldlm_interval_attach(to_ldlm_interval(found), lock);
        }
        res->lr_itree[idx].lit_size++;

        /* even though we use interval tree to manage the extent lock, we also
         * add the locks into grant list, for debug purpose, .. */
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
}
/** Remove cancelled lock from resource interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_interval *node = lock->l_tree_node;
        struct ldlm_interval_tree *tree;
        int idx;

        if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
                return;

        idx = lock_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == 1 << idx);
        tree = &res->lr_itree[idx];

        LASSERT(tree->lit_root != NULL); /* assure the tree is not null */

        tree->lit_size--;
        node = ldlm_interval_detach(lock);
        if (node) {
                interval_erase(&node->li_node, &tree->lit_root);
                ldlm_interval_free(node);
        }
}
void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
                                      ldlm_policy_data_t *lpolicy)
{
        memset(lpolicy, 0, sizeof(*lpolicy));
        lpolicy->l_extent.start = wpolicy->l_extent.start;
        lpolicy->l_extent.end = wpolicy->l_extent.end;
        lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}

void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
                                      ldlm_wire_policy_data_t *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_extent.start = lpolicy->l_extent.start;
        wpolicy->l_extent.end = lpolicy->l_extent.end;
        wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}