/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
/**
 * This file contains implementation of EXTENT lock type
 *
 * EXTENT lock type is for locking a contiguous range of values, represented
 * by 64-bit starting and ending offsets (inclusive). There are several extent
 * lock modes, some of which may be mutually incompatible. Extent locks are
 * considered incompatible if their modes are incompatible and their extents
 * intersect. See the lock mode compatibility matrix in lustre_dlm.h.
 */
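/*
 * Worked example (illustrative only, not part of the upstream code): a PW
 * lock on [0, 4095] and a PW lock on [4096, 8191] have conflicting modes
 * but disjoint extents, so both can be granted.  A PW lock on [0, 4095]
 * and a PR lock on [1024, 2047] intersect with incompatible modes, so the
 * second must wait.  Two PR locks are compatible regardless of overlap.
 */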
#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"
#ifdef HAVE_SERVER_SUPPORT
# define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
/**
 * Fix up the ldlm_extent after expanding it.
 *
 * After expansion has been done, we might still want to do certain adjusting
 * based on overall contention of the resource and the like to avoid granting
 * overly wide locks.
 */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
					      struct ldlm_extent *new_ex,
					      int conflicting)
{
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	__u64 req_align, mask;

	if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
		if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
			new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
					  new_ex->end);
	}

	if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
		EXIT;
		return;
	}

	/* we need to ensure that the lock extent is properly aligned to what
	 * the client requested. Also we need to make sure it's also server
	 * page size aligned otherwise a server page can be covered by two
	 * write locks. */
	mask = PAGE_SIZE;
	req_align = (req_end + 1) | req_start;
	if (req_align != 0 && (req_align & (mask - 1)) == 0) {
		while ((req_align & mask) == 0)
			mask <<= 1;
	}
	mask -= 1;
	/* We can only shrink the lock, not grow it.
	 * This should never cause lock to be smaller than requested,
	 * since requested lock was already aligned on these boundaries. */
	new_ex->start = ((new_ex->start - 1) | mask) + 1;
	new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
	LASSERTF(new_ex->start <= req_start,
		 "mask %#llx grant start %llu req start %llu\n",
		 mask, new_ex->start, req_start);
	LASSERTF(new_ex->end >= req_end,
		 "mask %#llx grant end %llu req end %llu\n",
		 mask, new_ex->end, req_end);
}
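/*
 * Worked example (illustrative, assuming a 4096-byte server page, which
 * leaves mask = 0xfff after the widening loop): if the client asked for
 * [4096, 8191] and expansion produced [1000, 20000], then
 *   new_ex->start = ((1000 - 1)  | 0xfff) + 1  = 4096  (rounded up)
 *   new_ex->end   = ((20000 + 1) & ~0xfff) - 1 = 16383 (rounded down)
 * The grant [4096, 16383] still contains the aligned request, which is why
 * the LASSERTFs above can insist on start <= req_start and end >= req_end.
 */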
/**
 * Return the maximum extent that:
 * - contains the requested extent
 * - does not overlap existing conflicting extents outside the requested one
 *
 * This allows clients to request a small required extent range, but if there
 * is no contention on the lock the full lock can be granted to the client.
 * This avoids the need for many smaller lock requests to be granted in the
 * common (uncontended) case.
 *
 * Use interval tree to expand the lock extent for granted lock.
 */
static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
						struct ldlm_extent *new_ex)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_interval_tree *tree;
	struct interval_node_extent limiter = {
		.start	= new_ex->start,
		.end	= new_ex->end,
	};
	int conflicting = 0;
	int idx;
	ENTRY;

	lockmode_verify(req_mode);

	/* Using interval tree to handle the LDLM extent granted locks. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		struct interval_node_extent ext = {
			.start	= req_start,
			.end	= req_end,
		};

		tree = &res->lr_itree[idx];
		if (lockmode_compat(tree->lit_mode, req_mode))
			continue;

		conflicting += tree->lit_size;
		if (conflicting > 4)
			limiter.start = req_start;

		if (interval_is_overlapped(tree->lit_root, &ext))
			CDEBUG(D_INFO,
			       "req_mode = %d, tree->lit_mode = %d, tree->lit_size = %d\n",
			       req_mode, tree->lit_mode, tree->lit_size);
		interval_expand(tree->lit_root, &ext, &limiter);
		limiter.start = max(limiter.start, ext.start);
		limiter.end = min(limiter.end, ext.end);
		if (limiter.start == req_start && limiter.end == req_end)
			break;
	}

	new_ex->start = limiter.start;
	new_ex->end = limiter.end;
	LASSERT(new_ex->start <= req_start);
	LASSERT(new_ex->end >= req_end);

	ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
	EXIT;
}
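/*
 * Illustrative expansion (assuming interval_expand() widens the candidate
 * range to the largest hole around the request that avoids the tree's
 * intervals, clamped by the limiter): for a request [4096, 8191] whose only
 * incompatible granted neighbours cover [0, 1023] and [65536, EOF], the
 * limiter shrinks from [0, EOF] to [1024, 65535], and that becomes the
 * candidate grant before ldlm_extent_internal_policy_fixup() aligns it.
 */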
/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
static void
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
				    struct ldlm_extent *new_ex)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_lock *lock;
	int conflicting = 0;
	ENTRY;

	lockmode_verify(req_mode);

	/* for waiting locks */
	list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
		struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;

		/* We already hit the minimum requested size, search no more */
		if (new_ex->start == req_start && new_ex->end == req_end) {
			EXIT;
			return;
		}

		/* Don't conflict with ourselves */
		if (req == lock)
			continue;

		/* Locks are compatible, overlap doesn't matter */
		/* Until bug 20 is fixed, try to avoid granting overlapping
		 * locks on one client (they take a long time to cancel) */
		if (lockmode_compat(lock->l_req_mode, req_mode) &&
		    lock->l_export != req->l_export)
			continue;

		/* If this is a high-traffic lock, don't grow downwards at all
		 * or grow upwards too much */
		++conflicting;
		if (conflicting > 4)
			new_ex->start = req_start;

		/* If lock doesn't overlap new_ex, skip it. */
		if (!ldlm_extent_overlap(l_extent, new_ex))
			continue;

		/* Locks conflicting in requested extents and we can't satisfy
		 * both locks, so ignore it.  Either we will ping-pong this
		 * extent (we would regardless of what extent we granted) or
		 * lock is unused and it shouldn't limit our extent growth. */
		if (ldlm_extent_overlap(&lock->l_req_extent,
					&req->l_req_extent))
			continue;

		/* We grow extents downwards only as far as they don't overlap
		 * with already-granted locks, on the assumption that clients
		 * will be writing beyond the initial requested end and would
		 * then need to enqueue a new lock beyond previous request.
		 * l_req_extent->end strictly < req_start, checked above. */
		if (l_extent->start < req_start && new_ex->start != req_start) {
			if (l_extent->end >= req_start)
				new_ex->start = req_start;
			else
				new_ex->start = min(l_extent->end + 1,
						    req_start);
		}

		/* If we need to cancel this lock anyways because our request
		 * overlaps the granted lock, we grow up to its requested
		 * extent start instead of limiting this extent, assuming that
		 * clients are writing forwards and the lock had over grown
		 * its extent downwards before we enqueued our request. */
		if (l_extent->end > req_end) {
			if (l_extent->start <= req_end)
				new_ex->end = max(lock->l_req_extent.start - 1,
						  req_end);
			else
				new_ex->end = max(l_extent->start - 1, req_end);
		}
	}

	ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
	EXIT;
}
/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
			       struct ldlm_lock *lock, __u64 *flags)
{
	struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

	if (lock->l_export == NULL)
		/*
		 * this is a local lock taken by server (e.g., as a part of
		 * OST-side locking, or unlink handling). Expansion doesn't
		 * make a lot of sense for local locks, because they are
		 * dropped immediately on operation completion and would only
		 * conflict with other threads.
		 */
		return;

	if (lock->l_policy_data.l_extent.start == 0 &&
	    lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
		/* fast-path whole file locks */
		return;

	/* Because reprocess_queue zeroes flags and uses it to return
	 * LDLM_FL_LOCK_CHANGED, we must check for the NO_EXPANSION flag
	 * in the lock flags rather than the 'flags' argument */
	if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
		ldlm_extent_internal_policy_granted(lock, &new_ex);
		ldlm_extent_internal_policy_waiting(lock, &new_ex);
	} else {
		LDLM_DEBUG(lock, "Not expanding manually requested lock.\n");
		new_ex.start = lock->l_policy_data.l_extent.start;
		new_ex.end = lock->l_policy_data.l_extent.end;
		/* In case the request is not on correct boundaries, we call
		 * fixup. (normally called in ldlm_extent_internal_policy_*) */
		ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
	}

	if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
		*flags |= LDLM_FL_LOCK_CHANGED;
		lock->l_policy_data.l_extent.start = new_ex.start;
		lock->l_policy_data.l_extent.end = new_ex.end;
	}
}
static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
	struct ldlm_resource *res = lock->l_resource;
	cfs_time_t now = cfs_time_current();

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
		return 1;

	CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
	if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
		res->lr_contention_time = now;

	return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
		cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
}
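/*
 * Illustrative timeline (hypothetical tunable values, e.g.
 * ns_contended_locks = 32 and ns_contention_time = 2 seconds): once a scan
 * counts more than 32 contended locks, lr_contention_time is stamped with
 * the current time and the resource is treated as contended for the next
 * 2 seconds; any check inside that window returns true even if the
 * instantaneous count has dropped.
 */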
struct ldlm_extent_compat_args {
	struct list_head	*work_list;
	struct ldlm_lock	*lock;
	enum ldlm_mode		 mode;
	int			*locks;
	int			*compat;
};
static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
						void *data)
{
	struct ldlm_extent_compat_args *priv = data;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_extent *extent;
	struct list_head *work_list = priv->work_list;
	struct ldlm_lock *lock, *enq = priv->lock;
	enum ldlm_mode mode = priv->mode;
	int count = 0;
	ENTRY;

	LASSERT(!list_empty(&node->li_group));

	list_for_each_entry(lock, &node->li_group, l_sl_policy) {
		/* interval tree is for granted lock */
		LASSERTF(mode == lock->l_granted_mode,
			 "mode = %s, lock->l_granted_mode = %s\n",
			 ldlm_lockname[mode],
			 ldlm_lockname[lock->l_granted_mode]);
		count++;
		if (lock->l_blocking_ast &&
		    lock->l_granted_mode != LCK_GROUP)
			ldlm_add_ast_work_item(lock, enq, work_list);
	}

	/* don't count conflicting glimpse locks */
	extent = ldlm_interval_extent(node);
	if (!(mode == LCK_PR &&
	      extent->start == 0 && extent->end == OBD_OBJECT_EOF))
		*priv->locks += count;

	if (priv->compat)
		*priv->compat = 0;

	RETURN(INTERVAL_ITER_CONT);
}
/**
 * Determine if the lock is compatible with all locks on the queue.
 *
 * If \a work_list is provided, conflicting locks are linked there.
 * If \a work_list is not provided, we exit this function on first conflict.
 *
 * \retval 0 if the lock is not compatible
 * \retval 1 if the lock is compatible
 * \retval 2 if \a req is a group lock and it is compatible and requires
 *           no further checking
 * \retval negative error, such as -EWOULDBLOCK for group locks
 */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
			 __u64 *flags, enum ldlm_error *err,
			 struct list_head *work_list, int *contended_locks)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_lock *lock;
	int check_contention;
	int compat = 1;
	int scan = 0;
	ENTRY;

	lockmode_verify(req_mode);

	/* Using interval tree for granted lock */
	if (queue == &res->lr_granted) {
		struct ldlm_interval_tree *tree;
		struct ldlm_extent_compat_args data = {.work_list = work_list,
						       .lock = req,
						       .locks = contended_locks,
						       .compat = &compat };
		struct interval_node_extent ex = { .start = req_start,
						   .end = req_end };
		int idx, rc;

		for (idx = 0; idx < LCK_MODE_NUM; idx++) {
			tree = &res->lr_itree[idx];
			if (tree->lit_root == NULL) /* empty tree, skipped */
				continue;

			data.mode = tree->lit_mode;
			if (lockmode_compat(req_mode, tree->lit_mode)) {
				struct ldlm_interval *node;
				struct ldlm_extent *extent;

				if (req_mode != LCK_GROUP)
					continue;

				/* group lock, grant it immediately if
				 * compatible */
				node = to_ldlm_interval(tree->lit_root);
				extent = ldlm_interval_extent(node);
				if (req->l_policy_data.l_extent.gid ==
				    extent->gid)
					RETURN(2);
			}

			if (tree->lit_mode == LCK_GROUP) {
				if (*flags & (LDLM_FL_BLOCK_NOWAIT |
					      LDLM_FL_SPECULATIVE)) {
					compat = -EWOULDBLOCK;
					goto destroylock;
				}

				*flags |= LDLM_FL_NO_TIMEOUT;
				if (!work_list)
					RETURN(0);

				/* if work list is not NULL, add all
				 * locks in the tree to work list */
				compat = 0;
				interval_iterate(tree->lit_root,
						 ldlm_extent_compat_cb, &data);
				continue;
			}

			/* We've found a potentially blocking lock, check
			 * compatibility. This handles locks other than GROUP
			 * locks, which are handled separately above.
			 *
			 * Locks with FL_SPECULATIVE are asynchronous requests
			 * which must never wait behind another lock, so they
			 * fail if any conflicting lock is found. */
			if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
				rc = interval_is_overlapped(tree->lit_root,
							    &ex);
				if (rc) {
					if (!work_list)
						RETURN(0);

					compat = -EWOULDBLOCK;
					goto destroylock;
				}
			} else {
				interval_search(tree->lit_root, &ex,
						ldlm_extent_compat_cb, &data);
				if (!list_empty(work_list) && compat)
					compat = 0;
			}
		}
	} else { /* for waiting queue */
		list_for_each_entry(lock, queue, l_res_link) {
			check_contention = 1;

			/* We stop walking the queue if we hit ourselves so
			 * we don't take conflicting locks enqueued after us
			 * into account, or we'd wait forever. */
			if (req == lock)
				break;

			if (unlikely(scan)) {
				/* We only get here if we are queuing GROUP lock
				 * and met some incompatible one. The main idea
				 * of this code is to insert GROUP lock past
				 * compatible GROUP lock in the waiting queue or
				 * if there is not any, then in front of first
				 * non-GROUP lock */
				if (lock->l_req_mode != LCK_GROUP) {
					/* Ok, we hit non-GROUP lock, there
					 * should be no more GROUP locks later
					 * on, queue in front of first
					 * non-GROUP lock */

					ldlm_resource_insert_lock_after(lock, req);
					list_del_init(&lock->l_res_link);
					ldlm_resource_insert_lock_after(req, lock);
					break;
				}
				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* found it */
					ldlm_resource_insert_lock_after(lock, req);
					break;
				}

				continue;
			}
			/* locks are compatible, overlap doesn't matter */
			if (lockmode_compat(lock->l_req_mode, req_mode)) {
				if (req_mode == LCK_PR &&
				    ((lock->l_policy_data.l_extent.start <=
				      req->l_policy_data.l_extent.start) &&
				     (lock->l_policy_data.l_extent.end >=
				      req->l_policy_data.l_extent.end))) {
					/* If we met a PR lock just like us or
					 * wider, and nobody down the list
					 * conflicted with it, that means we
					 * can skip processing of the rest of
					 * the list and safely place ourselves
					 * at the end of the list, or grant
					 * (dependent on whether we met a
					 * conflicting lock before in the
					 * list). In case of 1st enqueue only
					 * we continue traversing if there is
					 * something conflicting down the list
					 * because we need to make sure that
					 * something is marked as AST_SENT as
					 * well; in case of an empty worklist
					 * we would exit on first conflict
					 * met. */
					/* There IS a case where such flag is
					 * not set for a lock, yet it blocks
					 * something. Luckily for us this is
					 * only during destroy, so lock is
					 * exclusive. So here we are safe */
					if (!ldlm_is_ast_sent(lock))
						RETURN(compat);
				}

				/* non-group locks are compatible, overlap
				 * doesn't matter */
				if (likely(req_mode != LCK_GROUP))
					continue;

				/* If we are trying to get a GROUP lock and
				 * there is another one of this kind, we need
				 * to compare gid */
				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* If existing lock with matched gid
					 * is granted, we grant new one too. */
					if (lock->l_req_mode ==
					    lock->l_granted_mode)
						RETURN(2);

					/* Otherwise we are scanning queue of
					 * waiting locks and it means current
					 * request would block along with the
					 * existing lock (that is already
					 * blocked).
					 * If we are in nonblocking mode -
					 * return immediately */
					if (*flags & (LDLM_FL_BLOCK_NOWAIT
						      | LDLM_FL_SPECULATIVE)) {
						compat = -EWOULDBLOCK;
						goto destroylock;
					}
					/* If this group lock is compatible with
					 * another group lock on the waiting
					 * list, they must be together in the
					 * list, so they can be granted at the
					 * same time.  Otherwise the later lock
					 * can get stuck behind another,
					 * incompatible, lock. */
					ldlm_resource_insert_lock_after(lock,
									req);
					/* Because 'lock' is not granted, we can
					 * stop processing this queue and return
					 * immediately.  There is no need to
					 * check the rest of the list. */
					RETURN(0);
				}
			}
			if (unlikely(req_mode == LCK_GROUP &&
				     lock->l_req_mode != lock->l_granted_mode)) {
				scan = 1;
				compat = 0;
				if (lock->l_req_mode != LCK_GROUP) {
					/* Ok, we hit non-GROUP lock, there
					 * should be no more GROUP locks later
					 * on, queue in front of first
					 * non-GROUP lock */

					ldlm_resource_insert_lock_after(lock, req);
					list_del_init(&lock->l_res_link);
					ldlm_resource_insert_lock_after(req, lock);
					break;
				}
				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* found it */
					ldlm_resource_insert_lock_after(lock,
									req);
					break;
				}
				continue;
			}
			if (unlikely(lock->l_req_mode == LCK_GROUP)) {
				/* If compared lock is GROUP, then requested is
				 * PR/PW so this is not compatible; extent
				 * range does not matter */
				if (*flags & (LDLM_FL_BLOCK_NOWAIT
					      | LDLM_FL_SPECULATIVE)) {
					compat = -EWOULDBLOCK;
					goto destroylock;
				} else {
					*flags |= LDLM_FL_NO_TIMEOUT;
				}
			} else if (lock->l_policy_data.l_extent.end < req_start ||
				   lock->l_policy_data.l_extent.start > req_end) {
				/* if a non-group lock doesn't overlap, skip it */
				continue;
			} else if (lock->l_req_extent.end < req_start ||
				   lock->l_req_extent.start > req_end) {
				/* false contention, the requests don't really
				 * overlap */
				check_contention = 0;
			}
			if (!work_list)
				RETURN(0);

			if (*flags & LDLM_FL_SPECULATIVE) {
				compat = -EWOULDBLOCK;
				goto destroylock;
			}

			/* don't count conflicting glimpse locks */
			if (lock->l_req_mode == LCK_PR &&
			    lock->l_policy_data.l_extent.start == 0 &&
			    lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
				check_contention = 0;

			*contended_locks += check_contention;

			compat = 0;
			if (lock->l_blocking_ast &&
			    lock->l_req_mode != LCK_GROUP)
				ldlm_add_ast_work_item(lock, req, work_list);
		}
	}
	if (ldlm_check_contention(req, *contended_locks) &&
	    compat == 0 &&
	    (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
	    req->l_req_mode != LCK_GROUP &&
	    req_end - req_start <=
	    ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
		GOTO(destroylock, compat = -EUSERS);

	RETURN(compat);

destroylock:
	list_del_init(&req->l_res_link);
	ldlm_lock_destroy_nolock(req);
	*err = ELDLM_LOCK_ABORTED;
	RETURN(compat);
}
/**
 * Refresh the eviction timer for a cancelled lock.
 *
 * \param[in] lock	ldlm lock for refresh
 * \param[in] arg	ldlm prolong arguments, timeout, export, extent
 *			and counter are used
 */
void ldlm_lock_prolong_one(struct ldlm_lock *lock,
			   struct ldlm_prolong_args *arg)
{
	int timeout;

	if (arg->lpa_export != lock->l_export ||
	    lock->l_flags & LDLM_FL_DESTROYED)
		/* ignore unrelated locks */
		return;

	arg->lpa_locks_cnt++;

	if (!(lock->l_flags & LDLM_FL_AST_SENT))
		/* ignore locks not being cancelled */
		return;

	/* We are in the middle of the process - BL AST is sent, CANCEL
	 * is ahead. Take half of BL AT + IO AT process time.
	 */
	timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);

	LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);

	arg->lpa_blocks_cnt++;

	/* OK. this is a possible lock the user holds doing I/O
	 * let's refresh eviction timer for it.
	 */
	ldlm_refresh_waiting_lock(lock, timeout);
}
EXPORT_SYMBOL(ldlm_lock_prolong_one);
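/*
 * Illustrative numbers (hypothetical values): if arg->lpa_timeout is 30s
 * and ldlm_bl_timeout() works out to 20s for this lock, the waiting-lock
 * timer is pushed out to 30 + 20/2 = 40 seconds, giving the client time to
 * finish its in-flight I/O and send the CANCEL.
 */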
static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
						   void *data)
{
	struct ldlm_prolong_args *arg = data;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_lock *lock;
	ENTRY;

	LASSERT(!list_empty(&node->li_group));

	list_for_each_entry(lock, &node->li_group, l_sl_policy) {
		ldlm_lock_prolong_one(lock, arg);
	}

	RETURN(INTERVAL_ITER_CONT);
}
/**
 * Walk through granted tree and prolong locks if they overlap the extent.
 *
 * \param[in] arg	prolong args
 */
void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
{
	struct ldlm_interval_tree *tree;
	struct ldlm_resource *res;
	struct interval_node_extent ex = { .start = arg->lpa_extent.start,
					   .end = arg->lpa_extent.end };
	int idx;
	ENTRY;

	res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
				&arg->lpa_resid, LDLM_EXTENT, 0);
	if (IS_ERR(res)) {
		CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
		       arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
		RETURN_EXIT;
	}

	lock_res(res);
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		tree = &res->lr_itree[idx];
		if (tree->lit_root == NULL) /* empty tree, skipped */
			continue;

		/* There is no possibility to check for the groupID
		 * so all the group locks are considered as valid
		 * here, especially because the client is supposed
		 * to check it has such a lock before sending an RPC.
		 */
		if (!(tree->lit_mode & arg->lpa_mode))
			continue;

		interval_search(tree->lit_root, &ex,
				ldlm_resource_prolong_cb, arg);
	}
	unlock_res(res);

	ldlm_resource_putref(res);

	EXIT;
}
EXPORT_SYMBOL(ldlm_resource_prolong);
/**
 * Process a granting attempt for extent lock.
 * Must be called with ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues. The lock is granted if no conflicts are found in
 * either queue.
 */
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
			     enum ldlm_process_intention intention,
			     enum ldlm_error *err, struct list_head *work_list)
{
	struct ldlm_resource *res = lock->l_resource;
	struct list_head rpc_list;
	int rc, rc2;
	int contended_locks = 0;
	ENTRY;

	LASSERT(lock->l_granted_mode != lock->l_req_mode);
	LASSERT(list_empty(&res->lr_converting));
	LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
		!ldlm_is_ast_discard_data(lock));
	INIT_LIST_HEAD(&rpc_list);
	check_res_locked(res);
	*err = ELDLM_OK;

	if (intention == LDLM_PROCESS_RESCAN) {
		/* Careful observers will note that we don't handle -EWOULDBLOCK
		 * here, but it's ok for a non-obvious reason -- compat_queue
		 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT |
		 * SPECULATIVE). flags should always be zero here, and if that
		 * ever stops being true, we want to find out. */
		LASSERT(*flags == 0);
		rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
					      err, NULL, &contended_locks);
		if (rc == 1) {
			rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
						      flags, err, NULL,
						      &contended_locks);
		}
		if (rc == 0)
			RETURN(LDLM_ITER_STOP);

		ldlm_resource_unlink_lock(lock);

		if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
			ldlm_extent_policy(res, lock, flags);
		ldlm_grant_lock(lock, work_list);
		RETURN(LDLM_ITER_CONTINUE);
	}

	LASSERT((intention == LDLM_PROCESS_ENQUEUE && work_list == NULL) ||
		(intention == LDLM_PROCESS_RECOVERY && work_list != NULL));

	contended_locks = 0;
	rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
				      &rpc_list, &contended_locks);
	if (rc < 0)
		GOTO(out_rpc_list, rc);

	rc2 = 0;
	if (rc != 2) {
		rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
					       flags, err, &rpc_list,
					       &contended_locks);
		if (rc2 < 0)
			GOTO(out_rpc_list, rc = rc2);
	}

	if (rc + rc2 != 2) {
		/* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to force
		 * client to wait for the lock endlessly once the lock is
		 * granted. */
		rc = ldlm_handle_conflict_lock(lock, flags, &rpc_list,
					       LDLM_FL_NO_TIMEOUT);
	} else {
		ldlm_extent_policy(res, lock, flags);
		ldlm_resource_unlink_lock(lock);
		ldlm_grant_lock(lock, work_list);
		rc = 0;
	}

out_rpc_list:
	if (!list_empty(&rpc_list)) {
		LASSERT(!ldlm_is_ast_discard_data(lock));
		ldlm_discard_bl_list(&rpc_list);
	}

	RETURN(rc);
}
#endif /* HAVE_SERVER_SUPPORT */
struct ldlm_kms_shift_args {
	__u64	old_kms;
	__u64	kms;
	bool	complete;
};
/* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
					    void *args)
{
	struct ldlm_kms_shift_args *arg = args;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_lock *tmplock;
	struct ldlm_lock *lock = NULL;
	ENTRY;

	/* Since all locks in an interval have the same extent, we can just
	 * use the first lock without kms_ignore set. */
	list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
		if (ldlm_is_kms_ignore(tmplock))
			continue;

		lock = tmplock;
		break;
	}

	/* No locks in this interval without kms_ignore set */
	if (!lock)
		RETURN(INTERVAL_ITER_CONT);

	/* If we find a lock with a greater or equal kms, we are not the
	 * highest lock (or we share that distinction with another lock), and
	 * don't need to update KMS. Return old_kms and stop looking. */
	if (lock->l_policy_data.l_extent.end >= arg->old_kms) {
		arg->kms = arg->old_kms;
		arg->complete = true;
		RETURN(INTERVAL_ITER_STOP);
	}

	if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
		arg->kms = lock->l_policy_data.l_extent.end + 1;

	/* Since interval_iterate_reverse starts with the highest lock and
	 * works down, for PW locks, we only need to check if we should update
	 * the kms, then stop walking the tree. PR locks are not exclusive, so
	 * the highest start does not imply the highest end and we must
	 * continue. (Only one group lock is allowed per resource, so this is
	 * irrelevant for group locks.) */
	if (lock->l_granted_mode == LCK_PW)
		RETURN(INTERVAL_ITER_STOP);

	RETURN(INTERVAL_ITER_CONT);
}
/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value, updating
 * it only if we were the highest lock.
 *
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval_tree *tree;
	struct ldlm_kms_shift_args args;
	int idx;
	ENTRY;

	args.old_kms = old_kms;
	args.kms = 0;
	args.complete = false;

	/* don't let another thread in ldlm_extent_shift_kms race in
	 * just after we finish and take our lock into account in its
	 * calculation of the kms */
	ldlm_set_kms_ignore(lock);

	/* We iterate over the lock trees, looking for the largest kms smaller
	 * than the current one. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		tree = &res->lr_itree[idx];

		/* If our already known kms is >= than the highest 'end' in
		 * this tree, we don't need to check this tree, because
		 * the kms from a tree can be lower than in_max_high (due to
		 * kms_ignore), but it can never be higher. */
		if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
			continue;

		interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
					 &args);

		/* this tells us we're not the highest lock, so we don't need
		 * to check the remaining trees */
		if (args.complete)
			break;
	}

	LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
		 args.old_kms);

	RETURN(args.kms);
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
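/*
 * Worked example (illustrative): with granted PW locks on [0, 4095] and
 * [4096, 8191] and old_kms = 8192, cancelling [4096, 8191] leaves
 * [0, 4095] as the highest remaining extent, so the new KMS is
 * 4095 + 1 = 4096.  Cancelling [0, 4095] instead leaves [4096, 8191],
 * whose end + 1 == 8192, so the KMS stays at 8192.
 */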
struct kmem_cache *ldlm_interval_slab;

struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
	struct ldlm_interval *node;
	ENTRY;

	LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
	OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
	if (node == NULL)
		RETURN(NULL);

	INIT_LIST_HEAD(&node->li_group);
	ldlm_interval_attach(node, lock);
	RETURN(node);
}
void ldlm_interval_free(struct ldlm_interval *node)
{
	if (node) {
		LASSERT(list_empty(&node->li_group));
		LASSERT(!interval_is_intree(&node->li_node));
		OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
	}
}
/* interval tree, for LDLM_EXTENT. */
void ldlm_interval_attach(struct ldlm_interval *n,
			  struct ldlm_lock *l)
{
	LASSERT(l->l_tree_node == NULL);
	LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

	list_add_tail(&l->l_sl_policy, &n->li_group);
	l->l_tree_node = n;
}
struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
	struct ldlm_interval *n = l->l_tree_node;

	if (n == NULL)
		return NULL;

	LASSERT(!list_empty(&n->li_group));
	l->l_tree_node = NULL;
	list_del_init(&l->l_sl_policy);

	return list_empty(&n->li_group) ? n : NULL;
}
static inline int ldlm_mode_to_index(enum ldlm_mode mode)
{
	int index;

	LASSERT(mode != 0);
	LASSERT(is_power_of_2(mode));
	for (index = -1; mode != 0; index++, mode >>= 1)
		;
	LASSERT(index < LCK_MODE_NUM);
	return index;
}
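/*
 * Example mapping (assuming the standard single-bit LCK_* values from
 * lustre_dlm.h): LCK_EX (1 << 0) maps to index 0, LCK_PW (1 << 1) to
 * index 1, and so on.  This is what makes the
 * "lock->l_granted_mode == 1 << idx" assertions in the add/unlink paths
 * below hold.
 */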
/** Add newly granted lock into interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
			  struct ldlm_lock *lock)
{
	struct interval_node *found, **root;
	struct ldlm_interval *node;
	struct ldlm_extent *extent;
	int idx, rc;

	LASSERT(lock->l_granted_mode == lock->l_req_mode);

	node = lock->l_tree_node;
	LASSERT(node != NULL);
	LASSERT(!interval_is_intree(&node->li_node));

	idx = ldlm_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

	/* node extent initialize */
	extent = &lock->l_policy_data.l_extent;

	rc = interval_set(&node->li_node, extent->start, extent->end);
	LASSERT(!rc);

	root = &res->lr_itree[idx].lit_root;
	found = interval_insert(&node->li_node, root);
	if (found) { /* The policy group found. */
		struct ldlm_interval *tmp = ldlm_interval_detach(lock);

		LASSERT(tmp != NULL);
		ldlm_interval_free(tmp);
		ldlm_interval_attach(to_ldlm_interval(found), lock);
	}
	res->lr_itree[idx].lit_size++;

	/* even though we use interval tree to manage the extent lock, we also
	 * add the locks into grant list, for debug purpose, .. */
	ldlm_resource_add_lock(res, &res->lr_granted, lock);

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
		struct ldlm_lock *lck;

		list_for_each_entry_reverse(lck, &res->lr_granted,
					    l_res_link) {
			if (lck == lock)
				continue;
			if (lockmode_compat(lck->l_granted_mode,
					    lock->l_granted_mode))
				continue;
			if (ldlm_extent_overlap(&lck->l_req_extent,
						&lock->l_req_extent)) {
				CDEBUG(D_ERROR,
				       "granting conflicting lock %p %p\n",
				       lck, lock);
				ldlm_resource_dump(D_ERROR, res);
				LBUG();
			}
		}
	}
}
/** Remove cancelled lock from resource interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval *node = lock->l_tree_node;
	struct ldlm_interval_tree *tree;
	int idx;

	if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
		return;

	idx = ldlm_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	tree = &res->lr_itree[idx];

	LASSERT(tree->lit_root != NULL); /* assure the tree is not null */

	tree->lit_size--;
	node = ldlm_interval_detach(lock);
	if (node) {
		interval_erase(&node->li_node, &tree->lit_root);
		ldlm_interval_free(node);
	}
}
void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				      union ldlm_policy_data *lpolicy)
{
	lpolicy->l_extent.start = wpolicy->l_extent.start;
	lpolicy->l_extent.end = wpolicy->l_extent.end;
	lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}

void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				      union ldlm_wire_policy_data *wpolicy)
{
	memset(wpolicy, 0, sizeof(*wpolicy));
	wpolicy->l_extent.start = lpolicy->l_extent.start;
	wpolicy->l_extent.end = lpolicy->l_extent.end;
	wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}