/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
/**
 * This file contains the implementation of the EXTENT lock type.
 *
 * The EXTENT lock type is for locking a contiguous range of offsets,
 * represented by 64-bit starting and ending offsets (both inclusive). There
 * are several extent lock modes, some of which may be mutually incompatible.
 * Extent locks are considered incompatible if their modes are incompatible
 * and their extents intersect. See the lock mode compatibility matrix in
 * lustre_dlm.h.
 */
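/*
 * Illustrative example (not from the original source): a PW lock on
 * [0, 4095] and a PR lock on [4000, 8191] conflict, because PW and PR
 * are incompatible modes and the ranges intersect. Two PR locks are
 * compatible whatever their extents, and a PW lock on [0, 4095] does not
 * conflict with a PR lock on [4096, 8191] because the extents are disjoint.
 */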
#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

#ifdef HAVE_SERVER_SUPPORT
# define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
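/*
 * Note (added for clarity): LDLM_MAX_GROWN_EXTENT is 32MiB - 1, i.e. the
 * largest distance past req_start to which a contended PW/CW lock is grown
 * by ldlm_extent_internal_policy_fixup() below, so heavily shared resources
 * do not accumulate huge write extents.
 */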
/**
 * Fix up the ldlm_extent after expanding it.
 *
 * After expansion has been done, we might still want to do certain adjusting
 * based on overall contention of the resource and the like to avoid granting
 * overly wide locks.
 */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
                                              struct ldlm_extent *new_ex,
                                              int conflicting)
{
        enum ldlm_mode req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        __u64 req_align, mask;

        ENTRY;
        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }

        if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
                EXIT;
                return;
        }
        /* We need to ensure that the lock extent is properly aligned to what
         * the client requested. Also we need to make sure it's also server
         * page size aligned, otherwise a server page can be covered by two
         * write locks. */
        mask = PAGE_SIZE;
        req_align = (req_end + 1) | req_start;
        if (req_align != 0 && (req_align & (mask - 1)) == 0) {
                while ((req_align & mask) == 0)
                        mask <<= 1;
        }
        mask -= 1;
        /* We can only shrink the lock, not grow it.
         * This should never cause the lock to become smaller than requested,
         * since the requested lock was already aligned on these boundaries. */
        new_ex->start = ((new_ex->start - 1) | mask) + 1;
        new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
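        /* Illustrative example (values assumed, PAGE_SIZE = 0x1000): for a
         * request [0x100000, 0x1FFFFF], req_align = 0x200000 | 0x100000 =
         * 0x300000, so mask grows to 0x100000 and becomes 0xFFFFF after the
         * final decrement. A proposed extent [0x1, 0x3FFFFE] is then shrunk
         * to the aligned range [0x100000, 0x2FFFFF], which still contains
         * the requested [0x100000, 0x1FFFFF]. */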
        LASSERTF(new_ex->start <= req_start,
                 "mask %#llx grant start %llu req start %llu\n",
                 mask, new_ex->start, req_start);
        LASSERTF(new_ex->end >= req_end,
                 "mask %#llx grant end %llu req end %llu\n",
                 mask, new_ex->end, req_end);
        EXIT;
}
/**
 * Return the maximum extent that:
 * - contains the requested extent
 * - does not overlap existing conflicting extents outside the requested one
 *
 * This allows clients to request a small required extent range, but if there
 * is no contention on the lock the full lock can be granted to the client.
 * This avoids the need for many smaller lock requests to be granted in the
 * common (uncontended) case.
 *
 * Use the interval tree to expand the lock extent for granted locks.
 */
static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
                                                struct ldlm_extent *new_ex)
{
        struct ldlm_resource *res = req->l_resource;
        enum ldlm_mode req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_interval_tree *tree;
        struct interval_node_extent limiter = {
                .start  = new_ex->start,
                .end    = new_ex->end,
        };
        int conflicting = 0;
        int idx;

        ENTRY;

        lockmode_verify(req_mode);
        /* Using interval tree to handle the LDLM extent granted locks. */
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                struct interval_node_extent ext = {
                        .start  = req_start,
                        .end    = req_end,
                };

                tree = &res->lr_itree[idx];
                if (lockmode_compat(tree->lit_mode, req_mode))
                        continue;

                conflicting += tree->lit_size;
                if (conflicting > 4)
                        limiter.start = req_start;

                if (interval_is_overlapped(tree->lit_root, &ext))
                        CDEBUG(D_INFO,
                               "req_mode = %d, tree->lit_mode = %d, "
                               "tree->lit_size = %d\n",
                               req_mode, tree->lit_mode, tree->lit_size);
                interval_expand(tree->lit_root, &ext, &limiter);
                limiter.start = max(limiter.start, ext.start);
                limiter.end = min(limiter.end, ext.end);
                if (limiter.start == req_start && limiter.end == req_end)
                        break;
        }
        new_ex->start = limiter.start;
        new_ex->end = limiter.end;
        LASSERT(new_ex->start <= req_start);
        LASSERT(new_ex->end >= req_end);

        ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
        EXIT;
}
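/*
 * Illustrative example (not from the original source): if the request is
 * [2000, 2999] and the only conflicting granted locks cover [0, 999] and
 * [5000, 9999], interval_expand() limits the expansion to the gap between
 * them, so the request may be grown up to [1000, 4999] without creating a
 * new conflict.
 */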
/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
static void
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
                                    struct ldlm_extent *new_ex)
{
        struct ldlm_resource *res = req->l_resource;
        enum ldlm_mode req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_lock *lock;
        int conflicting = 0;

        ENTRY;

        lockmode_verify(req_mode);
        /* for waiting locks */
        list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
                struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;

                /* We already hit the minimum requested size, search no more */
                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;
                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;
                /* If lock doesn't overlap new_ex, skip it. */
                if (!ldlm_extent_overlap(l_extent, new_ex))
                        continue;

                /* Locks conflicting in requested extents and we can't satisfy
                 * both locks, so ignore it. Either we will ping-pong this
                 * extent (we would regardless of what extent we granted) or
                 * lock is unused and it shouldn't limit our extent growth. */
                if (ldlm_extent_overlap(&lock->l_req_extent,
                                        &req->l_req_extent))
                        continue;
                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * l_req_extent->end strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end + 1,
                                                    req_start);
                }
                /* If we need to cancel this lock anyways because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }
        ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
        EXIT;
}
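/*
 * Illustrative example (not from the original source): with a request of
 * [100, 199], a waiting incompatible lock on [0, 49] limits downward growth
 * to new_ex->start = min(49 + 1, 100) = 50, while a waiting lock on
 * [300, 399] limits upward growth to new_ex->end = max(300 - 1, 199) = 299.
 */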
/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, __u64 *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
        if (lock->l_export == NULL)
                /*
                 * this is a local lock taken by a server (e.g., as a part of
                 * OST-side locking, or unlink handling). Expansion doesn't
                 * make a lot of sense for local locks, because they are
                 * dropped immediately on operation completion and would only
                 * conflict with other threads.
                 */
                return;

        if (lock->l_policy_data.l_extent.start == 0 &&
            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                /* fast-path whole file locks */
                return;
        /* Because reprocess_queue zeroes flags and uses it to return
         * LDLM_FL_LOCK_CHANGED, we must check for the NO_EXPANSION flag
         * in the lock flags rather than the 'flags' argument. */
        if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
                ldlm_extent_internal_policy_granted(lock, &new_ex);
                ldlm_extent_internal_policy_waiting(lock, &new_ex);
        } else {
                LDLM_DEBUG(lock, "Not expanding manually requested lock.\n");
                new_ex.start = lock->l_policy_data.l_extent.start;
                new_ex.end = lock->l_policy_data.l_extent.end;
                /* In case the request is not on correct boundaries, we call
                 * fixup (normally called in ldlm_extent_internal_policy_*). */
                ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
        }
        if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}
static bool ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
        struct ldlm_resource *res = lock->l_resource;
        time64_t now = ktime_get_seconds();

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
                return true;

        CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
        if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
                res->lr_contention_time = now;

        return now < res->lr_contention_time +
                     ldlm_res_to_ns(res)->ns_contention_time;
}
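/*
 * Illustrative example (tunable values assumed): if ns_contended_locks is 32
 * and a scan counts 40 conflicting locks, lr_contention_time is reset to
 * now, and for the next ns_contention_time seconds the resource is reported
 * as contended; with LDLM_FL_DENY_ON_CONTENTION set, sufficiently small
 * requests are then denied with -EUSERS in ldlm_extent_compat_queue().
 */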
struct ldlm_extent_compat_args {
        struct list_head *work_list;
        struct ldlm_lock *lock;
        enum ldlm_mode mode;
        int *locks;
        int *compat;
};
static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
                                                void *data)
{
        struct ldlm_extent_compat_args *priv = data;
        struct ldlm_interval *node = to_ldlm_interval(n);
        struct ldlm_extent *extent;
        struct list_head *work_list = priv->work_list;
        struct ldlm_lock *lock, *enq = priv->lock;
        enum ldlm_mode mode = priv->mode;
        int count = 0;

        ENTRY;

        LASSERT(!list_empty(&node->li_group));
        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                /* interval tree is for granted locks */
                LASSERTF(mode == lock->l_granted_mode,
                         "mode = %s, lock->l_granted_mode = %s\n",
                         ldlm_lockname[mode],
                         ldlm_lockname[lock->l_granted_mode]);
                count++;
                if (lock->l_blocking_ast &&
                    lock->l_granted_mode != LCK_GROUP)
                        ldlm_add_ast_work_item(lock, enq, work_list);
        }
        /* don't count conflicting glimpse locks */
        extent = ldlm_interval_extent(node);
        if (!(mode == LCK_PR &&
              extent->start == 0 && extent->end == OBD_OBJECT_EOF))
                *priv->locks += count;

        if (priv->compat)
                *priv->compat = 0;

        RETURN(INTERVAL_ITER_CONT);
}
/**
 * Determine if the lock is compatible with all locks on the queue.
 *
 * If \a work_list is provided, conflicting locks are linked there.
 * If \a work_list is not provided, we exit this function on the first
 * conflict.
 *
 * \retval 0 if the lock is not compatible
 * \retval 1 if the lock is compatible
 * \retval 2 if \a req is a group lock and it is compatible and requires
 *           no further checking
 * \retval negative error, such as EAGAIN for group locks
 */
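/*
 * Illustrative interpretation at a call site (see ldlm_process_extent_lock()
 * below): a return of 1 from both the granted and waiting queues means the
 * lock can be granted, 0 means it must wait, and a negative value means the
 * request has already been destroyed, e.g. -EWOULDBLOCK for a
 * LDLM_FL_BLOCK_NOWAIT request that hit a conflict.
 */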
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         __u64 *flags, struct list_head *work_list,
                         int *contended_locks)
{
        struct ldlm_resource *res = req->l_resource;
        enum ldlm_mode req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_lock *lock;
        int check_contention;
        int compat = 1;

        ENTRY;

        lockmode_verify(req_mode);
        /* Using interval tree for granted locks */
        if (queue == &res->lr_granted) {
                struct ldlm_interval_tree *tree;
                struct ldlm_extent_compat_args data = {.work_list = work_list,
                                                       .lock = req,
                                                       .locks = contended_locks,
                                                       .compat = &compat };
                struct interval_node_extent ex = { .start = req_start,
                                                   .end = req_end };
                int idx, rc;

                for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                        tree = &res->lr_itree[idx];
                        if (tree->lit_root == NULL) /* empty tree, skipped */
                                continue;
                        data.mode = tree->lit_mode;
                        if (lockmode_compat(req_mode, tree->lit_mode)) {
                                struct ldlm_interval *node;
                                struct ldlm_extent *extent;

                                if (req_mode != LCK_GROUP)
                                        continue;

                                /* group lock, grant it immediately if
                                 * compatible */
                                node = to_ldlm_interval(tree->lit_root);
                                extent = ldlm_interval_extent(node);
                                if (req->l_policy_data.l_extent.gid ==
                                    extent->gid)
                                        RETURN(2);
                                continue;
                        }
                        if (tree->lit_mode == LCK_GROUP) {
                                if (*flags & (LDLM_FL_BLOCK_NOWAIT |
                                              LDLM_FL_SPECULATIVE)) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }

                                *flags |= LDLM_FL_NO_TIMEOUT;

                                /* if work list is not NULL, add all
                                 * locks in the tree to the work list */
                                compat = 0;
                                interval_iterate(tree->lit_root,
                                                 ldlm_extent_compat_cb, &data);
                                continue;
                        }
                        /* We've found a potentially blocking lock, check
                         * compatibility. This handles locks other than GROUP
                         * locks, which are handled separately above.
                         *
                         * Locks with FL_SPECULATIVE are asynchronous requests
                         * which must never wait behind another lock, so they
                         * fail if any conflicting lock is found. */
                        if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
                                rc = interval_is_overlapped(tree->lit_root,
                                                            &ex);
                                if (rc) {
                                        if (!work_list) {
                                                RETURN(0);
                                        } else {
                                                compat = -EWOULDBLOCK;
                                                goto destroylock;
                                        }
                                }
                        } else {
                                interval_search(tree->lit_root, &ex,
                                                ldlm_extent_compat_cb, &data);
                                if (!list_empty(work_list) && compat)
                                        GOTO(destroylock, compat = -EAGAIN);
                        }
                }
        } else { /* for waiting queue */
                list_for_each_entry(lock, queue, l_res_link) {
                        check_contention = 1;

                        /* We stop walking the queue if we hit ourselves so
                         * we don't take conflicting locks enqueued after us
                         * into account, or we'd wait forever. */
                        if (req == lock)
                                break;
                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_req_mode, req_mode)) {
                                if (req_mode == LCK_PR &&
                                    ((lock->l_policy_data.l_extent.start <=
                                      req->l_policy_data.l_extent.start) &&
                                     (lock->l_policy_data.l_extent.end >=
                                      req->l_policy_data.l_extent.end))) {
                                        /* If we met a PR lock just like us or
                                         * wider, and nobody down the list
                                         * conflicted with it, that means we
                                         * can skip processing of the rest of
                                         * the list and safely place ourselves
                                         * at the end of the list, or grant
                                         * (depending on whether we met a
                                         * conflicting lock earlier in the
                                         * list). In case of the first enqueue
                                         * only, we continue traversing if
                                         * there is something conflicting down
                                         * the list because we need to make
                                         * sure that something is marked as
                                         * AST_SENT as well; in case of an
                                         * empty work list we would exit on
                                         * the first conflict met. */
                                        /* There IS a case where such flag is
                                         * not set for a lock, yet it blocks
                                         * something. Luckily for us this is
                                         * only during destroy, so the lock is
                                         * exclusive. So here we are safe. */
                                        if (!ldlm_is_ast_sent(lock))
                                                RETURN(compat);
                                }
                                /* non-group locks are compatible, overlap
                                 * doesn't matter */
                                if (likely(req_mode != LCK_GROUP))
                                        continue;
                                /* If we are trying to get a GROUP lock and
                                 * there is another one of this kind, we need
                                 * to compare gid */
                                if (req->l_policy_data.l_extent.gid ==
                                    lock->l_policy_data.l_extent.gid) {
                                        /* If existing lock with matched gid
                                         * is granted, we grant new one too. */
                                        if (ldlm_is_granted(lock))
                                                RETURN(2);

                                        /* Otherwise we are scanning the queue
                                         * of waiting locks and it means the
                                         * current request would block along
                                         * with the existing lock (that is
                                         * already blocked).
                                         * If we are in nonblocking mode,
                                         * return immediately */
                                        if (*flags & (LDLM_FL_BLOCK_NOWAIT
                                                      | LDLM_FL_SPECULATIVE)) {
                                                compat = -EWOULDBLOCK;
                                                goto destroylock;
                                        }
                                        /* If this group lock is compatible
                                         * with another group lock on the
                                         * waiting list, they must be together
                                         * in the list, so they can be granted
                                         * at the same time. Otherwise the
                                         * later lock can get stuck behind
                                         * another, incompatible, lock. */
                                        ldlm_resource_insert_lock_after(lock,
                                                                        req);
                                        /* Because 'lock' is not granted, we
                                         * can stop processing this queue and
                                         * return immediately. There is no
                                         * need to check the rest of the
                                         * list. */
                                        RETURN(0);
                                }
                        }
                        if (unlikely(req_mode == LCK_GROUP &&
                                     !ldlm_is_granted(lock))) {
                                compat = 0;
                                if (lock->l_req_mode != LCK_GROUP) {
                                        /* Ok, we hit a non-GROUP lock, there
                                         * should be no more GROUP locks later
                                         * on, queue in front of the first
                                         * non-GROUP lock */
                                        ldlm_resource_insert_lock_before(lock,
                                                                         req);
                                        break;
                                }
                                LASSERT(req->l_policy_data.l_extent.gid !=
                                        lock->l_policy_data.l_extent.gid);
                                continue;
                        }
                        if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                                /* If compared lock is GROUP, then requested is
                                 * PR/PW so this is not compatible; extent
                                 * range does not matter */
                                if (*flags & (LDLM_FL_BLOCK_NOWAIT
                                              | LDLM_FL_SPECULATIVE)) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }
                        } else if (lock->l_policy_data.l_extent.end < req_start ||
                                   lock->l_policy_data.l_extent.start > req_end) {
                                /* if a non-group lock doesn't overlap, skip it */
                                continue;
                        } else if (lock->l_req_extent.end < req_start ||
                                   lock->l_req_extent.start > req_end) {
                                /* false contention, the requests don't really
                                 * overlap */
                                check_contention = 0;
                        }
                        if (*flags & LDLM_FL_SPECULATIVE) {
                                compat = -EWOULDBLOCK;
                                goto destroylock;
                        }
                        /* don't count conflicting glimpse locks */
                        if (lock->l_req_mode == LCK_PR &&
                            lock->l_policy_data.l_extent.start == 0 &&
                            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                                check_contention = 0;

                        *contended_locks += check_contention;

                        compat = 0;
                        if (lock->l_blocking_ast &&
                            lock->l_req_mode != LCK_GROUP)
                                ldlm_add_ast_work_item(lock, req, work_list);
                }
        }
        if (ldlm_check_contention(req, *contended_locks) &&
            compat == 0 &&
            (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
            req->l_req_mode != LCK_GROUP &&
            req_end - req_start <=
            ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
                GOTO(destroylock, compat = -EUSERS);

        RETURN(compat);

destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy_nolock(req);
        RETURN(compat);
}
/**
 * This function refreshes the eviction timer for a cancelled lock.
 *
 * \param[in] lock	ldlm lock for refresh
 * \param[in] arg	ldlm prolong arguments, timeout, export, extent
 *			and counter are used
 */
void ldlm_lock_prolong_one(struct ldlm_lock *lock,
                           struct ldlm_prolong_args *arg)
{
        time64_t timeout;

        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PROLONG_PAUSE, 3);

        if (arg->lpa_export != lock->l_export ||
            lock->l_flags & LDLM_FL_DESTROYED)
                /* ignore unrelated locks */
                return;

        arg->lpa_locks_cnt++;

        if (!(lock->l_flags & LDLM_FL_AST_SENT))
                /* ignore locks not being cancelled */
                return;

        arg->lpa_blocks_cnt++;

        /* OK, this is a possible lock the user holds doing I/O;
         * let's refresh the eviction timer for it. */
        timeout = ldlm_bl_timeout_by_rpc(arg->lpa_req);
        LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);
        ldlm_refresh_waiting_lock(lock, timeout);
}
EXPORT_SYMBOL(ldlm_lock_prolong_one);
static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
                                                   void *data)
{
        struct ldlm_prolong_args *arg = data;
        struct ldlm_interval *node = to_ldlm_interval(n);
        struct ldlm_lock *lock;

        ENTRY;

        LASSERT(!list_empty(&node->li_group));

        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                ldlm_lock_prolong_one(lock, arg);
        }

        RETURN(INTERVAL_ITER_CONT);
}
/**
 * Walk through the granted trees and prolong locks if they overlap the extent.
 *
 * \param[in] arg	prolong args
 */
void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
{
        struct ldlm_interval_tree *tree;
        struct ldlm_resource *res;
        struct interval_node_extent ex = { .start = arg->lpa_extent.start,
                                           .end = arg->lpa_extent.end };
        int idx;

        ENTRY;

        res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace,
                                &arg->lpa_resid, LDLM_EXTENT, 0);
        if (IS_ERR(res)) {
                CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
                       arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
                RETURN_EXIT;
        }

        lock_res(res);
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                tree = &res->lr_itree[idx];
                if (tree->lit_root == NULL) /* empty tree, skipped */
                        continue;

                /* There is no possibility to check for the groupID,
                 * so all the group locks are considered as valid
                 * here, especially because the client is supposed
                 * to check it has such a lock before sending an RPC. */
                if (!(tree->lit_mode & arg->lpa_mode))
                        continue;

                interval_search(tree->lit_root, &ex,
                                ldlm_resource_prolong_cb, arg);
        }
        unlock_res(res);

        ldlm_resource_putref(res);

        EXIT;
}
EXPORT_SYMBOL(ldlm_resource_prolong);
/**
 * Process a granting attempt for extent lock.
 * Must be called with ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues. The lock is granted if no conflicts are found in
 * either queue.
 */
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
                             enum ldlm_process_intention intention,
                             enum ldlm_error *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        int rc, rc2 = 0;
        int contended_locks = 0;
        struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
                                                        NULL : work_list;

        ENTRY;

        LASSERT(!ldlm_is_granted(lock));
        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !ldlm_is_ast_discard_data(lock));
        check_res_locked(res);
        *err = ELDLM_OK;
        if (intention == LDLM_PROCESS_RESCAN) {
                /* Careful observers will note that we don't handle -EAGAIN
                 * here, but it's ok for a non-obvious reason -- compat_queue
                 * can only return -EAGAIN if (flags & BLOCK_NOWAIT |
                 * SPECULATIVE). flags should always be zero here, and if that
                 * ever stops being true, we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                              NULL, &contended_locks);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, NULL,
                                                      &contended_locks);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);
                ldlm_resource_unlink_lock(lock);

                if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
                        ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, grant_work);
                RETURN(LDLM_ITER_CONTINUE);
        }
        contended_locks = 0;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                      work_list, &contended_locks);
        if (rc < 0)
                GOTO(out, *err = rc);

        if (rc != 2) {
                rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                               flags, work_list,
                                               &contended_locks);
                if (rc2 < 0)
                        GOTO(out, *err = rc = rc2);
        }

        if (rc + rc2 == 2) {
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, grant_work);
        } else {
                /* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to
                 * force client to wait for the lock endlessly once
                 * the lock is enqueued -bzzz */
                *flags |= LDLM_FL_NO_TIMEOUT;
        }

        RETURN(LDLM_ITER_CONTINUE);
out:
        return rc;
}
#endif /* HAVE_SERVER_SUPPORT */
struct ldlm_kms_shift_args {
        __u64   old_kms;
        __u64   kms;
        bool    complete;
};

/* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
                                            void *args)
{
        struct ldlm_kms_shift_args *arg = args;
        struct ldlm_interval *node = to_ldlm_interval(n);
        struct ldlm_lock *tmplock;
        struct ldlm_lock *lock = NULL;

        ENTRY;

        /* Since all locks in an interval have the same extent, we can just
         * use the first lock without kms_ignore set. */
        list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
                if (ldlm_is_kms_ignore(tmplock))
                        continue;

                lock = tmplock;
                break;
        }

        /* No locks in this interval without kms_ignore set */
        if (!lock)
                RETURN(INTERVAL_ITER_CONT);
        /* If we find a lock with a greater or equal kms, we are not the
         * highest lock (or we share that distinction with another lock), and
         * don't need to update KMS. Return old_kms and stop looking. */
        if (lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF ||
            lock->l_policy_data.l_extent.end + 1 >= arg->old_kms) {
                arg->kms = arg->old_kms;
                arg->complete = true;
                RETURN(INTERVAL_ITER_STOP);
        }

        if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
                arg->kms = lock->l_policy_data.l_extent.end + 1;
        /* Since interval_iterate_reverse starts with the highest lock and
         * works down, for PW locks, we only need to check if we should update
         * the kms, then stop walking the tree. PR locks are not exclusive, so
         * the highest start does not imply the highest end and we must
         * continue. (Only one group lock is allowed per resource, so this is
         * irrelevant for group locks.) */
        if (lock->l_granted_mode == LCK_PW)
                RETURN(INTERVAL_ITER_STOP);
        else
                RETURN(INTERVAL_ITER_CONT);
}
/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock". This function returns the new KMS value, updating
 * it only if we were the highest lock.
 *
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x, y] protects a KMS of up to y + 1 bytes! */
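/*
 * Illustrative example (not from the original source): if the cancelled
 * lock covered [0, 8191] and old_kms is 8192, while the highest remaining
 * granted lock without kms_ignore ends at 4095, the new KMS becomes
 * 4095 + 1 = 4096. If any remaining lock ends at or beyond old_kms - 1,
 * old_kms is returned unchanged.
 */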
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_interval_tree *tree;
        struct ldlm_kms_shift_args args;
        int idx;

        ENTRY;

        args.old_kms = old_kms;
        args.kms = 0;
        args.complete = false;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        ldlm_set_kms_ignore(lock);

        /* We iterate over the lock trees, looking for the largest kms smaller
         * than the current one. */
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                tree = &res->lr_itree[idx];

                /* If our already known kms is >= the highest 'end' in
                 * this tree, we don't need to check this tree, because
                 * the kms from a tree can be lower than in_max_high (due to
                 * kms_ignore), but it can never be higher. */
                if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
                        continue;

                interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
                                         &args);

                /* this tells us we're not the highest lock, so we don't need
                 * to check the remaining trees */
                if (args.complete)
                        break;
        }

        LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
                 args.old_kms);

        RETURN(args.kms);
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
struct kmem_cache *ldlm_interval_slab;

static struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
        struct ldlm_interval *node;

        ENTRY;

        LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
        OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
        if (node == NULL)
                RETURN(NULL);

        INIT_LIST_HEAD(&node->li_group);
        ldlm_interval_attach(node, lock);
        RETURN(node);
}
void ldlm_interval_free(struct ldlm_interval *node)
{
        if (node) {
                LASSERT(list_empty(&node->li_group));
                LASSERT(!interval_is_intree(&node->li_node));
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        }
}
/* interval tree, for LDLM_EXTENT. */
void ldlm_interval_attach(struct ldlm_interval *n,
                          struct ldlm_lock *l)
{
        LASSERT(l->l_tree_node == NULL);
        LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

        list_add_tail(&l->l_sl_policy, &n->li_group);
        l->l_tree_node = n;
}
struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
        struct ldlm_interval *n = l->l_tree_node;

        if (n == NULL)
                return NULL;

        LASSERT(!list_empty(&n->li_group));
        l->l_tree_node = NULL;
        list_del_init(&l->l_sl_policy);

        return list_empty(&n->li_group) ? n : NULL;
}
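/*
 * Note (added for clarity): a single ldlm_interval node is shared by all
 * granted locks with an identical extent; the locks hang off li_group.
 * ldlm_interval_detach() therefore returns the node only when the last
 * lock leaves the group, telling the caller it must erase the node from
 * the tree and free it (see ldlm_extent_unlink_lock() below).
 */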
static inline int ldlm_mode_to_index(enum ldlm_mode mode)
{
        int index;

        LASSERT(mode != 0);
        LASSERT(is_power_of_2(mode));
        index = ilog2(mode);
        LASSERT(index < LCK_MODE_NUM);

        return index;
}
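/*
 * Illustrative mapping (assuming the standard LDLM mode bits): LCK_EX (0x1)
 * maps to index 0, LCK_PW (0x2) to index 1 and LCK_PR (0x4) to index 2, so
 * for example res->lr_itree[2] is the interval tree of granted PR locks.
 */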
int ldlm_extent_alloc_lock(struct ldlm_lock *lock)
{
        lock->l_tree_node = NULL;
        if (ldlm_interval_alloc(lock) == NULL)
                return -ENOMEM;

        return 0;
}
/** Add newly granted lock into interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
                          struct ldlm_lock *lock)
{
        struct interval_node *found, **root;
        struct ldlm_interval *node;
        struct ldlm_extent *extent;
        int idx, rc;

        LASSERT(ldlm_is_granted(lock));

        node = lock->l_tree_node;
        LASSERT(node != NULL);
        LASSERT(!interval_is_intree(&node->li_node));

        idx = ldlm_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == BIT(idx));
        LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
        /* node extent initialize */
        extent = &lock->l_policy_data.l_extent;
        rc = interval_set(&node->li_node, extent->start, extent->end);
        LASSERT(!rc);

        root = &res->lr_itree[idx].lit_root;
        found = interval_insert(&node->li_node, root);
        if (found) { /* The policy group was found. */
                struct ldlm_interval *tmp = ldlm_interval_detach(lock);

                LASSERT(tmp != NULL);
                ldlm_interval_free(tmp);
                ldlm_interval_attach(to_ldlm_interval(found), lock);
        }
        res->lr_itree[idx].lit_size++;
        /* even though we use the interval tree to manage the extent locks,
         * we also add the locks into the grant list, for debug purposes */
        ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
                struct ldlm_lock *lck;

                list_for_each_entry_reverse(lck, &res->lr_granted,
                                            l_res_link) {
                        if (lck == lock)
                                continue;
                        if (lockmode_compat(lck->l_granted_mode,
                                            lock->l_granted_mode))
                                continue;
                        if (ldlm_extent_overlap(&lck->l_req_extent,
                                                &lock->l_req_extent)) {
                                CDEBUG(D_ERROR, "granting conflicting lock %p "
                                       "%p\n", lck, lock);
                                ldlm_resource_dump(D_ERROR, res);
                                LBUG();
                        }
                }
        }
}
/** Remove cancelled lock from resource interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_interval *node = lock->l_tree_node;
        struct ldlm_interval_tree *tree;
        int idx;

        if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
                return;

        idx = ldlm_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == BIT(idx));
        tree = &res->lr_itree[idx];

        LASSERT(tree->lit_root != NULL); /* assure the tree is not null */

        tree->lit_size--;
        node = ldlm_interval_detach(lock);
        if (node) {
                interval_erase(&node->li_node, &tree->lit_root);
                ldlm_interval_free(node);
        }
}
void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
                                      union ldlm_policy_data *lpolicy)
{
        lpolicy->l_extent.start = wpolicy->l_extent.start;
        lpolicy->l_extent.end = wpolicy->l_extent.end;
        lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}
void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
                                      union ldlm_wire_policy_data *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_extent.start = lpolicy->l_extent.start;
        wpolicy->l_extent.end = lpolicy->l_extent.end;
        wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}