/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
/**
 * This file contains the implementation of the EXTENT lock type.
 *
 * EXTENT lock type is for locking a contiguous range of values, represented
 * by 64-bit starting and ending offsets (inclusive). There are several extent
 * lock modes, some of which may be mutually incompatible. Extent locks are
 * considered incompatible if their modes are incompatible and their extents
 * intersect. See the lock mode compatibility matrix in lustre_dlm.h.
 */
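/*
 * For example, assuming the standard extent lock modes: a PW lock on
 * [0, 4095] and a PW lock on [4096, 8191] are compatible (the modes
 * conflict, but the extents do not intersect), while a PW lock on
 * [0, 4095] and a PR lock on [2048, 6143] are incompatible (conflicting
 * modes and intersecting extents).  Two PR locks are compatible
 * regardless of overlap.
 */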
#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

#ifdef HAVE_SERVER_SUPPORT
# define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
/**
 * Fix up the ldlm_extent after expanding it.
 *
 * After expansion has been done, we might still want to do certain adjusting
 * based on overall contention of the resource and the like to avoid granting
 * overly wide locks.
 */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
					      struct ldlm_extent *new_ex,
					      int conflicting)
{
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	__u64 req_align, mask;

	if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
		if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
			new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
					  new_ex->end);
	}

	if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
		EXIT;
		return;
	}

	/* We need to ensure that the lock extent is properly aligned to what
	 * the client requested.  Also we need to make sure it's also server
	 * page size aligned, otherwise a server page can be covered by two
	 * write locks. */
	mask = PAGE_SIZE;
	req_align = (req_end + 1) | req_start;
	if (req_align != 0 && (req_align & (mask - 1)) == 0) {
		while ((req_align & mask) == 0)
			mask <<= 1;
	}
	mask -= 1;
	/* We can only shrink the lock, not grow it.
	 * This should never cause the lock to be smaller than requested,
	 * since the requested lock was already aligned on these boundaries. */
	new_ex->start = ((new_ex->start - 1) | mask) + 1;
	new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
	LASSERTF(new_ex->start <= req_start,
		 "mask %#llx grant start %llu req start %llu\n",
		 mask, new_ex->start, req_start);
	LASSERTF(new_ex->end >= req_end,
		 "mask %#llx grant end %llu req end %llu\n",
		 mask, new_ex->end, req_end);
}
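/*
 * Worked example of the alignment above, assuming PAGE_SIZE == 4096:
 * for a client request of [4096, 8191], req_align = 8192 | 4096 = 12288
 * and mask ends up as 4095, so an expanded extent of [1000, 1000000]
 * is rounded inward to page boundaries:
 *
 *	start = ((1000 - 1) | 4095) + 1 = 4096
 *	end   = ((1000000 + 1) & ~4095) - 1 = 999423
 *
 * i.e. the expanded extent still covers the request but is shrunk to
 * page-aligned boundaries, never grown.
 */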
/**
 * Return the maximum extent that:
 * - contains the requested extent
 * - does not overlap existing conflicting extents outside the requested one
 *
 * This allows clients to request a small required extent range, but if there
 * is no contention on the lock the full lock can be granted to the client.
 * This avoids the need for many smaller lock requests to be granted in the
 * common (uncontended) case.
 *
 * Use interval tree to expand the lock extent for granted lock.
 */
static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
						struct ldlm_extent *new_ex)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_interval_tree *tree;
	struct interval_node_extent limiter = {
		.start	= new_ex->start,
		.end	= new_ex->end,
	};
	int conflicting = 0;
	int idx;
	ENTRY;

	lockmode_verify(req_mode);

	/* Using interval tree to handle the LDLM extent granted locks. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		struct interval_node_extent ext = {
			.start	= req_start,
			.end	= req_end,
		};

		tree = &res->lr_itree[idx];
		if (lockmode_compat(tree->lit_mode, req_mode))
			continue;

		conflicting += tree->lit_size;
		if (conflicting > 4)
			limiter.start = req_start;

		if (interval_is_overlapped(tree->lit_root, &ext))
			CDEBUG(D_INFO,
			       "req_mode = %d, tree->lit_mode = %d, tree->lit_size = %d\n",
			       req_mode, tree->lit_mode, tree->lit_size);
		interval_expand(tree->lit_root, &ext, &limiter);
		limiter.start = max(limiter.start, ext.start);
		limiter.end = min(limiter.end, ext.end);
		if (limiter.start == req_start && limiter.end == req_end)
			break;
	}

	new_ex->start = limiter.start;
	new_ex->end = limiter.end;
	LASSERT(new_ex->start <= req_start);
	LASSERT(new_ex->end >= req_end);

	ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
	EXIT;
}
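/*
 * For instance, a PW request for [0, 4095] on an otherwise idle
 * resource meets no conflicting tree, so the limiter stays at the full
 * [0, OBD_OBJECT_EOF] range and the whole file is granted.  If another
 * client already holds PW [1048576, OBD_OBJECT_EOF], the expansion is
 * clipped below that lock and the grant becomes [0, 1048575].
 */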
/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
static void
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
				    struct ldlm_extent *new_ex)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_lock *lock;
	int conflicting = 0;
	ENTRY;

	lockmode_verify(req_mode);

	/* for waiting locks */
	list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
		struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;

		/* We already hit the minimum requested size, search no more */
		if (new_ex->start == req_start && new_ex->end == req_end) {
			EXIT;
			return;
		}

		/* Don't conflict with ourselves */
		if (req == lock)
			continue;

		/* Locks are compatible, overlap doesn't matter */
		/* Until bug 20 is fixed, try to avoid granting overlapping
		 * locks on one client (they take a long time to cancel) */
		if (lockmode_compat(lock->l_req_mode, req_mode) &&
		    lock->l_export != req->l_export)
			continue;

		/* If this is a high-traffic lock, don't grow downwards at all
		 * or grow upwards too much */
		++conflicting;
		if (conflicting > 4)
			new_ex->start = req_start;

		/* If lock doesn't overlap new_ex, skip it. */
		if (!ldlm_extent_overlap(l_extent, new_ex))
			continue;

		/* Locks conflicting in requested extents and we can't satisfy
		 * both locks, so ignore it.  Either we will ping-pong this
		 * extent (we would regardless of what extent we granted) or
		 * lock is unused and it shouldn't limit our extent growth. */
		if (ldlm_extent_overlap(&lock->l_req_extent,
					&req->l_req_extent))
			continue;

		/* We grow extents downwards only as far as they don't overlap
		 * with already-granted locks, on the assumption that clients
		 * will be writing beyond the initial requested end and would
		 * then need to enqueue a new lock beyond the previous request.
		 * l_req_extent->end strictly < req_start, checked above. */
		if (l_extent->start < req_start && new_ex->start != req_start) {
			if (l_extent->end >= req_start)
				new_ex->start = req_start;
			else
				new_ex->start = min(l_extent->end + 1,
						    req_start);
		}

		/* If we need to cancel this lock anyways because our request
		 * overlaps the granted lock, we grow up to its requested
		 * extent start instead of limiting this extent, assuming that
		 * clients are writing forwards and the lock had overgrown
		 * its extent downwards before we enqueued our request. */
		if (l_extent->end > req_end) {
			if (l_extent->start <= req_end)
				new_ex->end = max(lock->l_req_extent.start - 1,
						  req_end);
			else
				new_ex->end = max(l_extent->start - 1, req_end);
		}
	}

	ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
	EXIT;
}
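/*
 * To illustrate the two growth rules above: for a request of
 * [8192, 12287], a waiting lock whose extent is [0, 4095] limits
 * downward growth to min(4095 + 1, 8192) = 4096, while a waiting lock
 * whose extent starts at 16384 (above req_end) limits upward growth to
 * 16384 - 1 = 16383, so the expanded extent becomes [4096, 16383]
 * before the final fixup.
 */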
/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
			       struct ldlm_lock *lock, __u64 *flags)
{
	struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

	if (lock->l_export == NULL)
		/*
		 * This is a local lock taken by server (e.g., as a part of
		 * OST-side locking, or unlink handling).  Expansion doesn't
		 * make a lot of sense for local locks, because they are
		 * dropped immediately on operation completion and would only
		 * conflict with other threads.
		 */
		return;

	if (lock->l_policy_data.l_extent.start == 0 &&
	    lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
		/* fast-path whole file locks */
		return;

	/* Because reprocess_queue zeroes flags and uses it to return
	 * LDLM_FL_LOCK_CHANGED, we must check for the NO_EXPANSION flag
	 * in the lock flags rather than the 'flags' argument. */
	if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
		ldlm_extent_internal_policy_granted(lock, &new_ex);
		ldlm_extent_internal_policy_waiting(lock, &new_ex);
	} else {
		LDLM_DEBUG(lock, "Not expanding manually requested lock.\n");
		new_ex.start = lock->l_policy_data.l_extent.start;
		new_ex.end = lock->l_policy_data.l_extent.end;
		/* In case the request is not on correct boundaries, we call
		 * fixup (normally called in ldlm_extent_internal_policy_*). */
		ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
	}

	if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
		*flags |= LDLM_FL_LOCK_CHANGED;
		lock->l_policy_data.l_extent.start = new_ex.start;
		lock->l_policy_data.l_extent.end = new_ex.end;
	}
}
static bool ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
	struct ldlm_resource *res = lock->l_resource;
	time64_t now = ktime_get_seconds();

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
		return true;

	CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
	if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
		res->lr_contention_time = now;

	return now < res->lr_contention_time +
		     ldlm_res_to_ns(res)->ns_contention_time;
}
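/*
 * For example, assuming the usual defaults of ns_contended_locks == 32
 * and ns_contention_time == 2 seconds: a queue scan that finds 33 or
 * more conflicting locks marks the resource contended, and any request
 * arriving within the following 2 seconds will see it as contended.
 */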
struct ldlm_extent_compat_args {
	struct list_head *work_list;
	struct ldlm_lock *lock;
	enum ldlm_mode mode;
	int *locks;
	int *compat;
};

static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
						void *data)
{
	struct ldlm_extent_compat_args *priv = data;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_extent *extent;
	struct list_head *work_list = priv->work_list;
	struct ldlm_lock *lock, *enq = priv->lock;
	enum ldlm_mode mode = priv->mode;
	int count = 0;
	ENTRY;

	LASSERT(!list_empty(&node->li_group));

	list_for_each_entry(lock, &node->li_group, l_sl_policy) {
		/* interval tree is for granted lock */
		LASSERTF(mode == lock->l_granted_mode,
			 "mode = %s, lock->l_granted_mode = %s\n",
			 ldlm_lockname[mode],
			 ldlm_lockname[lock->l_granted_mode]);
		count++;
		if (lock->l_blocking_ast &&
		    lock->l_granted_mode != LCK_GROUP)
			ldlm_add_ast_work_item(lock, enq, work_list);
	}

	/* don't count conflicting glimpse locks */
	extent = ldlm_interval_extent(node);
	if (!(mode == LCK_PR &&
	      extent->start == 0 && extent->end == OBD_OBJECT_EOF))
		*priv->locks += count;

	if (priv->compat)
		*priv->compat = 0;

	RETURN(INTERVAL_ITER_CONT);
}
/**
 * Determine if the lock is compatible with all locks on the queue.
 *
 * If \a work_list is provided, conflicting locks are linked there.
 * If \a work_list is not provided, we exit this function on first conflict.
 *
 * \retval 0 if the lock is not compatible
 * \retval 1 if the lock is compatible
 * \retval 2 if \a req is a group lock and it is compatible and requires
 *           no further checking
 * \retval negative error, such as EWOULDBLOCK for group locks
 */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
			 __u64 *flags, struct list_head *work_list,
			 int *contended_locks)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_lock *lock;
	int check_contention;
	int compat = 1;
	ENTRY;

	lockmode_verify(req_mode);

	/* Using interval tree for granted lock */
	if (queue == &res->lr_granted) {
		struct ldlm_interval_tree *tree;
		struct ldlm_extent_compat_args data = {.work_list = work_list,
						       .lock = req,
						       .locks = contended_locks,
						       .compat = &compat };
		struct interval_node_extent ex = { .start = req_start,
						   .end = req_end };
		int idx, rc;

		for (idx = 0; idx < LCK_MODE_NUM; idx++) {
			tree = &res->lr_itree[idx];
			if (tree->lit_root == NULL) /* empty tree, skipped */
				continue;

			data.mode = tree->lit_mode;
			if (lockmode_compat(req_mode, tree->lit_mode)) {
				struct ldlm_interval *node;
				struct ldlm_extent *extent;

				if (req_mode != LCK_GROUP)
					continue;

				/* group lock, grant it immediately if
				 * compatible */
				node = to_ldlm_interval(tree->lit_root);
				extent = ldlm_interval_extent(node);
				if (req->l_policy_data.l_extent.gid ==
				    extent->gid)
					RETURN(2);
			}

			if (tree->lit_mode == LCK_GROUP) {
				if (*flags & (LDLM_FL_BLOCK_NOWAIT |
					      LDLM_FL_SPECULATIVE)) {
					compat = -EWOULDBLOCK;
					goto destroylock;
				}

				*flags |= LDLM_FL_NO_TIMEOUT;
				if (!work_list)
					RETURN(0);

				/* if work list is not NULL, add all
				 * locks in the tree to work list */
				compat = 0;
				interval_iterate(tree->lit_root,
						 ldlm_extent_compat_cb, &data);
				continue;
			}

			/* We've found a potentially blocking lock, check
			 * compatibility.  This handles locks other than GROUP
			 * locks, which are handled separately above.
			 *
			 * Locks with FL_SPECULATIVE are asynchronous requests
			 * which must never wait behind another lock, so they
			 * fail if any conflicting lock is found. */
			if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
				rc = interval_is_overlapped(tree->lit_root,
							    &ex);
				if (rc) {
					if (!work_list) {
						RETURN(0);
					} else {
						compat = -EWOULDBLOCK;
						goto destroylock;
					}
				}
			} else {
				interval_search(tree->lit_root, &ex,
						ldlm_extent_compat_cb, &data);
				if (!list_empty(work_list) && compat)
					compat = 0;
			}
		}
	} else { /* for waiting queue */
		list_for_each_entry(lock, queue, l_res_link) {
			check_contention = 1;

			/* We stop walking the queue if we hit ourselves so
			 * we don't take conflicting locks enqueued after us
			 * into account, or we'd wait forever. */
			if (req == lock)
				break;

			/* locks are compatible, overlap doesn't matter */
			if (lockmode_compat(lock->l_req_mode, req_mode)) {
				if (req_mode == LCK_PR &&
				    ((lock->l_policy_data.l_extent.start <=
				      req->l_policy_data.l_extent.start) &&
				     (lock->l_policy_data.l_extent.end >=
				      req->l_policy_data.l_extent.end))) {
					/* If we met a PR lock just like us or
					   wider, and nobody down the list
					   conflicted with it, that means we
					   can skip processing of the rest of
					   the list and safely place ourselves
					   at the end of the list, or grant
					   (depending on whether we met
					   conflicting locks earlier in the
					   list).  In case of a first enqueue
					   only, we continue traversing if
					   there is something conflicting down
					   the list because we need to make
					   sure that something is marked as
					   AST_SENT as well; in case of an
					   empty worklist we would exit on the
					   first conflict met. */
					/* There IS a case where such flag is
					   not set for a lock, yet it blocks
					   something.  Luckily for us this is
					   only during destroy, so lock is
					   exclusive.  So here we are safe. */
					if (!ldlm_is_ast_sent(lock))
						RETURN(compat);
				}

				/* non-group locks are compatible, overlap
				   doesn't matter */
				if (likely(req_mode != LCK_GROUP))
					continue;

				/* If we are trying to get a GROUP lock and
				   there is another one of this kind, we need
				   to compare gid */
				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* If existing lock with matched gid
					   is granted, we grant new one too. */
					if (ldlm_is_granted(lock))
						RETURN(2);

					/* Otherwise we are scanning queue of
					 * waiting locks and it means current
					 * request would block along with the
					 * existing lock (that is already
					 * blocked).
					 * If we are in nonblocking mode -
					 * return immediately */
					if (*flags & (LDLM_FL_BLOCK_NOWAIT
						      | LDLM_FL_SPECULATIVE)) {
						compat = -EWOULDBLOCK;
						goto destroylock;
					}
					/* If this group lock is compatible
					 * with another group lock on the
					 * waiting list, they must be together
					 * in the list, so they can be granted
					 * at the same time.  Otherwise the
					 * later lock can get stuck behind
					 * another, incompatible, lock. */
					ldlm_resource_insert_lock_after(lock,
									req);
					/* Because 'lock' is not granted, we
					 * can stop processing this queue and
					 * return immediately.  There is no
					 * need to check the rest of the
					 * list. */
					RETURN(0);
				}
			}

			if (unlikely(req_mode == LCK_GROUP &&
				     !ldlm_is_granted(lock))) {
				compat = 0;
				if (lock->l_req_mode != LCK_GROUP) {
					/* Ok, we hit a non-GROUP lock, there
					   should be no more GROUP locks later
					   on, queue in front of the first
					   non-GROUP lock */

					ldlm_resource_insert_lock_before(lock,
									 req);
					break;
				}
				LASSERT(req->l_policy_data.l_extent.gid !=
					lock->l_policy_data.l_extent.gid);
				continue;
			}

			if (unlikely(lock->l_req_mode == LCK_GROUP)) {
				/* If compared lock is GROUP, then requested is
				 * PR/PW so this is not compatible; extent
				 * range does not matter */
				if (*flags & (LDLM_FL_BLOCK_NOWAIT
					      | LDLM_FL_SPECULATIVE)) {
					compat = -EWOULDBLOCK;
					goto destroylock;
				}
			} else if (lock->l_policy_data.l_extent.end < req_start ||
				   lock->l_policy_data.l_extent.start > req_end) {
				/* if a non group lock doesn't overlap skip it */
				continue;
			} else if (lock->l_req_extent.end < req_start ||
				   lock->l_req_extent.start > req_end) {
				/* false contention, the requests don't really
				 * overlap */
				check_contention = 0;
			}

			if (!work_list)
				RETURN(0);

			if (*flags & LDLM_FL_SPECULATIVE) {
				compat = -EWOULDBLOCK;
				goto destroylock;
			}

			/* don't count conflicting glimpse locks */
			if (lock->l_req_mode == LCK_PR &&
			    lock->l_policy_data.l_extent.start == 0 &&
			    lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
				check_contention = 0;

			*contended_locks += check_contention;

			compat = 0;
			if (lock->l_blocking_ast &&
			    lock->l_req_mode != LCK_GROUP)
				ldlm_add_ast_work_item(lock, req, work_list);
		}
	}

	if (ldlm_check_contention(req, *contended_locks) &&
	    compat == 0 &&
	    (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
	    req->l_req_mode != LCK_GROUP &&
	    req_end - req_start <=
	    ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
		GOTO(destroylock, compat = -EUSERS);

	RETURN(compat);

destroylock:
	list_del_init(&req->l_res_link);
	ldlm_lock_destroy_nolock(req);
	RETURN(compat);
}
/**
 * This function refreshes the eviction timer for a cancelled lock.
 *
 * \param[in] lock	ldlm lock for refresh
 * \param[in] arg	ldlm prolong arguments, timeout, export, extent
 *			and counter are used
 */
void ldlm_lock_prolong_one(struct ldlm_lock *lock,
			   struct ldlm_prolong_args *arg)
{
	timeout_t timeout;

	OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PROLONG_PAUSE, 3);

	if (arg->lpa_export != lock->l_export ||
	    lock->l_flags & LDLM_FL_DESTROYED)
		/* ignore unrelated locks */
		return;

	arg->lpa_locks_cnt++;

	if (!(lock->l_flags & LDLM_FL_AST_SENT))
		/* ignore locks not being cancelled */
		return;

	/* We are in the middle of the process - BL AST is sent, CANCEL
	 * is ahead.  Take half of BL AT + IO AT process time.
	 */
	timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);

	LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);

	arg->lpa_blocks_cnt++;

	/* OK, this is a possible lock the user holds doing I/O;
	 * let's refresh the eviction timer for it.
	 */
	ldlm_refresh_waiting_lock(lock, timeout);
}
EXPORT_SYMBOL(ldlm_lock_prolong_one);
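/*
 * For example, with an lpa_timeout of 30 seconds and ldlm_bl_timeout()
 * returning 10 seconds for the lock, the refreshed timeout above is
 * 30 + 10 / 2 = 35 seconds from now.
 */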
static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
						   void *data)
{
	struct ldlm_prolong_args *arg = data;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_lock *lock;
	ENTRY;

	LASSERT(!list_empty(&node->li_group));

	list_for_each_entry(lock, &node->li_group, l_sl_policy) {
		ldlm_lock_prolong_one(lock, arg);
	}

	RETURN(INTERVAL_ITER_CONT);
}
/**
 * Walk through the granted trees and prolong locks if they overlap the
 * extent.
 *
 * \param[in] arg	prolong args
 */
void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
{
	struct ldlm_interval_tree *tree;
	struct ldlm_resource *res;
	struct interval_node_extent ex = { .start = arg->lpa_extent.start,
					   .end = arg->lpa_extent.end };
	int idx;
	ENTRY;

	res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
				&arg->lpa_resid, LDLM_EXTENT, 0);
	if (IS_ERR(res)) {
		CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
		       arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
		RETURN_EXIT;
	}

	lock_res(res);
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		tree = &res->lr_itree[idx];
		if (tree->lit_root == NULL) /* empty tree, skipped */
			continue;

		/* There is no possibility to check for the groupID
		 * so all the group locks are considered as valid
		 * here, especially because the client is supposed
		 * to check it has such a lock before sending an RPC.
		 */
		if (!(tree->lit_mode & arg->lpa_mode))
			continue;

		interval_search(tree->lit_root, &ex,
				ldlm_resource_prolong_cb, arg);
	}
	unlock_res(res);

	ldlm_resource_putref(res);
	EXIT;
}
EXPORT_SYMBOL(ldlm_resource_prolong);
/**
 * Process a granting attempt for extent lock.
 * Must be called with ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues.  The lock is granted if no conflicts are found in
 * either queue.
 */
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
			     enum ldlm_process_intention intention,
			     enum ldlm_error *err, struct list_head *work_list)
{
	struct ldlm_resource *res = lock->l_resource;
	int rc, rc2 = 0;
	int contended_locks = 0;
	struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
							NULL : work_list;
	ENTRY;

	LASSERT(!ldlm_is_granted(lock));
	LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
		!ldlm_is_ast_discard_data(lock));
	check_res_locked(res);
	*err = ELDLM_OK;

	if (intention == LDLM_PROCESS_RESCAN) {
		/* Careful observers will note that we don't handle -EWOULDBLOCK
		 * here, but it's ok for a non-obvious reason -- compat_queue
		 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT |
		 * SPECULATIVE).  flags should always be zero here, and if that
		 * ever stops being true, we want to find out. */
		LASSERT(*flags == 0);
		rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
					      NULL, &contended_locks);
		if (rc == 1) {
			rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
						      flags, NULL,
						      &contended_locks);
		}
		if (rc == 0)
			RETURN(LDLM_ITER_STOP);

		ldlm_resource_unlink_lock(lock);

		if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
			ldlm_extent_policy(res, lock, flags);
		ldlm_grant_lock(lock, grant_work);
		RETURN(LDLM_ITER_CONTINUE);
	}

	contended_locks = 0;
	rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
				      work_list, &contended_locks);
	if (rc < 0)
		GOTO(out, *err = rc);

	if (rc != 2) {
		rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
					       flags, work_list,
					       &contended_locks);
		if (rc2 < 0)
			GOTO(out, *err = rc = rc2);
	}

	if (rc + rc2 == 2) {
		ldlm_extent_policy(res, lock, flags);
		ldlm_resource_unlink_lock(lock);
		ldlm_grant_lock(lock, grant_work);
	} else {
		/* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to
		 * force client to wait for the lock endlessly once
		 * the lock is enqueued -bzzz */
		*flags |= LDLM_FL_NO_TIMEOUT;
	}

	RETURN(LDLM_ITER_CONTINUE);

out:
	return rc;
}
#endif /* HAVE_SERVER_SUPPORT */
struct ldlm_kms_shift_args {
	__u64 old_kms;
	__u64 kms;
	bool complete;
};

/* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
					    void *args)
{
	struct ldlm_kms_shift_args *arg = args;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_lock *tmplock;
	struct ldlm_lock *lock = NULL;
	ENTRY;

	/* Since all locks in an interval have the same extent, we can just
	 * use the first lock without kms_ignore set. */
	list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
		if (ldlm_is_kms_ignore(tmplock))
			continue;

		lock = tmplock;

		break;
	}

	/* No locks in this interval without kms_ignore set */
	if (!lock)
		RETURN(INTERVAL_ITER_CONT);

	/* If we find a lock with a greater or equal kms, we are not the
	 * highest lock (or we share that distinction with another lock), and
	 * don't need to update KMS.  Return old_kms and stop looking. */
	if (lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF ||
	    lock->l_policy_data.l_extent.end + 1 >= arg->old_kms) {
		arg->kms = arg->old_kms;
		arg->complete = true;
		RETURN(INTERVAL_ITER_STOP);
	}

	if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
		arg->kms = lock->l_policy_data.l_extent.end + 1;

	/* Since interval_iterate_reverse starts with the highest lock and
	 * works down, for PW locks, we only need to check if we should update
	 * the kms, then stop walking the tree.  PR locks are not exclusive, so
	 * the highest start does not imply the highest end and we must
	 * continue.  (Only one group lock is allowed per resource, so this is
	 * irrelevant for group locks.) */
	if (lock->l_granted_mode == LCK_PW)
		RETURN(INTERVAL_ITER_STOP);
	else
		RETURN(INTERVAL_ITER_CONT);
}
/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value, updating
 * it only if we were the highest lock.
 *
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval_tree *tree;
	struct ldlm_kms_shift_args args;
	int idx;
	ENTRY;

	args.old_kms = old_kms;
	args.kms = 0;
	args.complete = false;

	/* don't let another thread in ldlm_extent_shift_kms race in
	 * just after we finish and take our lock into account in its
	 * calculation of the kms */
	ldlm_set_kms_ignore(lock);

	/* We iterate over the lock trees, looking for the largest kms smaller
	 * than the current one. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		tree = &res->lr_itree[idx];

		/* If our already known kms is >= than the highest 'end' in
		 * this tree, we don't need to check this tree, because
		 * the kms from a tree can be lower than in_max_high (due to
		 * kms_ignore), but it can never be higher. */
		if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
			continue;

		interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
					 &args);

		/* this tells us we're not the highest lock, so we don't need
		 * to check the remaining trees */
		if (args.complete)
			break;
	}

	LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
		 args.old_kms);

	RETURN(args.kms);
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
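/*
 * Worked example: a client cancels its PW lock on [0, 1048575] while
 * old_kms is 1048576.  If the highest remaining lock without kms_ignore
 * ends at 65535, the new KMS becomes 65535 + 1 = 65536; if any
 * remaining lock ends at or beyond old_kms - 1, the KMS stays at
 * 1048576.
 */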
struct kmem_cache *ldlm_interval_slab;
static struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
	struct ldlm_interval *node;
	ENTRY;

	LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
	OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
	if (node == NULL)
		RETURN(NULL);

	INIT_LIST_HEAD(&node->li_group);
	ldlm_interval_attach(node, lock);
	RETURN(node);
}

void ldlm_interval_free(struct ldlm_interval *node)
{
	if (node) {
		LASSERT(list_empty(&node->li_group));
		LASSERT(!interval_is_intree(&node->li_node));
		OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
	}
}

/* interval tree, for LDLM_EXTENT. */
void ldlm_interval_attach(struct ldlm_interval *n,
			  struct ldlm_lock *l)
{
	LASSERT(l->l_tree_node == NULL);
	LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

	list_add_tail(&l->l_sl_policy, &n->li_group);
	l->l_tree_node = n;
}

struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
	struct ldlm_interval *n = l->l_tree_node;

	if (n == NULL)
		return NULL;

	LASSERT(!list_empty(&n->li_group));
	l->l_tree_node = NULL;
	list_del_init(&l->l_sl_policy);

	return list_empty(&n->li_group) ? n : NULL;
}
static inline int ldlm_mode_to_index(enum ldlm_mode mode)
{
	int index;

	LASSERT(mode != 0);
	LASSERT(is_power_of_2(mode));
	index = ilog2(mode);
	LASSERT(index < LCK_MODE_NUM);
	return index;
}
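/*
 * E.g. LCK_EX (0x1) maps to index 0, LCK_PW (0x2) to index 1 and
 * LCK_PR (0x4) to index 2, matching the per-mode slots of
 * res->lr_itree[].
 */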
int ldlm_extent_alloc_lock(struct ldlm_lock *lock)
{
	lock->l_tree_node = NULL;
	if (ldlm_interval_alloc(lock) == NULL)
		return -ENOMEM;
	return 0;
}
/** Add newly granted lock into interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
			  struct ldlm_lock *lock)
{
	struct interval_node *found, **root;
	struct ldlm_interval *node;
	struct ldlm_extent *extent;
	int idx, rc;

	LASSERT(ldlm_is_granted(lock));

	node = lock->l_tree_node;
	LASSERT(node != NULL);
	LASSERT(!interval_is_intree(&node->li_node));

	idx = ldlm_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == BIT(idx));
	LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

	/* node extent initialize */
	extent = &lock->l_policy_data.l_extent;

	rc = interval_set(&node->li_node, extent->start, extent->end);
	LASSERT(!rc);

	root = &res->lr_itree[idx].lit_root;
	found = interval_insert(&node->li_node, root);
	if (found) { /* The policy group found. */
		struct ldlm_interval *tmp = ldlm_interval_detach(lock);

		LASSERT(tmp != NULL);
		ldlm_interval_free(tmp);
		ldlm_interval_attach(to_ldlm_interval(found), lock);
	}
	res->lr_itree[idx].lit_size++;

	/* even though we use interval tree to manage the extent lock, we also
	 * add the locks into grant list, for debug purpose, .. */
	ldlm_resource_add_lock(res, &res->lr_granted, lock);

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
		struct ldlm_lock *lck;

		list_for_each_entry_reverse(lck, &res->lr_granted,
					    l_res_link) {
			if (lck == lock)
				continue;
			if (lockmode_compat(lck->l_granted_mode,
					    lock->l_granted_mode))
				continue;
			if (ldlm_extent_overlap(&lck->l_req_extent,
						&lock->l_req_extent)) {
				CDEBUG(D_ERROR,
				       "granting conflicting lock %p %p\n",
				       lck, lock);
				ldlm_resource_dump(D_ERROR, res);
				LBUG();
			}
		}
	}
}
/** Remove cancelled lock from resource interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval *node = lock->l_tree_node;
	struct ldlm_interval_tree *tree;
	int idx;

	if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
		return;

	idx = ldlm_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == BIT(idx));
	tree = &res->lr_itree[idx];

	LASSERT(tree->lit_root != NULL); /* assure the tree is not null */

	tree->lit_size--;
	node = ldlm_interval_detach(lock);
	if (node) {
		interval_erase(&node->li_node, &tree->lit_root);
		ldlm_interval_free(node);
	}
}
void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				      union ldlm_policy_data *lpolicy)
{
	lpolicy->l_extent.start = wpolicy->l_extent.start;
	lpolicy->l_extent.end = wpolicy->l_extent.end;
	lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}

void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				      union ldlm_wire_policy_data *wpolicy)
{
	memset(wpolicy, 0, sizeof(*wpolicy));
	wpolicy->l_extent.start = lpolicy->l_extent.start;
	wpolicy->l_extent.end = lpolicy->l_extent.end;
	wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}