/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
/**
 * This file contains the implementation of the EXTENT lock type.
 *
 * EXTENT lock type is for locking a contiguous range of values, represented
 * by 64-bit starting and ending offsets (inclusive). There are several extent
 * lock modes, some of which may be mutually incompatible. Extent locks are
 * considered incompatible if their modes are incompatible and their extents
 * intersect. See the lock mode compatibility matrix in lustre_dlm.h.
 */
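
/*
 * Illustrative example (not from the original sources): a PW lock on
 * [0, 4095] and a PW lock on [4096, OBD_OBJECT_EOF] have mutually
 * incompatible modes but disjoint extents, so they can coexist; the same
 * PW lock on [0, 4095] conflicts with a PR lock on [4095, 8191], because
 * the modes are incompatible and the extents intersect at offset 4095.
 */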

#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

#ifdef HAVE_SERVER_SUPPORT
# define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
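/*
 * Note: LDLM_MAX_GROWN_EXTENT above caps how far a heavily contended
 * PW/CW lock may be grown past its requested start; since extent ends
 * are inclusive, req_start + LDLM_MAX_GROWN_EXTENT is the last offset
 * of a 32MB window.
 */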

/**
 * Fix up the ldlm_extent after expanding it.
 *
 * After expansion has been done, we might still want to do certain adjusting
 * based on overall contention of the resource and the like to avoid granting
 * overly wide locks.
 */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
					      struct ldlm_extent *new_ex,
					      int conflicting)
{
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	__u64 req_align, mask;

	if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
		if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
			new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
					  new_ex->end);
	}

	if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
		EXIT;
		return;
	}

	/* we need to ensure that the lock extent is properly aligned to what
	 * the client requested. We also need to make sure it's server page
	 * size aligned, otherwise a server page can be covered by two write
	 * locks. */
	mask = PAGE_SIZE;
	req_align = (req_end + 1) | req_start;
	if (req_align != 0 && (req_align & (mask - 1)) == 0) {
		while ((req_align & mask) == 0)
			mask <<= 1;
	}
	mask -= 1;
	/* We can only shrink the lock, not grow it.
	 * This should never cause the lock to be smaller than requested,
	 * since the requested lock was already aligned on these boundaries. */
	new_ex->start = ((new_ex->start - 1) | mask) + 1;
	new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
	LASSERTF(new_ex->start <= req_start,
		 "mask %#llx grant start %llu req start %llu\n",
		 mask, new_ex->start, req_start);
	LASSERTF(new_ex->end >= req_end,
		 "mask %#llx grant end %llu req end %llu\n",
		 mask, new_ex->end, req_end);
	EXIT;
}
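
/*
 * Worked example for the alignment logic above (illustrative numbers):
 * with req_start = 0x10000 and req_end = 0x1ffff, req_align is
 * 0x20000 | 0x10000 = 0x30000, so the mask grows from PAGE_SIZE (here
 * assumed to be 0x1000) up to 0x10000 and becomes 0xffff after the
 * decrement. A tentative grant of [0x5000, 0x4ffff] is then shrunk to
 * the aligned range [0x10000, 0x4ffff], which still covers the request.
 */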

/**
 * Return the maximum extent that:
 * - contains the requested extent
 * - does not overlap existing conflicting extents outside the requested one
 *
 * This allows clients to request a small required extent range, but if there
 * is no contention on the lock the full lock can be granted to the client.
 * This avoids the need for many smaller lock requests to be granted in the
 * common (uncontended) case.
 *
 * Use the interval tree to expand the lock extent for granted locks.
 */
static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
						struct ldlm_extent *new_ex)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_interval_tree *tree;
	struct interval_node_extent limiter = {
		.start	= new_ex->start,
		.end	= new_ex->end,
	};
	int conflicting = 0;
	int idx;
	ENTRY;

	lockmode_verify(req_mode);

	/* Using interval tree to handle the LDLM extent granted locks. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		struct interval_node_extent ext = {
			.start	= req_start,
			.end	= req_end,
		};

		tree = &res->lr_itree[idx];
		if (lockmode_compat(tree->lit_mode, req_mode))
			continue;

		conflicting += tree->lit_size;
		if (conflicting > 4)
			limiter.start = req_start;

		if (interval_is_overlapped(tree->lit_root, &ext))
			CDEBUG(D_INFO,
			       "req_mode = %d, tree->lit_mode = %d, tree->lit_size = %d\n",
			       req_mode, tree->lit_mode, tree->lit_size);
		interval_expand(tree->lit_root, &ext, &limiter);
		limiter.start = max(limiter.start, ext.start);
		limiter.end = min(limiter.end, ext.end);
		if (limiter.start == req_start && limiter.end == req_end)
			break;
	}

	new_ex->start = limiter.start;
	new_ex->end = limiter.end;
	LASSERT(new_ex->start <= req_start);
	LASSERT(new_ex->end >= req_end);

	ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
	EXIT;
}
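
/*
 * Illustrative example (not from the original sources): with granted PW
 * locks covering [0, 4095] and [1048576, OBD_OBJECT_EOF], a PW request
 * for [8192, 16383] may be expanded up to [4096, 1048575], the widest
 * range that still avoids the conflicting neighbours.
 */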

/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
static void
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
				    struct ldlm_extent *new_ex)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_lock *lock;
	int conflicting = 0;
	ENTRY;

	lockmode_verify(req_mode);

	/* for waiting locks */
	list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
		struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;

		/* We already hit the minimum requested size, search no more */
		if (new_ex->start == req_start && new_ex->end == req_end) {
			EXIT;
			return;
		}

		/* Don't conflict with ourselves */
		if (req == lock)
			continue;

		/* Locks are compatible, overlap doesn't matter */
		/* Until bug 20 is fixed, try to avoid granting overlapping
		 * locks on one client (they take a long time to cancel) */
		if (lockmode_compat(lock->l_req_mode, req_mode) &&
		    lock->l_export != req->l_export)
			continue;

		/* If this is a high-traffic lock, don't grow downwards at all
		 * or grow upwards too much */
		++conflicting;
		if (conflicting > 4)
			new_ex->start = req_start;

		/* If lock doesn't overlap new_ex, skip it. */
		if (!ldlm_extent_overlap(l_extent, new_ex))
			continue;

		/* Locks conflicting in requested extents and we can't satisfy
		 * both locks, so ignore it. Either we will ping-pong this
		 * extent (we would regardless of what extent we granted) or
		 * lock is unused and it shouldn't limit our extent growth. */
		if (ldlm_extent_overlap(&lock->l_req_extent,
					&req->l_req_extent))
			continue;

		/* We grow extents downwards only as far as they don't overlap
		 * with already-granted locks, on the assumption that clients
		 * will be writing beyond the initial requested end and would
		 * then need to enqueue a new lock beyond the previous request.
		 * l_req_extent->end strictly < req_start, checked above. */
		if (l_extent->start < req_start && new_ex->start != req_start) {
			if (l_extent->end >= req_start)
				new_ex->start = req_start;
			else
				new_ex->start = min(l_extent->end + 1,
						    req_start);
		}

		/* If we need to cancel this lock anyways because our request
		 * overlaps the granted lock, we grow up to its requested
		 * extent start instead of limiting this extent, assuming that
		 * clients are writing forwards and the lock had overgrown
		 * its extent downwards before we enqueued our request. */
		if (l_extent->end > req_end) {
			if (l_extent->start <= req_end)
				new_ex->end = max(lock->l_req_extent.start - 1,
						  req_end);
			else
				new_ex->end = max(l_extent->start - 1,
						  req_end);
		}
	}

	ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
	EXIT;
}

/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
			       struct ldlm_lock *lock, __u64 *flags)
{
	struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

	if (lock->l_export == NULL)
		/*
		 * this is a local lock taken by the server (e.g., as a part
		 * of OST-side locking, or unlink handling). Expansion doesn't
		 * make a lot of sense for local locks, because they are
		 * dropped immediately on operation completion and would only
		 * conflict with other threads.
		 */
		return;

	if (lock->l_policy_data.l_extent.start == 0 &&
	    lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
		/* fast-path whole file locks */
		return;

	ldlm_extent_internal_policy_granted(lock, &new_ex);
	ldlm_extent_internal_policy_waiting(lock, &new_ex);

	if (new_ex.start != lock->l_policy_data.l_extent.start ||
	    new_ex.end != lock->l_policy_data.l_extent.end) {
		*flags |= LDLM_FL_LOCK_CHANGED;
		lock->l_policy_data.l_extent.start = new_ex.start;
		lock->l_policy_data.l_extent.end = new_ex.end;
	}
}
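
/*
 * Illustrative note: on an uncontended resource a client asking for, say,
 * [4096, 8191] will typically be granted [0, OBD_OBJECT_EOF] by the policy
 * above, and LDLM_FL_LOCK_CHANGED tells the client that the granted extent
 * differs from the one it requested.
 */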

static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
	struct ldlm_resource *res = lock->l_resource;
	cfs_time_t now = cfs_time_current();

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
		return 1;

	CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
	if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
		res->lr_contention_time = now;

	return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
		cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
}
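
/*
 * Descriptive note: the resource is considered contended for
 * ns_contention_time seconds after the moment the number of conflicting
 * locks seen in one scan last exceeded ns_contended_locks; both values
 * are per-namespace tunables.
 */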

struct ldlm_extent_compat_args {
	struct list_head *work_list;
	struct ldlm_lock *lock;
	enum ldlm_mode mode;
	int *locks;
	int *compat;
};

static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
						void *data)
{
	struct ldlm_extent_compat_args *priv = data;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_extent *extent;
	struct list_head *work_list = priv->work_list;
	struct ldlm_lock *lock, *enq = priv->lock;
	enum ldlm_mode mode = priv->mode;
	int count = 0;
	ENTRY;

	LASSERT(!list_empty(&node->li_group));

	list_for_each_entry(lock, &node->li_group, l_sl_policy) {
		/* interval tree is for granted lock */
		LASSERTF(mode == lock->l_granted_mode,
			 "mode = %s, lock->l_granted_mode = %s\n",
			 ldlm_lockname[mode],
			 ldlm_lockname[lock->l_granted_mode]);
		count++;
		if (lock->l_blocking_ast &&
		    lock->l_granted_mode != LCK_GROUP)
			ldlm_add_ast_work_item(lock, enq, work_list);
	}

	/* don't count conflicting glimpse locks */
	extent = ldlm_interval_extent(node);
	if (!(mode == LCK_PR &&
	      extent->start == 0 && extent->end == OBD_OBJECT_EOF))
		*priv->locks += count;

	if (priv->compat)
		*priv->compat = 0;

	RETURN(INTERVAL_ITER_CONT);
}

/**
 * Determine if the lock is compatible with all locks on the queue.
 *
 * If \a work_list is provided, conflicting locks are linked there.
 * If \a work_list is not provided, we exit this function on the first
 * conflict.
 *
 * \retval 0 if the lock is not compatible
 * \retval 1 if the lock is compatible
 * \retval 2 if \a req is a group lock and it is compatible and requires
 *           no further checking
 * \retval negative error, such as EWOULDBLOCK for group locks
 */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
			 __u64 *flags, enum ldlm_error *err,
			 struct list_head *work_list, int *contended_locks)
{
	struct ldlm_resource *res = req->l_resource;
	enum ldlm_mode req_mode = req->l_req_mode;
	__u64 req_start = req->l_req_extent.start;
	__u64 req_end = req->l_req_extent.end;
	struct ldlm_lock *lock;
	int check_contention;
	int compat = 1;
	int scan = 0;
	ENTRY;

	lockmode_verify(req_mode);

	/* Using interval tree for granted lock */
	if (queue == &res->lr_granted) {
		struct ldlm_interval_tree *tree;
		struct ldlm_extent_compat_args data = {.work_list = work_list,
						       .lock = req,
						       .locks = contended_locks,
						       .compat = &compat };
		struct interval_node_extent ex = { .start = req_start,
						   .end = req_end };
		int idx, rc;

		for (idx = 0; idx < LCK_MODE_NUM; idx++) {
			tree = &res->lr_itree[idx];
			if (tree->lit_root == NULL) /* empty tree, skipped */
				continue;

			data.mode = tree->lit_mode;
			if (lockmode_compat(req_mode, tree->lit_mode)) {
				struct ldlm_interval *node;
				struct ldlm_extent *extent;

				if (req_mode != LCK_GROUP)
					continue;

				/* group lock, grant it immediately if
				 * compatible */
				node = to_ldlm_interval(tree->lit_root);
				extent = ldlm_interval_extent(node);
				if (req->l_policy_data.l_extent.gid ==
				    extent->gid)
					RETURN(2);
			}

			if (tree->lit_mode == LCK_GROUP) {
				if (*flags & LDLM_FL_BLOCK_NOWAIT) {
					compat = -EWOULDBLOCK;
					goto destroylock;
				}

				*flags |= LDLM_FL_NO_TIMEOUT;
				if (!work_list)
					RETURN(0);

				/* if work list is not NULL, add all
				 * locks in the tree to work list */
				compat = 0;
				interval_iterate(tree->lit_root,
						 ldlm_extent_compat_cb, &data);
				continue;
			}

			if (!work_list) {
				rc = interval_is_overlapped(tree->lit_root,
							    &ex);
				if (rc)
					RETURN(0);
			} else {
				interval_search(tree->lit_root, &ex,
						ldlm_extent_compat_cb, &data);
				if (!list_empty(work_list) && compat)
					compat = 0;
			}
		}
	} else { /* for waiting queue */
		list_for_each_entry(lock, queue, l_res_link) {
			check_contention = 1;

			/* We stop walking the queue if we hit ourselves so
			 * we don't take conflicting locks enqueued after us
			 * into account, or we'd wait forever. */
			if (req == lock)
				break;

			if (unlikely(scan)) {
				/* We only get here if we are queuing a GROUP
				 * lock and met some incompatible one. The main
				 * idea of this code is to insert the GROUP
				 * lock past a compatible GROUP lock in the
				 * waiting queue or, if there is none, then in
				 * front of the first non-GROUP lock. */
				if (lock->l_req_mode != LCK_GROUP) {
					/* Ok, we hit a non-GROUP lock, there
					 * should be no more GROUP locks later
					 * on, queue in front of the first
					 * non-GROUP lock */
					ldlm_resource_insert_lock_after(lock, req);
					list_del_init(&lock->l_res_link);
					ldlm_resource_insert_lock_after(req, lock);
					break;
				}
				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* found it */
					ldlm_resource_insert_lock_after(lock, req);
					break;
				}
				continue;
			}

			/* locks are compatible, overlap doesn't matter */
			if (lockmode_compat(lock->l_req_mode, req_mode)) {
				if (req_mode == LCK_PR &&
				    ((lock->l_policy_data.l_extent.start <=
				      req->l_policy_data.l_extent.start) &&
				     (lock->l_policy_data.l_extent.end >=
				      req->l_policy_data.l_extent.end))) {
					/* If we met a PR lock just like us or
					 * wider, and nobody down the list
					 * conflicted with it, that means we
					 * can skip processing of the rest of
					 * the list and safely place ourselves
					 * at the end of the list, or grant
					 * (depending on whether we met
					 * conflicting locks before in the
					 * list). In case of a first enqueue
					 * only, we continue traversing if
					 * there is something conflicting down
					 * the list because we need to make
					 * sure that something is marked as
					 * AST_SENT as well; in case of an
					 * empty work list we would exit on
					 * the first conflict met. */
					/* There IS a case where such flag is
					 * not set for a lock, yet it blocks
					 * something. Luckily for us this is
					 * only during destroy, so lock is
					 * exclusive. So here we are safe. */
					if (!ldlm_is_ast_sent(lock))
						RETURN(compat);
				}

				/* non-group locks are compatible, overlap
				 * doesn't matter */
				if (likely(req_mode != LCK_GROUP))
					continue;

				/* If we are trying to get a GROUP lock and
				 * there is another one of this kind, we need
				 * to compare gid */
				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* If existing lock with matched gid
					 * is granted, we grant new one too. */
					if (lock->l_req_mode ==
					    lock->l_granted_mode)
						RETURN(2);

					/* Otherwise we are scanning queue of
					 * waiting locks and it means current
					 * request would block along with the
					 * existing lock (that is already
					 * blocked).
					 * If we are in nonblocking mode -
					 * return immediately */
					if (*flags & LDLM_FL_BLOCK_NOWAIT) {
						compat = -EWOULDBLOCK;
						goto destroylock;
					}
					/* If this group lock is compatible
					 * with another group lock on the
					 * waiting list, they must be together
					 * in the list, so they can be granted
					 * at the same time. Otherwise the
					 * later lock can get stuck behind
					 * another, incompatible, lock. */
					ldlm_resource_insert_lock_after(lock, req);
					/* Because 'lock' is not granted, we
					 * can stop processing this queue and
					 * return immediately. There is no
					 * need to check the rest of the
					 * list. */
					RETURN(0);
				}
			}

			if (unlikely(req_mode == LCK_GROUP &&
				     (lock->l_req_mode != lock->l_granted_mode))) {
				scan = 1;
				compat = 0;
				if (lock->l_req_mode != LCK_GROUP) {
					/* Ok, we hit a non-GROUP lock, there
					 * should be no more GROUP locks later
					 * on, queue in front of the first
					 * non-GROUP lock */
					ldlm_resource_insert_lock_after(lock, req);
					list_del_init(&lock->l_res_link);
					ldlm_resource_insert_lock_after(req, lock);
					break;
				}
				if (req->l_policy_data.l_extent.gid ==
				    lock->l_policy_data.l_extent.gid) {
					/* found it */
					ldlm_resource_insert_lock_after(lock, req);
					break;
				}
				continue;
			}

			if (unlikely(lock->l_req_mode == LCK_GROUP)) {
				/* If compared lock is GROUP, then requested is
				 * PR/PW so this is not compatible; extent
				 * range does not matter */
				if (*flags & LDLM_FL_BLOCK_NOWAIT) {
					compat = -EWOULDBLOCK;
					goto destroylock;
				} else {
					*flags |= LDLM_FL_NO_TIMEOUT;
				}
			} else if (lock->l_policy_data.l_extent.end < req_start ||
				   lock->l_policy_data.l_extent.start > req_end) {
				/* if a non-group lock doesn't overlap, skip it */
				continue;
			} else if (lock->l_req_extent.end < req_start ||
				   lock->l_req_extent.start > req_end) {
				/* false contention, the requests don't really
				 * overlap */
				check_contention = 0;
			}

			if (!work_list)
				RETURN(0);

			/* don't count conflicting glimpse locks */
			if (lock->l_req_mode == LCK_PR &&
			    lock->l_policy_data.l_extent.start == 0 &&
			    lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
				check_contention = 0;

			*contended_locks += check_contention;

			compat = 0;
			if (lock->l_blocking_ast &&
			    lock->l_req_mode != LCK_GROUP)
				ldlm_add_ast_work_item(lock, req, work_list);
		}
	}

	if (ldlm_check_contention(req, *contended_locks) &&
	    compat == 0 &&
	    (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
	    req->l_req_mode != LCK_GROUP &&
	    req_end - req_start <=
	    ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
		GOTO(destroylock, compat = -EUSERS);

	RETURN(compat);

destroylock:
	list_del_init(&req->l_res_link);
	ldlm_lock_destroy_nolock(req);
	*err = ELDLM_LOCK_ABORTED;
	RETURN(compat);
}

/**
 * This function refreshes the eviction timer for a cancelled lock.
 *
 * \param[in] lock	ldlm lock for refresh
 * \param[in] arg	ldlm prolong arguments, timeout, export, extent
 *			and counter are used
 */
void ldlm_lock_prolong_one(struct ldlm_lock *lock,
			   struct ldlm_prolong_args *arg)
{
	int timeout;

	if (arg->lpa_export != lock->l_export ||
	    lock->l_flags & LDLM_FL_DESTROYED)
		/* ignore unrelated locks */
		return;

	arg->lpa_locks_cnt++;

	if (!(lock->l_flags & LDLM_FL_AST_SENT))
		/* ignore locks not being cancelled */
		return;

	/* We are in the middle of the process - BL AST is sent, CANCEL
	 * is ahead. Take half of BL AT + IO AT process time.
	 */
	timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);

	LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);

	arg->lpa_blocks_cnt++;

	/* OK, this is a possible lock the user holds doing I/O;
	 * let's refresh the eviction timer for it.
	 */
	ldlm_refresh_waiting_lock(lock, timeout);
}
EXPORT_SYMBOL(ldlm_lock_prolong_one);

static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
						   void *data)
{
	struct ldlm_prolong_args *arg = data;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_lock *lock;

	ENTRY;

	LASSERT(!list_empty(&node->li_group));

	list_for_each_entry(lock, &node->li_group, l_sl_policy) {
		ldlm_lock_prolong_one(lock, arg);
	}

	RETURN(INTERVAL_ITER_CONT);
}

/**
 * Walk through the granted tree and prolong locks if they overlap the extent.
 *
 * \param[in] arg	prolong args
 */
void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
{
	struct ldlm_interval_tree *tree;
	struct ldlm_resource *res;
	struct interval_node_extent ex = { .start = arg->lpa_extent.start,
					   .end = arg->lpa_extent.end };
	int idx;

	ENTRY;

	res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
				&arg->lpa_resid, LDLM_EXTENT, 0);
	if (IS_ERR(res)) {
		CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
		       arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
		RETURN_EXIT;
	}

	lock_res(res);
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		tree = &res->lr_itree[idx];
		if (tree->lit_root == NULL) /* empty tree, skipped */
			continue;

		/* There is no possibility to check for the groupID,
		 * so all the group locks are considered as valid
		 * here, especially because the client is supposed
		 * to check it has such a lock before sending an RPC.
		 */
		if (!(tree->lit_mode & arg->lpa_mode))
			continue;

		interval_search(tree->lit_root, &ex,
				ldlm_resource_prolong_cb, arg);
	}
	unlock_res(res);

	ldlm_resource_putref(res);

	EXIT;
}
EXPORT_SYMBOL(ldlm_resource_prolong);
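
/*
 * Illustrative usage sketch (not from this file): a server-side I/O
 * handler that wants to keep the covering locks alive while a slow
 * request is processed might fill the arguments roughly as follows.
 * Field names match struct ldlm_prolong_args as used above; the values
 * are hypothetical.
 *
 *	struct ldlm_prolong_args arg = { 0 };
 *
 *	arg.lpa_export = req->rq_export;
 *	arg.lpa_resid = resource_id_of_the_object;
 *	arg.lpa_extent.start = io_start_offset;
 *	arg.lpa_extent.end = io_end_offset;
 *	arg.lpa_mode = LCK_PW;
 *	arg.lpa_timeout = estimated_io_time;
 *	ldlm_resource_prolong(&arg);
 */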

/**
 * Discard all AST work items from list.
 *
 * If for whatever reason we do not want to send ASTs to conflicting locks
 * anymore, disassemble the list with this function.
 */
static void discard_bl_list(struct list_head *bl_list)
{
	struct list_head *tmp, *pos;

	ENTRY;

	list_for_each_safe(pos, tmp, bl_list) {
		struct ldlm_lock *lock =
			list_entry(pos, struct ldlm_lock, l_bl_ast);

		list_del_init(&lock->l_bl_ast);
		LASSERT(ldlm_is_ast_sent(lock));
		ldlm_clear_ast_sent(lock);
		LASSERT(lock->l_bl_ast_run == 0);
		LASSERT(lock->l_blocking_lock);
		LDLM_LOCK_RELEASE(lock->l_blocking_lock);
		lock->l_blocking_lock = NULL;
		LDLM_LOCK_RELEASE(lock);
	}
	EXIT;
}
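
/*
 * Descriptive note: ldlm_process_extent_lock() below uses this on its
 * error path, when the enqueued lock was destroyed before the collected
 * blocking ASTs could be sent.
 */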

/**
 * Process a granting attempt for extent lock.
 * Must be called with ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues. The lock is granted if no conflicts are found in
 * either queue.
 *
 * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *
 * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent yet, so list of conflicting locks
 *     would be collected and ASTs sent.
 */
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
			     int first_enq, enum ldlm_error *err,
			     struct list_head *work_list)
{
	struct ldlm_resource *res = lock->l_resource;
	struct list_head rpc_list;
	int rc, rc2;
	int contended_locks = 0;

	ENTRY;

	LASSERT(lock->l_granted_mode != lock->l_req_mode);
	LASSERT(list_empty(&res->lr_converting));
	LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
		!ldlm_is_ast_discard_data(lock));
	INIT_LIST_HEAD(&rpc_list);
	check_res_locked(res);
	*err = ELDLM_OK;

	if (!first_enq) {
		/* Careful observers will note that we don't handle
		 * -EWOULDBLOCK here, but it's ok for a non-obvious reason --
		 * compat_queue can only return -EWOULDBLOCK if
		 * (flags & BLOCK_NOWAIT). flags should always be zero here,
		 * and if that ever stops being true, we want to find out. */
		LASSERT(*flags == 0);
		rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
					      err, NULL, &contended_locks);
		if (rc == 1) {
			rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
						      flags, err, NULL,
						      &contended_locks);
		}
		if (rc == 0)
			RETURN(LDLM_ITER_STOP);

		ldlm_resource_unlink_lock(lock);

		if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
			ldlm_extent_policy(res, lock, flags);
		ldlm_grant_lock(lock, work_list);
		RETURN(LDLM_ITER_CONTINUE);
	}

restart:
	contended_locks = 0;
	rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
				      &rpc_list, &contended_locks);
	if (rc < 0)
		GOTO(out, rc); /* lock was destroyed */
	if (rc == 2)
		goto grant;

	rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
				       &rpc_list, &contended_locks);
	if (rc2 < 0)
		GOTO(out, rc = rc2); /* lock was destroyed */

	if (rc + rc2 == 2) {
grant:
		ldlm_extent_policy(res, lock, flags);
		ldlm_resource_unlink_lock(lock);
		ldlm_grant_lock(lock, NULL);
	} else {
		/* If either of the compat_queue()s returned failure, then we
		 * have ASTs to send and must go onto the waiting list.
		 *
		 * bug 2322: we used to unlink and re-add here, which was a
		 * terrible folly -- if we goto restart, we could get
		 * re-ordered! Causes deadlock, because ASTs aren't sent! */
		if (list_empty(&lock->l_res_link))
			ldlm_resource_add_lock(res, &res->lr_waiting, lock);
		unlock_res(res);
		rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
				       LDLM_WORK_BL_AST);

		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
		    !ns_is_client(ldlm_res_to_ns(res)))
			class_fail_export(lock->l_export);

		lock_res(res);
		if (rc == -ERESTART) {
			/* 15715: The lock was granted and destroyed after
			 * resource lock was dropped. Interval node was freed
			 * in ldlm_lock_destroy. Anyway, this always happens
			 * when a client is being evicted. So it would be
			 * ok to return an error. -jay */
			if (ldlm_is_destroyed(lock)) {
				*err = -EAGAIN;
				GOTO(out, rc = -EAGAIN);
			}

			/* lock was granted while resource was unlocked. */
			if (lock->l_granted_mode == lock->l_req_mode) {
				/* bug 11300: if the lock has been granted,
				 * break earlier because otherwise, we will go
				 * to restart and ldlm_resource_unlink will be
				 * called and it causes the interval node to be
				 * freed. Then we will fail at
				 * ldlm_extent_add_lock() */
				*flags &= ~LDLM_FL_BLOCKED_MASK;
				GOTO(out, rc = 0);
			}

			GOTO(restart, rc);
		}

		/* this way we force client to wait for the lock
		 * endlessly once the lock is enqueued -bzzz */
		*flags |= LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT;
	}
	RETURN(0);

out:
	if (!list_empty(&rpc_list)) {
		LASSERT(!ldlm_is_ast_discard_data(lock));
		discard_bl_list(&rpc_list);
	}
	RETURN(rc);
}
#endif /* HAVE_SERVER_SUPPORT */

struct ldlm_kms_shift_args {
	__u64	old_kms;
	__u64	kms;
	bool	complete;
};

/* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
					    void *args)
{
	struct ldlm_kms_shift_args *arg = args;
	struct ldlm_interval *node = to_ldlm_interval(n);
	struct ldlm_lock *tmplock;
	struct ldlm_lock *lock = NULL;

	ENTRY;

	/* Since all locks in an interval have the same extent, we can just
	 * use the first lock without kms_ignore set. */
	list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
		if (ldlm_is_kms_ignore(tmplock))
			continue;

		lock = tmplock;
		break;
	}

	/* No locks in this interval without kms_ignore set */
	if (!lock)
		RETURN(INTERVAL_ITER_CONT);

	/* If we find a lock with a greater or equal kms, we are not the
	 * highest lock (or we share that distinction with another lock), and
	 * don't need to update KMS. Return old_kms and stop looking. */
	if (lock->l_policy_data.l_extent.end >= arg->old_kms) {
		arg->kms = arg->old_kms;
		arg->complete = true;
		RETURN(INTERVAL_ITER_STOP);
	}

	if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
		arg->kms = lock->l_policy_data.l_extent.end + 1;

	/* Since interval_iterate_reverse starts with the highest lock and
	 * works down, for PW locks, we only need to check if we should update
	 * the kms, then stop walking the tree. PR locks are not exclusive, so
	 * the highest start does not imply the highest end and we must
	 * continue. (Only one group lock is allowed per resource, so this is
	 * irrelevant for group locks.) */
	if (lock->l_granted_mode == LCK_PW)
		RETURN(INTERVAL_ITER_STOP);
	else
		RETURN(INTERVAL_ITER_CONT);
}

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock". This function returns the new KMS value, updating
 * it only if we were the highest lock.
 *
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval_tree *tree;
	struct ldlm_kms_shift_args args;
	int idx;

	ENTRY;

	args.old_kms = old_kms;
	args.kms = 0;
	args.complete = false;

	/* don't let another thread in ldlm_extent_shift_kms race in
	 * just after we finish and take our lock into account in its
	 * calculation of the kms */
	ldlm_set_kms_ignore(lock);

	/* We iterate over the lock trees, looking for the largest kms smaller
	 * than the current one. */
	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
		tree = &res->lr_itree[idx];

		/* If our already known kms is >= than the highest 'end' in
		 * this tree, we don't need to check this tree, because
		 * the kms from a tree can be lower than in_max_high (due to
		 * kms_ignore), but it can never be higher. */
		if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
			continue;

		interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
					 &args);

		/* this tells us we're not the highest lock, so we don't need
		 * to check the remaining trees */
		if (args.complete)
			break;
	}

	LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
		 args.old_kms);

	RETURN(args.kms);
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
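
/*
 * Illustrative example (hypothetical numbers): if the cancelled lock was
 * the only one reaching up to offset 1048575 (old KMS 1048576) and the
 * highest remaining lock ends at offset 65535, ldlm_extent_shift_kms()
 * returns 65536, i.e. the remaining lock's end + 1.
 */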

struct kmem_cache *ldlm_interval_slab;

struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
	struct ldlm_interval *node;

	ENTRY;

	LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
	OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
	if (node == NULL)
		RETURN(NULL);

	INIT_LIST_HEAD(&node->li_group);
	ldlm_interval_attach(node, lock);
	RETURN(node);
}

void ldlm_interval_free(struct ldlm_interval *node)
{
	if (node) {
		LASSERT(list_empty(&node->li_group));
		LASSERT(!interval_is_intree(&node->li_node));
		OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
	}
}

/* interval tree, for LDLM_EXTENT. */
void ldlm_interval_attach(struct ldlm_interval *n,
			  struct ldlm_lock *l)
{
	LASSERT(l->l_tree_node == NULL);
	LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

	list_add_tail(&l->l_sl_policy, &n->li_group);
	l->l_tree_node = n;
}

struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
	struct ldlm_interval *n = l->l_tree_node;

	if (n == NULL)
		return NULL;

	LASSERT(!list_empty(&n->li_group));
	l->l_tree_node = NULL;
	list_del_init(&l->l_sl_policy);

	return list_empty(&n->li_group) ? n : NULL;
}

static inline int ldlm_mode_to_index(enum ldlm_mode mode)
{
	int index;

	LASSERT(mode != 0);
	LASSERT(is_power_of_2(mode));
	for (index = -1; mode != 0; index++, mode >>= 1)
		;
	LASSERT(index < LCK_MODE_NUM);

	return index;
}
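
/*
 * For example: the function computes log2(mode), so assuming the usual
 * power-of-two lock mode values (LCK_EX == 1, LCK_PW == 2, LCK_PR == 4,
 * ...), LCK_PR maps to index 2 of the lr_itree[] array.
 */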

/** Add newly granted lock into interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
			  struct ldlm_lock *lock)
{
	struct interval_node *found, **root;
	struct ldlm_interval *node;
	struct ldlm_extent *extent;
	int idx, rc;

	LASSERT(lock->l_granted_mode == lock->l_req_mode);

	node = lock->l_tree_node;
	LASSERT(node != NULL);
	LASSERT(!interval_is_intree(&node->li_node));

	idx = ldlm_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

	/* node extent initialize */
	extent = &lock->l_policy_data.l_extent;
	rc = interval_set(&node->li_node, extent->start, extent->end);
	LASSERT(!rc);

	root = &res->lr_itree[idx].lit_root;
	found = interval_insert(&node->li_node, root);
	if (found) { /* The policy group found. */
		struct ldlm_interval *tmp = ldlm_interval_detach(lock);

		LASSERT(tmp != NULL);
		ldlm_interval_free(tmp);
		ldlm_interval_attach(to_ldlm_interval(found), lock);
	}
	res->lr_itree[idx].lit_size++;

	/* even though we use interval tree to manage the extent lock, we also
	 * add the locks into grant list, for debug purpose, .. */
	ldlm_resource_add_lock(res, &res->lr_granted, lock);

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
		struct ldlm_lock *lck;

		list_for_each_entry_reverse(lck, &res->lr_granted,
					    l_res_link) {
			if (lck == lock)
				continue;
			if (lockmode_compat(lck->l_granted_mode,
					    lock->l_granted_mode))
				continue;
			if (ldlm_extent_overlap(&lck->l_req_extent,
						&lock->l_req_extent)) {
				CDEBUG(D_ERROR,
				       "granting conflicting lock %p %p\n",
				       lck, lock);
				ldlm_resource_dump(D_ERROR, res);
				LBUG();
			}
		}
	}
}

/** Remove cancelled lock from resource interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval *node = lock->l_tree_node;
	struct ldlm_interval_tree *tree;
	int idx;

	if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
		return;

	idx = ldlm_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	tree = &res->lr_itree[idx];

	LASSERT(tree->lit_root != NULL); /* assure the tree is not null */

	tree->lit_size--;
	node = ldlm_interval_detach(lock);
	if (node) {
		interval_erase(&node->li_node, &tree->lit_root);
		ldlm_interval_free(node);
	}
}

void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
				      union ldlm_policy_data *lpolicy)
{
	lpolicy->l_extent.start = wpolicy->l_extent.start;
	lpolicy->l_extent.end = wpolicy->l_extent.end;
	lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}

void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
				      union ldlm_wire_policy_data *wpolicy)
{
	memset(wpolicy, 0, sizeof(*wpolicy));
	wpolicy->l_extent.start = lpolicy->l_extent.start;
	wpolicy->l_extent.end = lpolicy->l_extent.end;
	wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}