/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */
#define DEBUG_SUBSYSTEM S_LDLM

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_support.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
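
/* Note: LDLM_MAX_GROWN_EXTENT caps how far ldlm_extent_internal_policy_fixup()
 * below will grow a heavily contended PW/CW extent: its end is clamped to no
 * more than 32MB - 1 bytes past the requested start offset. */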

/* fixup the ldlm_extent after expanding */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
                                              struct ldlm_extent *new_ex,
                                              int conflicting)
{
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        __u64 req_align, mask;

        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }

        if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
                EXIT;
                return;
        }

        /* we need to ensure that the lock extent is properly aligned to what
         * the client requested.  We align it to the lowest common denominator
         * of the client's requested lock start and end alignment. */
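        /* For example (illustrative request): req_start = 0x11000 and
         * req_end = 0x11fff give req_align = 0x12000 | 0x11000 = 0x13000;
         * the loop below stops at mask = 0x1000 (the lowest set bit of
         * req_align at or above 0x1000), so after "mask -= 1" the granted
         * extent is trimmed to 4KB boundaries. */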
        mask = 0x1000ULL;
        req_align = (req_end + 1) | req_start;
        if (req_align != 0) {
                while ((req_align & mask) == 0)
                        mask <<= 1;
        }
        mask -= 1;
        /* We can only shrink the lock, not grow it.
         * This should never cause lock to be smaller than requested,
         * since requested lock was already aligned on these boundaries. */
        new_ex->start = ((new_ex->start - 1) | mask) + 1;
        new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
        LASSERTF(new_ex->start <= req_start,
                 "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
                 mask, new_ex->start, req_start);
        LASSERTF(new_ex->end >= req_end,
                 "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
                 mask, new_ex->end, req_end);
}

/* The purpose of this function is to return:
 *  - the maximum extent
 *  - containing the requested extent
 *  - and not overlapping existing conflicting extents outside the requested one
 *
 * Use the interval tree to expand the lock extent for granted locks. */
static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
                                                struct ldlm_extent *new_ex)
{
        struct ldlm_resource *res = req->l_resource;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_interval_tree *tree;
        struct interval_node_extent limiter = { new_ex->start, new_ex->end };
        int conflicting = 0;
        int idx;
        ENTRY;

        lockmode_verify(req_mode);

        /* Use the interval tree to handle granted extent locks. */
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                struct interval_node_extent ext = { req_start, req_end };

                tree = &res->lr_itree[idx];
                if (lockmode_compat(tree->lit_mode, req_mode))
                        continue;

                conflicting += tree->lit_size;
                if (conflicting > 4)
                        limiter.start = req_start;

                if (interval_is_overlapped(tree->lit_root, &ext))
                        printk("req_mode = %d, tree->lit_mode = %d, "
                               "tree->lit_size = %d\n",
                               req_mode, tree->lit_mode, tree->lit_size);
                interval_expand(tree->lit_root, &ext, &limiter);
                limiter.start = max(limiter.start, ext.start);
                limiter.end = min(limiter.end, ext.end);
                if (limiter.start == req_start && limiter.end == req_end)
                        break;
        }

        new_ex->start = limiter.start;
        new_ex->end = limiter.end;
        LASSERT(new_ex->start <= req_start);
        LASSERT(new_ex->end >= req_end);

        ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
        EXIT;
}
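
/* Illustration of the expansion above (made-up extents): for a request of
 * [4096, 8191] with conflicting granted extents [0, 1023] and [16384, 20479],
 * interval_expand() widens the limiter to the surrounding free gap
 * [1024, 16383], which becomes new_ex before the alignment fixup. */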

/* The purpose of this function is to return:
 *  - the maximum extent
 *  - containing the requested extent
 *  - and not overlapping existing conflicting extents outside the requested one
 */
static void
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
                                    struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        struct ldlm_resource *res = req->l_resource;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);

        /* for waiting locks */
        list_for_each(tmp, &res->lr_waiting) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                /* We already hit the minimum requested size, search no more */
                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;

                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;

                /* If lock doesn't overlap new_ex, skip it. */
                if (!ldlm_extent_overlap(l_extent, new_ex))
                        continue;

                /* Locks conflicting in requested extents and we can't satisfy
                 * both locks, so ignore it.  Either we will ping-pong this
                 * extent (we would regardless of what extent we granted) or
                 * lock is unused and it shouldn't limit our extent growth. */
                if (ldlm_extent_overlap(&lock->l_req_extent, &req->l_req_extent))
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond previous request.
                 * l_req_extent->end strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end + 1, req_start);
                }

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }

        ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
        EXIT;
}
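
/* Illustration of the downward-growth rule above (made-up extents): with a
 * waiting lock on [0, 4095] whose requested extent does not intersect ours,
 * and our request at [8192, 12287], new_ex->start is pulled up to 4096 so the
 * expanded extent stops just past that lock instead of growing down to 0. */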

/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

        if (lock->l_export == NULL)
                /*
                 * This is a local lock taken by the server (e.g., as a part of
                 * OST-side locking, or unlink handling).  Expansion doesn't
                 * make a lot of sense for local locks, because they are
                 * dropped immediately on operation completion and would only
                 * conflict with other threads.
                 */
                return;

        if (lock->l_policy_data.l_extent.start == 0 &&
            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                /* fast-path whole file locks */
                return;

        ldlm_extent_internal_policy_granted(lock, &new_ex);
        ldlm_extent_internal_policy_waiting(lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}
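
/* Net effect: the server may grant a larger extent than the client asked for;
 * LDLM_FL_LOCK_CHANGED lets the enqueue reply carry the (possibly expanded)
 * granted extent back to the client. */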

static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
        struct ldlm_resource *res = lock->l_resource;
        cfs_time_t now = cfs_time_current();

        CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
        if (contended_locks > res->lr_namespace->ns_contended_locks)
                res->lr_contention_time = now;
        return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
                cfs_time_seconds(res->lr_namespace->ns_contention_time)));
}
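
/* Example (assuming the usual namespace defaults of ns_contended_locks = 32
 * and ns_contention_time = 2 seconds): once a reprocessing pass counts more
 * than 32 conflicting locks on a resource, the resource is marked contended
 * and this check keeps returning true for the following 2 seconds. */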

struct ldlm_extent_compat_args {
        struct list_head *work_list;
        struct ldlm_lock *lock;
        ldlm_mode_t mode;
        int *locks;
        int *compat;
};

static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
                                                void *data)
{
        struct ldlm_extent_compat_args *priv = data;
        struct ldlm_interval *node = to_ldlm_interval(n);
        struct ldlm_extent *extent;
        struct list_head *work_list = priv->work_list;
        struct ldlm_lock *lock, *enq = priv->lock;
        ldlm_mode_t mode = priv->mode;
        int count = 0;
        ENTRY;

        LASSERT(!list_empty(&node->li_group));

        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                /* the interval tree only holds granted locks */
                LASSERTF(mode == lock->l_granted_mode,
                         "mode = %s, lock->l_granted_mode = %s\n",
                         ldlm_lockname[mode],
                         ldlm_lockname[lock->l_granted_mode]);

                count++;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, enq, work_list);
        }
        LASSERT(count > 0);

        /* don't count conflicting glimpse locks */
        extent = ldlm_interval_extent(node);
        if (!(mode == LCK_PR &&
              extent->start == 0 && extent->end == OBD_OBJECT_EOF))
                *priv->locks += count;

        if (priv->compat)
                *priv->compat = 0;

        RETURN(INTERVAL_ITER_CONT);
}

/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
 *
 * 0 if the lock is not compatible
 * 1 if the lock is compatible
 * 2 if this group lock is compatible and requires no further checking
 * negative error, such as EWOULDBLOCK for group locks
 */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int *flags, ldlm_error_t *err,
                         struct list_head *work_list, int *contended_locks)
{
        struct list_head *tmp;
        struct ldlm_lock *lock;
        struct ldlm_resource *res = req->l_resource;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int compat = 1;
        int scan = 0;
        int check_contention;
        ENTRY;

        lockmode_verify(req_mode);

        /* Use the interval tree for granted locks */
        if (queue == &res->lr_granted) {
                struct ldlm_interval_tree *tree;
                struct ldlm_extent_compat_args data = {.work_list = work_list,
                                                       .lock = req,
                                                       .locks = contended_locks,
                                                       .compat = &compat };
                struct interval_node_extent ex = { .start = req_start,
                                                   .end = req_end };
                int idx, rc;

                for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                        tree = &res->lr_itree[idx];
                        if (tree->lit_root == NULL) /* empty tree, skipped */
                                continue;

                        data.mode = tree->lit_mode;
                        if (lockmode_compat(req_mode, tree->lit_mode)) {
                                struct ldlm_interval *node;
                                struct ldlm_extent *extent;

                                if (req_mode != LCK_GROUP)
                                        continue;

                                /* group lock, grant it immediately if
                                 * compatible */
                                node = to_ldlm_interval(tree->lit_root);
                                extent = ldlm_interval_extent(node);
                                if (req->l_policy_data.l_extent.gid ==
                                    extent->gid)
                                        RETURN(2);
                        }

                        if (tree->lit_mode == LCK_GROUP) {
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }

                                *flags |= LDLM_FL_NO_TIMEOUT;
                                if (!work_list)
                                        RETURN(0);

                                /* if work list is not NULL, add all
                                 * locks in the tree to work list */
                                compat = 0;
                                interval_iterate(tree->lit_root,
                                                 ldlm_extent_compat_cb, &data);
                                continue;
                        }

                        if (!work_list) {
                                rc = interval_is_overlapped(tree->lit_root, &ex);
                                if (rc)
                                        RETURN(0);
                        } else {
                                interval_search(tree->lit_root, &ex,
                                                ldlm_extent_compat_cb, &data);
                                if (!list_empty(work_list) && compat)
                                        compat = 0;
                        }
                }
        } else { /* for waiting queue */
                list_for_each(tmp, queue) {
                        check_contention = 1;

                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                        if (req == lock)
                                break;

                        if (unlikely(scan)) {
                                /* We only get here if we are queuing a GROUP
                                 * lock and met some incompatible one.  The main
                                 * idea of this code is to insert the GROUP lock
                                 * past compatible GROUP locks in the waiting
                                 * queue, or if there are none, then in front of
                                 * the first non-GROUP lock. */
                                if (lock->l_req_mode != LCK_GROUP) {
                                        /* Ok, we hit a non-GROUP lock, there
                                         * should be no more GROUP locks later
                                         * on, queue in front of the first
                                         * non-GROUP lock. */
                                        ldlm_resource_insert_lock_after(lock, req);
                                        list_del_init(&lock->l_res_link);
                                        ldlm_resource_insert_lock_after(req, lock);
                                        break;
                                }
                                if (req->l_policy_data.l_extent.gid ==
                                    lock->l_policy_data.l_extent.gid) {
                                        /* found it */
                                        ldlm_resource_insert_lock_after(lock, req);
                                        break;
                                }
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_req_mode, req_mode)) {
                                if (req_mode == LCK_PR &&
                                    ((lock->l_policy_data.l_extent.start <=
                                      req->l_policy_data.l_extent.start) &&
                                     (lock->l_policy_data.l_extent.end >=
                                      req->l_policy_data.l_extent.end))) {
                                        /* If we met a PR lock just like us or
                                         * wider, and nobody down the list
                                         * conflicted with it, that means we can
                                         * skip processing of the rest of the
                                         * list and safely place ourselves at
                                         * the end of the list, or grant
                                         * (depending on whether we met a
                                         * conflicting lock later or not).
                                         * In case of the first enqueue only, we
                                         * continue traversing if there is
                                         * something conflicting down the list
                                         * because we need to make sure that
                                         * something is marked as AST_SENT as
                                         * well; in case of an empty work list
                                         * we would exit on the first conflict
                                         * met. */
                                        /* There IS a case where such flag is
                                         * not set for a lock, yet it blocks
                                         * something.  Luckily for us this is
                                         * only during destroy, so lock is
                                         * exclusive.  So here we are safe. */
                                        if (!(lock->l_flags & LDLM_FL_AST_SENT)) {
                                                RETURN(compat);
                                        }
                                }

                                /* non-group locks are compatible, overlap
                                 * doesn't matter */
                                if (likely(req_mode != LCK_GROUP))
                                        continue;

                                /* If we are trying to get a GROUP lock and
                                 * there is another one of this kind, we need
                                 * to compare gid */
                                if (req->l_policy_data.l_extent.gid ==
                                    lock->l_policy_data.l_extent.gid) {
                                        /* If existing lock with matched gid is
                                         * granted, we grant new one too. */
                                        if (lock->l_req_mode == lock->l_granted_mode)
                                                RETURN(2);

                                        /* Otherwise we are scanning the queue
                                         * of waiting locks and it means the
                                         * current request would block along
                                         * with the existing lock (that is
                                         * already blocked).
                                         * If we are in nonblocking mode, return
                                         * immediately. */
                                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                                compat = -EWOULDBLOCK;
                                                goto destroylock;
                                        }
                                        /* If this group lock is compatible with
                                         * another group lock on the waiting
                                         * list, they must be together in the
                                         * list, so they can be granted at the
                                         * same time.  Otherwise the later lock
                                         * can get stuck behind another,
                                         * incompatible, lock. */
                                        ldlm_resource_insert_lock_after(lock, req);
                                        /* Because 'lock' is not granted, we can
                                         * stop processing this queue and return
                                         * immediately.  There is no need to
                                         * check the rest of the list. */
                                        RETURN(0);
                                }
                        }

                        if (unlikely(req_mode == LCK_GROUP &&
                                     (lock->l_req_mode != lock->l_granted_mode))) {
                                scan = 1;
                                compat = 0;
                                if (lock->l_req_mode != LCK_GROUP) {
                                        /* Ok, we hit a non-GROUP lock, there
                                         * should be no more GROUP locks later
                                         * on, queue in front of the first
                                         * non-GROUP lock */
                                        ldlm_resource_insert_lock_after(lock, req);
                                        list_del_init(&lock->l_res_link);
                                        ldlm_resource_insert_lock_after(req, lock);
                                        break;
                                }
                                if (req->l_policy_data.l_extent.gid ==
                                    lock->l_policy_data.l_extent.gid) {
                                        /* found it */
                                        ldlm_resource_insert_lock_after(lock, req);
                                        break;
                                }
                                continue;
                        }

                        if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                                /* If the compared lock is GROUP, then the
                                 * requested lock is PR/PW, so this is not
                                 * compatible; the extent range does not
                                 * matter */
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                } else {
                                        *flags |= LDLM_FL_NO_TIMEOUT;
                                }
                        } else if (lock->l_policy_data.l_extent.end < req_start ||
                                   lock->l_policy_data.l_extent.start > req_end) {
                                /* if a non-group lock doesn't overlap, skip it */
                                continue;
                        } else if (lock->l_req_extent.end < req_start ||
                                   lock->l_req_extent.start > req_end)
                                /* false contention, the requests don't really
                                 * overlap */
                                check_contention = 0;

                        if (!work_list)
                                RETURN(0);

                        /* don't count conflicting glimpse locks */
                        if (lock->l_req_mode == LCK_PR &&
                            lock->l_policy_data.l_extent.start == 0 &&
                            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                                check_contention = 0;

                        *contended_locks += check_contention;

                        compat = 0;
                        if (lock->l_blocking_ast)
                                ldlm_add_ast_work_item(lock, req, work_list);
                }
        }

        if (ldlm_check_contention(req, *contended_locks) &&
            compat == 0 &&
            (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
            req->l_req_mode != LCK_GROUP &&
            req_end - req_start <=
            req->l_resource->lr_namespace->ns_max_nolock_size)
                GOTO(destroylock, compat = -EUSERS);

        RETURN(compat);

destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy_nolock(req);
        *err = ELDLM_LOCK_ABORTED;
        RETURN(compat);
}

static void discard_bl_list(struct list_head *bl_list)
{
        struct list_head *tmp, *pos;
        ENTRY;

        list_for_each_safe(pos, tmp, bl_list) {
                struct ldlm_lock *lock =
                        list_entry(pos, struct ldlm_lock, l_bl_ast);

                list_del_init(&lock->l_bl_ast);
                LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                lock->l_flags &= ~LDLM_FL_AST_SENT;
                LASSERT(lock->l_bl_ast_run == 0);
                LASSERT(lock->l_blocking_lock);
                LDLM_LOCK_PUT(lock->l_blocking_lock);
                lock->l_blocking_lock = NULL;
                LDLM_LOCK_PUT(lock);
        }
        EXIT;
}

/* If first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - must call this function with the ns lock held once */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = CFS_LIST_HEAD_INIT(rpc_list);
        int rc, rc2;
        int contended_locks = 0;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !(lock->l_flags & LDLM_AST_DISCARD_DATA));
        check_res_locked(res);
        *err = ELDLM_OK;

        if (!first_enq) {
                /* Careful observers will note that we don't handle -EWOULDBLOCK
                 * here, but it's ok for a non-obvious reason -- compat_queue
                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
                 * flags should always be zero here, and if that ever stops
                 * being true, we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                              err, NULL, &contended_locks);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, err, NULL,
                                                      &contended_locks);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
                        ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, work_list);
                RETURN(LDLM_ITER_CONTINUE);
        }

restart:
        contended_locks = 0;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
                                      &rpc_list, &contended_locks);
        if (rc < 0)
                GOTO(out, rc); /* lock was destroyed */
        if (rc == 2)
                goto grant;

        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
                                       &rpc_list, &contended_locks);
        if (rc2 < 0)
                GOTO(out, rc = rc2); /* lock was destroyed */

        if (rc + rc2 == 2) {
grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                unlock_res(res);
                rc = ldlm_run_bl_ast_work(&rpc_list);
                lock_res(res);

                if (rc == -ERESTART) {
                        /* lock was granted while resource was unlocked. */
                        if (lock->l_granted_mode == lock->l_req_mode) {
                                /* bug 11300: if the lock has been granted,
                                 * break earlier because otherwise, we will go
                                 * to restart and ldlm_resource_unlink will be
                                 * called and it causes the interval node to be
                                 * freed.  Then we will fail at
                                 * ldlm_extent_add_lock() */
                                *flags &= ~(LDLM_FL_BLOCK_GRANTED | LDLM_FL_BLOCK_CONV |
                                            LDLM_FL_BLOCK_WAIT);
                                GOTO(out, rc = 0);
                        }

                        GOTO(restart, -ERESTART);
                }

                *flags |= LDLM_FL_BLOCK_GRANTED;
                /* this way we force the client to wait for the lock
                 * endlessly once the lock is enqueued -bzzz */
                *flags |= LDLM_FL_NO_TIMEOUT;
        }
        RETURN(0);
out:
        if (!list_empty(&rpc_list)) {
                LASSERT(!(lock->l_flags & LDLM_AST_DISCARD_DATA));
                discard_bl_list(&rpc_list);
        }
        RETURN(rc);
}

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold ns_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        lock->l_flags |= LDLM_FL_KMS_IGNORE;

        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;

                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked above)
                 * so kms can only ever be smaller or the same as old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);

        RETURN(kms);
}
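
/* KMS example (made-up extents): with old_kms = 65536, if the remaining
 * granted locks not marked LDLM_FL_KMS_IGNORE cover [0, 4095] and
 * [8192, 16383], the loop above returns a new KMS of 16384; if any remaining
 * lock still ends at or beyond old_kms, old_kms is simply kept. */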

cfs_mem_cache_t *ldlm_interval_slab;

struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
        struct ldlm_interval *node;
        ENTRY;

        LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
        OBD_SLAB_ALLOC(node, ldlm_interval_slab, CFS_ALLOC_IO, sizeof(*node));
        if (node == NULL)
                RETURN(NULL);

        CFS_INIT_LIST_HEAD(&node->li_group);
        ldlm_interval_attach(node, lock);
        RETURN(node);
}

void ldlm_interval_free(struct ldlm_interval *node)
{
        if (node) {
                LASSERT(list_empty(&node->li_group));
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        }
}

/* interval tree, for LDLM_EXTENT. */
void ldlm_interval_attach(struct ldlm_interval *n,
                          struct ldlm_lock *l)
{
        LASSERT(l->l_tree_node == NULL);
        LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

        list_add_tail(&l->l_sl_policy, &n->li_group);
        l->l_tree_node = n;
}

struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
        struct ldlm_interval *n = l->l_tree_node;

        if (n == NULL)
                return NULL;

        LASSERT(!list_empty(&n->li_group));
        l->l_tree_node = NULL;
        list_del_init(&l->l_sl_policy);

        return (list_empty(&n->li_group) ? n : NULL);
}

static inline int lock_mode_to_index(ldlm_mode_t mode)
{
        int index;

        LASSERT(mode != 0);
        LASSERT(IS_PO2(mode));
        for (index = -1; mode; index++, mode >>= 1) ;
        LASSERT(index < LCK_MODE_NUM);
        return index;
}
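
/* Example: ldlm_mode_t values are distinct single bits (LCK_EX = 1,
 * LCK_PW = 2, LCK_PR = 4, ...), so lock_mode_to_index() returns the bit
 * position, e.g. lock_mode_to_index(LCK_PR) == 2, selecting the PR interval
 * tree slot in res->lr_itree[]. */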

void ldlm_extent_add_lock(struct ldlm_resource *res,
                          struct ldlm_lock *lock)
{
        struct interval_node *found, **root;
        struct ldlm_interval *node;
        struct ldlm_extent *extent;
        int idx;

        LASSERT(lock->l_granted_mode == lock->l_req_mode);

        node = lock->l_tree_node;
        LASSERT(node != NULL);

        idx = lock_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == 1 << idx);
        LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

        /* node extent initialize */
        extent = &lock->l_policy_data.l_extent;
        interval_set(&node->li_node, extent->start, extent->end);

        root = &res->lr_itree[idx].lit_root;
        found = interval_insert(&node->li_node, root);
        if (found) { /* The policy group found. */
                struct ldlm_interval *tmp = ldlm_interval_detach(lock);
                LASSERT(tmp != NULL);
                ldlm_interval_free(tmp);
                ldlm_interval_attach(to_ldlm_interval(found), lock);
        }
        res->lr_itree[idx].lit_size++;

        /* even though we use the interval tree to manage extent locks, we also
         * add the locks to the granted list, e.g. for debugging purposes */
        ldlm_resource_add_lock(res, &res->lr_granted, lock);
}

void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_interval *node;
        struct ldlm_interval_tree *tree;
        int idx;

        if (lock->l_granted_mode != lock->l_req_mode)
                return;

        LASSERT(lock->l_tree_node != NULL);
        idx = lock_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == 1 << idx);
        tree = &res->lr_itree[idx];

        LASSERT(tree->lit_root != NULL); /* assure the tree is not null */

        tree->lit_size--;
        node = ldlm_interval_detach(lock);
        if (node) {
                interval_erase(&node->li_node, &tree->lit_root);
                ldlm_interval_free(node);
        }
}