/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_support.h>
#include <linux/lustre_lib.h>

#include "ldlm_internal.h"

/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
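/*
 * Illustrative example (editorial, not from the original comments): for a
 * PW request on [4096, 8191] whose only conflicting granted lock is a PR
 * lock on [0, 1023], the extent grows to [1024, ~0]: downwards to just
 * past the conflicting lock's end, upwards without limit.
 */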
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* If lock doesn't overlap new_ex, skip it. */
                if (l_extent->end < new_ex->start ||
                    l_extent->start > new_ex->end)
                        continue;

                /* The locks conflict in their requested extents and we can't
                 * satisfy both, so ignore this one.  Either we will ping-pong
                 * this extent (we would regardless of the extent we granted)
                 * or the lock is unused and shouldn't limit our extent
                 * growth. */
                if (lock->l_req_extent.end >= req_start &&
                    lock->l_req_extent.start <= req_end)
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * We don't grow downwards if there are lots of lockers. */
                if (l_extent->start < req_start) {
                        if (atomic_read(&req->l_resource->lr_refcount) > 20)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end+1, req_start);
                }
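
                /* Illustrative example (editorial): for a request on
                 * [4096, 8191] on a lightly-referenced resource with a
                 * granted conflicting lock on [0, 1023], the extent grows
                 * down to min(1023 + 1, 4096) = 1024. */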

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }
        EXIT;
}
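
/*
 * Illustrative example (editorial): for a request on [100, 199] and a
 * conflicting granted lock whose extent grew to [150, 500] from a
 * requested extent of [300, 500], the granted lock overlaps the request
 * and must be cancelled anyway, so we grow upwards to just before its
 * requested start: new_ex->end = max(300 - 1, 199) = 299.
 */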

/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = ~0 };

        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}
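
/*
 * Editorial note: on an otherwise idle resource the request typically
 * grows to cover the whole object, [0, ~0].  LDLM_FL_LOCK_CHANGED
 * signals that the server modified the extent, so the enqueue path
 * returns the updated extent to the client rather than the one it asked
 * for.
 */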

/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever. */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int send_cbs)
{
        struct list_head *tmp;
        struct ldlm_lock *lock;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int compat = 1;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock)
                        RETURN(compat);

                /* locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode))
                        continue;

                /* if lock doesn't overlap skip it */
                if (lock->l_policy_data.l_extent.end < req_start ||
                    lock->l_policy_data.l_extent.start > req_end)
                        continue;

                /* the locks conflict; if we aren't sending blocking
                 * callbacks, just report the incompatibility */
                if (!send_cbs)
                        RETURN(0);

                compat = 0;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, req, NULL, 0);
        }

        RETURN(compat);
}
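
/*
 * Illustrative example (editorial): two PR locks on overlapping extents
 * [0, 4095] and [1024, 8191] are mode-compatible, so both pass.  A PW
 * request on [2048, 2048] conflicts with both; with send_cbs set, each
 * conflicting lock that has a blocking AST gets a work item queued and
 * compat drops to 0.
 */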

/* If first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - the caller has already initialized res->lr_tmp
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - the caller has NOT initialized res->lr_tmp, so we must do so here
 *   - must call this function with the ns lock held once */
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        int rc;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));

        if (!first_enq) {
                LASSERT(res->lr_tmp != NULL);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 0);
                if (!rc)
                        RETURN(LDLM_ITER_STOP);
                rc = ldlm_extent_compat_queue(&res->lr_waiting, lock, 0);
                if (!rc)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, NULL, 0, 1);
                RETURN(LDLM_ITER_CONTINUE);
        }

 restart:
        LASSERT(res->lr_tmp == NULL);
        res->lr_tmp = &rpc_list;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 1);
        rc += ldlm_extent_compat_queue(&res->lr_waiting, lock, 1);
        res->lr_tmp = NULL;

        if (rc != 2) {
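                /* Each compat_queue() call above returns 1 when the lock is
                 * compatible with that queue, so rc == 2 means compatible
                 * with both the granted and waiting queues. */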
                /* If either of the compat_queue()s returned 0, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                l_unlock(&res->lr_namespace->ns_lock);
                rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list);
                l_lock(&res->lr_namespace->ns_lock);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
        } else {
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL, 0, 0);
        }
        RETURN(0);
}

/* When a lock is cancelled by a client, the KMS (known minimum size) may
 * change if this was the "highest lock".  This function returns the new
 * KMS value.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
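/*
 * Illustrative example (editorial): with granted locks covering
 * [0, 1023] and [2048, 4095] and old_kms == 4096, cancelling the
 * [2048, 4095] lock leaves [0, 1023] as the highest remaining lock,
 * so the new KMS is 1023 + 1 = 1024.
 */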
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        l_lock(&res->lr_namespace->ns_lock);
        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck == lock)
                        continue;
                if (lck->l_policy_data.l_extent.end >= old_kms)
                        GOTO(out, kms = old_kms);
                /* lr_granted isn't sorted by extent end, so keep the max */
                kms = max(lck->l_policy_data.l_extent.end + 1, kms);
        }
 out:
        l_unlock(&res->lr_namespace->ns_lock);
        RETURN(kms);
}