/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <linux/lustre_dlm.h>
#include <linux/obd_support.h>
#include <linux/lustre_lib.h>

#include "ldlm_internal.h"

/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing, conflicting extents outside the requested one
 *
 * An alternative policy is to not shrink the new extent when conflicts exist.
 */
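
/* Worked example with made-up numbers (an illustration, not taken from the
 * original code): the caller starts new_ex at the whole range [0, ~0] and
 * requests the extent [4096, 8191].  Suppose the queue holds two conflicting
 * locks covering [0, 1023] and [10000, 20000].  The first conflict pulls
 * new_ex->start up to MIN(1023 + 1, 4096) = 1024; the second pulls
 * new_ex->end down to MAX(10000 - 1, 8191) = 9999.  The result, [1024, 9999],
 * is the widest extent that still contains the request and does not overlap
 * either conflict. */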
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_policy_data.l_extent.start;
        __u64 req_end = req->l_policy_data.l_extent.end;
        ENTRY;

        if (new_ex->start == req_start && new_ex->end == req_end) {
                EXIT;
                return;
        }

        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock) {
                        EXIT;
                        return;
                }

                /* if lock doesn't overlap new_ex, skip it. */
                if (lock->l_policy_data.l_extent.end < new_ex->start ||
                    lock->l_policy_data.l_extent.start > new_ex->end)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode))
                        continue;

                if (lock->l_policy_data.l_extent.start < req_start) {
                        if (lock->l_policy_data.l_extent.end == ~0) {
                                new_ex->start = req_start;
                                new_ex->end = req_end;
                                EXIT;
                                return;
                        }
                        new_ex->start = MIN(lock->l_policy_data.l_extent.end+1,
                                            req_start);
                }

                if (lock->l_policy_data.l_extent.end > req_end) {
                        if (lock->l_policy_data.l_extent.start == 0) {
                                new_ex->start = req_start;
                                new_ex->end = req_end;
                                EXIT;
                                return;
                        }
                        new_ex->end = MAX(lock->l_policy_data.l_extent.start-1,
                                          req_end);
                }
        }
        EXIT;
}

/* Determine if the lock is compatible with all locks on the queue. */
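/* For illustration (hypothetical modes and offsets): two PR (read) locks are
 * compatible even when their extents overlap, so neither constrains the
 * other here.  A PW (write) request over [0, 4095] against a granted PR lock
 * on [1000, 2000] is a conflict: with send_cbs == 0 the scan stops and
 * reports incompatibility, while with send_cbs != 0 a blocking AST is queued
 * for the PR holder and the scan continues. */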
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int send_cbs)
{
        struct list_head *tmp;
        struct ldlm_lock *lock;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_policy_data.l_extent.start;
        __u64 req_end = req->l_policy_data.l_extent.end;
        int compat = 1;
        ENTRY;

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock)
                        RETURN(compat);

                /* locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode))
                        continue;

                /* if lock doesn't overlap skip it */
                if (lock->l_policy_data.l_extent.end < req_start ||
                    lock->l_policy_data.l_extent.start > req_end)
                        continue;

                if (!send_cbs)
                        RETURN(0);

                compat = 0;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, req, NULL, 0);
        }

        RETURN(compat);
}

/* If first_enq is 0 (ie, called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - the caller has already initialized req->lr_tmp
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (ie, called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - the caller has NOT initialized req->lr_tmp, so we must do so here
 *   - must call this function with the ns lock held once */
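
/* Rough sketch of the two call paths described above (a summary for the
 * reader, not part of the original interface documentation):
 *
 *   first_enq == 1: ldlm_lock_enqueue() -> ldlm_process_extent_lock();
 *     the extent may be widened, blocking ASTs are gathered and sent, and
 *     the lock is either granted or left on lr_waiting with
 *     LDLM_FL_BLOCK_GRANTED set.
 *
 *   first_enq == 0: ldlm_reprocess_queue() -> ldlm_process_extent_lock();
 *     only compatibility is checked, and the lock is granted if nothing
 *     conflicts, otherwise the queue walk stops (LDLM_ITER_STOP). */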
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_extent new_ex = {0, ~0};
        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
        int rc;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));

        if (!first_enq) {
                LASSERT(res->lr_tmp != NULL);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 0);
                if (!rc)
                        RETURN(LDLM_ITER_STOP);
                rc = ldlm_extent_compat_queue(&res->lr_waiting, lock, 0);
                if (!rc)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL, 0, 1);
                RETURN(LDLM_ITER_CONTINUE);
        }

        /* In order to determine the largest possible extent we can grant,
         * we need to scan all of the queues. */
        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }

 restart:
        LASSERT(res->lr_tmp == NULL);
        res->lr_tmp = &rpc_list;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 1);
        rc += ldlm_extent_compat_queue(&res->lr_waiting, lock, 1);
        res->lr_tmp = NULL;

        if (rc != 2) {
                /* If either of the compat_queue()s returned 0, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                l_unlock(&res->lr_namespace->ns_lock);
                rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list);
                l_lock(&res->lr_namespace->ns_lock);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
        } else {
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL, 0, 0);
        }
        RETURN(0);
}