lustre/ldlm/ldlm_extent.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <obd_support.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

/* The purpose of this function is to return:
 * - the maximum extent
 * - containing the requested extent
 * - and not overlapping existing conflicting extents outside the requested one
 */
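/* Illustrative example (not from the original source): suppose a client asks
 * for a PW lock on [4096, 8191] and the only conflicting granted lock covers
 * [0, 1023] (and was originally requested as [0, 1023]).  Starting from the
 * widest candidate extent [0, OBD_OBJECT_EOF], the loop below raises
 * new_ex->start to 1024 (just past the conflicting extent) and leaves
 * new_ex->end at OBD_OBJECT_EOF, so the request is grown to
 * [1024, OBD_OBJECT_EOF] while still containing the requested range. */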
static void
ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req,
                            struct ldlm_extent *new_ex)
{
        struct list_head *tmp;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                struct ldlm_lock *lock;
                struct ldlm_extent *l_extent;

                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                l_extent = &lock->l_policy_data.l_extent;

                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;

                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;

                /* If lock doesn't overlap new_ex, skip it. */
                if (l_extent->end < new_ex->start ||
                    l_extent->start > new_ex->end)
                        continue;

                /* These locks conflict in their requested extents and we
                 * can't satisfy both, so ignore this one.  Either we will
                 * ping-pong this extent (we would regardless of what extent
                 * we granted) or the lock is unused and shouldn't limit our
                 * extent growth. */
                if (lock->l_req_extent.end >= req_start &&
                    lock->l_req_extent.start <= req_end)
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * l_req_extent.end is strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end+1, req_start);
                }

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }

#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }
        EXIT;
}

/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
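/* Illustrative note (not from the original source): when nothing conflicts,
 * new_ex stays at the widest candidate [0, OBD_OBJECT_EOF], so a small read
 * or write request is typically expanded to a whole-file lock here; the
 * LDLM_FL_LOCK_CHANGED flag below tells the client that the extent it was
 * granted differs from the one it asked for. */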
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, int *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

        if (lock->l_export == NULL)
                /*
                 * this is a local lock taken by the server (e.g., as a part
                 * of OST-side locking, or unlink handling). Expansion doesn't
                 * make a lot of sense for local locks, because they are
                 * dropped immediately on operation completion and would only
                 * conflict with other threads.
                 */
                return;

        if (lock->l_policy_data.l_extent.start == 0 &&
            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                /* fast-path whole file locks */
                return;

        ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex);
        ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex);

        if (new_ex.start != lock->l_policy_data.l_extent.start ||
            new_ex.end != lock->l_policy_data.l_extent.end) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}

/* Determine if the lock is compatible with all locks on the queue.
 * We stop walking the queue if we hit ourselves so we don't take
 * conflicting locks enqueued after us into account, or we'd wait forever.
 *
 * Returns:
 *   0 if the lock is not compatible
 *   1 if the lock is compatible
 *   2 if this group lock is compatible and requires no further checking
 *   negative error, such as -EWOULDBLOCK for group locks
 */
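/* Illustrative note (not from the original source): ldlm_process_extent_lock()
 * below is the only caller of this static helper.  It treats 1 as
 * "compatible, keep checking the other queue", 0 as "must wait (and possibly
 * send blocking ASTs)", 2 as "grant immediately without scanning the waiting
 * queue", and a negative value as "the request was destroyed, propagate the
 * error". */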
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         int send_cbs, int *flags, ldlm_error_t *err)
{
        struct list_head *tmp;
        struct ldlm_lock *lock;
        ldlm_mode_t req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        int compat = 1;
        int scan = 0;
        ENTRY;

        lockmode_verify(req_mode);

        list_for_each(tmp, queue) {
                lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (req == lock)
                        RETURN(compat);

                if (unlikely(scan)) {
                        /* We only get here if we are queuing a GROUP lock
                           and have met an incompatible one.  The main idea
                           of this code is to insert the GROUP lock past a
                           compatible GROUP lock in the waiting queue or, if
                           there is none, in front of the first non-GROUP
                           lock. */
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* Ok, we hit a non-GROUP lock; there should
                                   be no more GROUP locks later on, so queue
                                   in front of the first non-GROUP lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                RETURN(0);
                        }
                        if (req->l_policy_data.l_extent.gid ==
                             lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                RETURN(0);
                        }
                        continue;
                }

                /* locks are compatible, overlap doesn't matter */
                if (lockmode_compat(lock->l_req_mode, req_mode)) {
                        /* non-group locks are compatible, overlap doesn't
                           matter */
                        if (likely(req_mode != LCK_GROUP))
                                continue;

                        /* If we are trying to get a GROUP lock and there is
                           another one of this kind, we need to compare gid */
                        if (req->l_policy_data.l_extent.gid ==
                            lock->l_policy_data.l_extent.gid) {
                                /* If an existing lock with a matching gid is
                                   granted, we grant the new one too. */
                                if (lock->l_req_mode == lock->l_granted_mode)
                                        RETURN(2);

                                /* Otherwise we are scanning the queue of
                                 * waiting locks, which means the current
                                 * request would block along with the existing
                                 * lock (which is already blocked).
                                 * If we are in nonblocking mode, return
                                 * immediately. */
                                if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                        compat = -EWOULDBLOCK;
                                        goto destroylock;
                                }
                                /* If this group lock is compatible with another
                                 * group lock on the waiting list, they must be
                                 * together in the list, so they can be granted
                                 * at the same time.  Otherwise the later lock
                                 * can get stuck behind another, incompatible,
                                 * lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                /* Because 'lock' is not granted, we can stop
                                 * processing this queue and return immediately.
                                 * There is no need to check the rest of the
                                 * list. */
                                RETURN(0);
                        }
                }

                if (unlikely(req_mode == LCK_GROUP &&
                    (lock->l_req_mode != lock->l_granted_mode))) {
                        scan = 1;
                        compat = 0;
                        if (lock->l_req_mode != LCK_GROUP) {
                                /* Ok, we hit a non-GROUP lock; there should
                                   be no more GROUP locks later on, so queue
                                   in front of the first non-GROUP lock. */
                                ldlm_resource_insert_lock_after(lock, req);
                                list_del_init(&lock->l_res_link);
                                ldlm_resource_insert_lock_after(req, lock);
                                RETURN(0);
                        }
                        if (req->l_policy_data.l_extent.gid ==
                             lock->l_policy_data.l_extent.gid) {
                                /* found it */
                                ldlm_resource_insert_lock_after(lock, req);
                                RETURN(0);
                        }
                        continue;
                }

                if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                        /* If the compared lock is GROUP, then the requested
                         * one is PR/PW => this is not compatible; the extent
                         * range does not matter */
                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                compat = -EWOULDBLOCK;
                                goto destroylock;
                        } else {
                                *flags |= LDLM_FL_NO_TIMEOUT;
                        }
                } else if (lock->l_policy_data.l_extent.end < req_start ||
                           lock->l_policy_data.l_extent.start > req_end) {
                        /* if a non-group lock doesn't overlap, skip it */
                        continue;
                }

                if (!send_cbs)
                        RETURN(0);

                compat = 0;
                if (lock->l_blocking_ast)
                        ldlm_add_ast_work_item(lock, req, NULL, 0);
        }

        RETURN(compat);
destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy(req);
        *err = compat;
        RETURN(compat);
}

/* If first_enq is 0 (i.e., called from ldlm_reprocess_queue):
 *   - blocking ASTs have already been sent
 *   - the caller has already initialized req->lr_tmp
 *   - must call this function with the ns lock held
 *
 * If first_enq is 1 (i.e., called from ldlm_lock_enqueue):
 *   - blocking ASTs have not been sent
 *   - the caller has NOT initialized req->lr_tmp, so we must do it here
 *   - must call this function with the ns lock held once */
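/* Illustrative note (not from the original source): on the first-enqueue path
 * the function scans both the granted and waiting queues with send_cbs == 1,
 * so blocking AST work items are collected in rpc_list and sent via
 * ldlm_run_ast_work() if the lock cannot be granted immediately; on the
 * reprocess path the queues are scanned with send_cbs == 0 only to decide
 * whether the lock can now be granted. */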
int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq,
                             ldlm_error_t *err)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head rpc_list = CFS_LIST_HEAD_INIT(rpc_list);
        int rc, rc2;
        ENTRY;

        LASSERT(list_empty(&res->lr_converting));
        *err = ELDLM_OK;

        if (!first_enq) {
                /* Careful observers will note that we don't handle -EWOULDBLOCK
                 * here, but it's ok for a non-obvious reason -- compat_queue
                 * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
                 * flags should always be zero here, and if that ever stops
                 * being true, we want to find out. */
                LASSERT(*flags == 0);
                LASSERT(res->lr_tmp != NULL);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 0, flags,
                                              err);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock, 0,
                                                      flags, err);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, NULL, 0, 1);
                RETURN(LDLM_ITER_CONTINUE);
        }

 restart:
        LASSERT(res->lr_tmp == NULL);
        res->lr_tmp = &rpc_list;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 1, flags, err);
        if (rc < 0)
                GOTO(out, rc); /* lock was destroyed */
        if (rc == 2) {
                res->lr_tmp = NULL;
                goto grant;
        }

        rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, 1, flags, err);
        if (rc2 < 0)
                GOTO(out, rc = rc2); /* lock was destroyed */
        res->lr_tmp = NULL;

        if (rc + rc2 == 2) {
        grant:
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, NULL, 0, 0);
        } else {
                /* If either of the compat_queue()s returned failure, then we
                 * have ASTs to send and must go onto the waiting list.
                 *
                 * bug 2322: we used to unlink and re-add here, which was a
                 * terrible folly -- if we goto restart, we could get
                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
                if (list_empty(&lock->l_res_link))
                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
                l_unlock(&res->lr_namespace->ns_lock);
                rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list);
                l_lock(&res->lr_namespace->ns_lock);
                if (rc == -ERESTART)
                        GOTO(restart, -ERESTART);
                *flags |= LDLM_FL_BLOCK_GRANTED;
        }
        rc = 0;
out:
        res->lr_tmp = NULL;
        RETURN(rc);
}

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold ns_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
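/* Illustrative example (not from the original source): if the lock being
 * cancelled covered [0, 8191] and old_kms is therefore 8192, and the only
 * other granted lock on the resource covers [0, 4095], the loop below skips
 * the cancelled lock (via LDLM_FL_KMS_IGNORE) and returns a new KMS of 4096.
 * If any remaining lock ends at or beyond old_kms, old_kms is returned
 * unchanged. */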
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct list_head *tmp;
        struct ldlm_lock *lck;
        __u64 kms = 0;
        ENTRY;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        lock->l_flags |= LDLM_FL_KMS_IGNORE;

        list_for_each(tmp, &res->lr_granted) {
                lck = list_entry(tmp, struct ldlm_lock, l_res_link);

                if (lck->l_flags & LDLM_FL_KMS_IGNORE)
                        continue;

                if (lck->l_policy_data.l_extent.end >= old_kms)
                        RETURN(old_kms);

                /* This extent _has_ to be smaller than old_kms (checked above)
                 * so kms can only ever be smaller or the same as old_kms. */
                if (lck->l_policy_data.l_extent.end + 1 > kms)
                        kms = lck->l_policy_data.l_extent.end + 1;
        }
        LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);

        RETURN(kms);
}