1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_extent.c
37  *
38  * Author: Peter Braam <braam@clusterfs.com>
39  * Author: Phil Schwan <phil@clusterfs.com>
40  */
41
42 /**
43  * This file contains the implementation of the EXTENT lock type.
44  *
45  * The EXTENT lock type is for locking a contiguous range of values,
46  * represented by 64-bit start and end offsets (inclusive). There are several
47  * extent lock modes, some of which may be mutually incompatible. Extent locks
48  * are considered incompatible if their modes are incompatible and their
49  * extents intersect.  See the lock mode compatibility matrix in lustre_dlm.h.
50  */
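
/*
 * For illustration (extent values assumed, not taken from the code): a PW
 * lock on [0, 4095] and a PR lock on [4096, 8191] do not conflict because
 * their extents do not intersect, while a PW lock on [0, 4095] and a PR lock
 * on [2048, 8191] do conflict, since PW and PR are incompatible modes and the
 * extents overlap.
 */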
51
52 #define DEBUG_SUBSYSTEM S_LDLM
53
54 #include <libcfs/libcfs.h>
55 #include <lustre_dlm.h>
56 #include <obd_support.h>
57 #include <obd.h>
58 #include <obd_class.h>
59 #include <lustre_lib.h>
60
61 #include "ldlm_internal.h"
62
63 #ifdef HAVE_SERVER_SUPPORT
64 # define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
65
66 /**
67  * Fix up the ldlm_extent after expanding it.
68  *
69  * After expansion has been done, we might still want to do certain adjusting
70  * based on overall contention of the resource and the like to avoid granting
71  * overly wide locks.
72  */
73 static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
74                                               struct ldlm_extent *new_ex,
75                                               int conflicting)
76 {
77         ldlm_mode_t req_mode = req->l_req_mode;
78         __u64 req_start = req->l_req_extent.start;
79         __u64 req_end = req->l_req_extent.end;
80         __u64 req_align, mask;
81
82         if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
83                 if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
84                         new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
85                                           new_ex->end);
86         }
87
88         if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
89                 EXIT;
90                 return;
91         }
92
93         /* We need to ensure that the lock extent is properly aligned to what
94          * the client requested.  We also need to make sure it is server page
95          * size aligned, otherwise a server page can be covered by two write
96          * locks. */
97         mask = PAGE_CACHE_SIZE;
98         req_align = (req_end + 1) | req_start;
99         if (req_align != 0 && (req_align & (mask - 1)) == 0) {
100                 while ((req_align & mask) == 0)
101                         mask <<= 1;
102         }
103         mask -= 1;
104         /* We can only shrink the lock, not grow it.
105          * This should never cause lock to be smaller than requested,
106          * since requested lock was already aligned on these boundaries. */
107         new_ex->start = ((new_ex->start - 1) | mask) + 1;
108         new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
109         LASSERTF(new_ex->start <= req_start,
110                  "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
111                  mask, new_ex->start, req_start);
112         LASSERTF(new_ex->end >= req_end,
113                  "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
114                  mask, new_ex->end, req_end);
115 }
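
/*
 * A worked example of the alignment logic above, assuming PAGE_CACHE_SIZE is
 * 4096 (illustrative values): for a request of [0x10000, 0x1ffff],
 * req_align = 0x20000 | 0x10000 = 0x30000, so mask grows from 0x1000 to
 * 0x10000 and becomes 0xffff, i.e. 64KiB alignment.  A proposed expansion to
 * [0x5000, 0x38fff] is then trimmed to [0x10000, 0x2ffff], which still
 * contains the requested extent.
 */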
116
117 /**
118  * Return the maximum extent that:
119  * - contains the requested extent
120  * - does not overlap existing conflicting extents outside the requested one
121  *
122  * This allows clients to request a small required extent range, but if there
123  * is no contention on the lock the full lock can be granted to the client.
124  * This avoids the need for many smaller lock requests to be granted in the
125  * common (uncontended) case.
126  *
127  * Use interval tree to expand the lock extent for granted lock.
128  */
129 static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
130                                                 struct ldlm_extent *new_ex)
131 {
132         struct ldlm_resource *res = req->l_resource;
133         ldlm_mode_t req_mode = req->l_req_mode;
134         __u64 req_start = req->l_req_extent.start;
135         __u64 req_end = req->l_req_extent.end;
136         struct ldlm_interval_tree *tree;
137         struct interval_node_extent limiter = { new_ex->start, new_ex->end };
138         int conflicting = 0;
139         int idx;
140         ENTRY;
141
142         lockmode_verify(req_mode);
143
144         /* Using interval tree to handle the LDLM extent granted locks. */
145         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
146                 struct interval_node_extent ext = { req_start, req_end };
147
148                 tree = &res->lr_itree[idx];
149                 if (lockmode_compat(tree->lit_mode, req_mode))
150                         continue;
151
152                 conflicting += tree->lit_size;
153                 if (conflicting > 4)
154                         limiter.start = req_start;
155
156                 if (interval_is_overlapped(tree->lit_root, &ext))
157                         CDEBUG(D_INFO, 
158                                "req_mode = %d, tree->lit_mode = %d, "
159                                "tree->lit_size = %d\n",
160                                req_mode, tree->lit_mode, tree->lit_size);
161                 interval_expand(tree->lit_root, &ext, &limiter);
162                 limiter.start = max(limiter.start, ext.start);
163                 limiter.end = min(limiter.end, ext.end);
164                 if (limiter.start == req_start && limiter.end == req_end)
165                         break;
166         }
167
168         new_ex->start = limiter.start;
169         new_ex->end = limiter.end;
170         LASSERT(new_ex->start <= req_start);
171         LASSERT(new_ex->end >= req_end);
172
173         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
174         EXIT;
175 }
176
177 /* The purpose of this function is to return the maximum extent that:
178  * - contains the requested extent
179  * - does not overlap existing conflicting extents outside the requested one
180  * This is the waiting-queue counterpart of the granted-lock policy above.
181  */
182 static void
183 ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
184                                     struct ldlm_extent *new_ex)
185 {
186         struct ldlm_resource *res = req->l_resource;
187         ldlm_mode_t req_mode = req->l_req_mode;
188         __u64 req_start = req->l_req_extent.start;
189         __u64 req_end = req->l_req_extent.end;
190         struct ldlm_lock *lock;
191         int conflicting = 0;
192         ENTRY;
193
194         lockmode_verify(req_mode);
195
196         /* for waiting locks */
197         list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
198                 struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;
199
200                 /* We already hit the minimum requested size, search no more */
201                 if (new_ex->start == req_start && new_ex->end == req_end) {
202                         EXIT;
203                         return;
204                 }
205
206                 /* Don't conflict with ourselves */
207                 if (req == lock)
208                         continue;
209
210                 /* Locks are compatible, overlap doesn't matter */
211                 /* Until bug 20 is fixed, try to avoid granting overlapping
212                  * locks on one client (they take a long time to cancel) */
213                 if (lockmode_compat(lock->l_req_mode, req_mode) &&
214                     lock->l_export != req->l_export)
215                         continue;
216
217                 /* If this is a high-traffic lock, don't grow downwards at all
218                  * or grow upwards too much */
219                 ++conflicting;
220                 if (conflicting > 4)
221                         new_ex->start = req_start;
222
223                 /* If lock doesn't overlap new_ex, skip it. */
224                 if (!ldlm_extent_overlap(l_extent, new_ex))
225                         continue;
226
227                 /* The locks conflict in their requested extents and we can't
228                  * satisfy both, so ignore it.  Either we will ping-pong this
229                  * extent (we would regardless of what extent we granted) or
230                  * the lock is unused and shouldn't limit our extent growth. */
231                 if (ldlm_extent_overlap(&lock->l_req_extent,&req->l_req_extent))
232                         continue;
233
234                 /* We grow extents downwards only as far as they don't overlap
235                  * with already-granted locks, on the assumption that clients
236                  * will be writing beyond the initial requested end and would
237                  * then need to enqueue a new lock beyond previous request.
238                  * l_req_extent->end strictly < req_start, checked above. */
239                 if (l_extent->start < req_start && new_ex->start != req_start) {
240                         if (l_extent->end >= req_start)
241                                 new_ex->start = req_start;
242                         else
243                                 new_ex->start = min(l_extent->end+1, req_start);
244                 }
245
246                 /* If we need to cancel this lock anyway because our request
247                  * overlaps the granted lock, we grow up to its requested
248                  * extent start instead of limiting this extent, assuming that
249                  * clients are writing forwards and the lock had overgrown
250                  * its extent downwards before we enqueued our request. */
251                 if (l_extent->end > req_end) {
252                         if (l_extent->start <= req_end)
253                                 new_ex->end = max(lock->l_req_extent.start - 1,
254                                                   req_end);
255                         else
256                                 new_ex->end = max(l_extent->start - 1, req_end);
257                 }
258         }
259
260         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
261         EXIT;
262 }
263
264
265 /* In order to determine the largest possible extent we can grant, we need
266  * to scan all of the queues. */
267 static void ldlm_extent_policy(struct ldlm_resource *res,
268                                struct ldlm_lock *lock, __u64 *flags)
269 {
270         struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
271
272         if (lock->l_export == NULL)
273                 /*
274                  * this is local lock taken by server (e.g., as a part of
275                  * OST-side locking, or unlink handling). Expansion doesn't
276                  * make a lot of sense for local locks, because they are
277                  * dropped immediately on operation completion and would only
278                  * conflict with other threads.
279                  */
280                 return;
281
282         if (lock->l_policy_data.l_extent.start == 0 &&
283             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
284                 /* fast-path whole file locks */
285                 return;
286
287         ldlm_extent_internal_policy_granted(lock, &new_ex);
288         ldlm_extent_internal_policy_waiting(lock, &new_ex);
289
290         if (new_ex.start != lock->l_policy_data.l_extent.start ||
291             new_ex.end != lock->l_policy_data.l_extent.end) {
292                 *flags |= LDLM_FL_LOCK_CHANGED;
293                 lock->l_policy_data.l_extent.start = new_ex.start;
294                 lock->l_policy_data.l_extent.end = new_ex.end;
295         }
296 }
297
298 static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
299 {
300         struct ldlm_resource *res = lock->l_resource;
301         cfs_time_t now = cfs_time_current();
302
303         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
304                 return 1;
305
306         CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
307         if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
308                 res->lr_contention_time = now;
309         return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
310                 cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
311 }
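
/*
 * In other words, with (illustrative, not default) ns_contended_locks = 4 and
 * ns_contention_time = 2 seconds, seeing more than 4 conflicting locks starts
 * or extends a 2-second window during which the resource is treated as
 * contended.
 */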
312
313 struct ldlm_extent_compat_args {
314         struct list_head *work_list;
315         struct ldlm_lock *lock;
316         ldlm_mode_t mode;
317         int *locks;
318         int *compat;
319 };
320
321 static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
322                                                 void *data)
323 {
324         struct ldlm_extent_compat_args *priv = data;
325         struct ldlm_interval *node = to_ldlm_interval(n);
326         struct ldlm_extent *extent;
327         struct list_head *work_list = priv->work_list;
328         struct ldlm_lock *lock, *enq = priv->lock;
329         ldlm_mode_t mode = priv->mode;
330         int count = 0;
331         ENTRY;
332
333         LASSERT(!list_empty(&node->li_group));
334
335         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
336                 /* interval tree is for granted lock */
337                 LASSERTF(mode == lock->l_granted_mode,
338                          "mode = %s, lock->l_granted_mode = %s\n",
339                          ldlm_lockname[mode],
340                          ldlm_lockname[lock->l_granted_mode]);
341                 count++;
342                 if (lock->l_blocking_ast)
343                         ldlm_add_ast_work_item(lock, enq, work_list);
344         }
345
346         /* don't count conflicting glimpse locks */
347         extent = ldlm_interval_extent(node);
348         if (!(mode == LCK_PR &&
349             extent->start == 0 && extent->end == OBD_OBJECT_EOF))
350                 *priv->locks += count;
351
352         if (priv->compat)
353                 *priv->compat = 0;
354
355         RETURN(INTERVAL_ITER_CONT);
356 }
357
358 /**
359  * Determine if the lock is compatible with all locks on the queue.
360  *
361  * If \a work_list is provided, conflicting locks are linked there.
362  * If \a work_list is not provided, we exit this function on first conflict.
363  *
364  * \retval 0 if the lock is not compatible
365  * \retval 1 if the lock is compatible
366  * \retval 2 if \a req is a group lock and it is compatible and requires
367  *           no further checking
368  * \retval negative error, such as EWOULDBLOCK for group locks
369  */
370 static int
371 ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
372                          __u64 *flags, ldlm_error_t *err,
373                          struct list_head *work_list, int *contended_locks)
374 {
375         struct ldlm_resource *res = req->l_resource;
376         ldlm_mode_t req_mode = req->l_req_mode;
377         __u64 req_start = req->l_req_extent.start;
378         __u64 req_end = req->l_req_extent.end;
379         struct ldlm_lock *lock;
380         int check_contention;
381         int compat = 1;
382         int scan = 0;
383         ENTRY;
384
385         lockmode_verify(req_mode);
386
387         /* Using interval tree for granted lock */
388         if (queue == &res->lr_granted) {
389                 struct ldlm_interval_tree *tree;
390                 struct ldlm_extent_compat_args data = {.work_list = work_list,
391                                                .lock = req,
392                                                .locks = contended_locks,
393                                                .compat = &compat };
394                 struct interval_node_extent ex = { .start = req_start,
395                                                    .end = req_end };
396                 int idx, rc;
397
398                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
399                         tree = &res->lr_itree[idx];
400                         if (tree->lit_root == NULL) /* empty tree, skipped */
401                                 continue;
402
403                         data.mode = tree->lit_mode;
404                         if (lockmode_compat(req_mode, tree->lit_mode)) {
405                                 struct ldlm_interval *node;
406                                 struct ldlm_extent *extent;
407
408                                 if (req_mode != LCK_GROUP)
409                                         continue;
410
411                                 /* group lock, grant it immediately if
412                                  * compatible */
413                                 node = to_ldlm_interval(tree->lit_root);
414                                 extent = ldlm_interval_extent(node);
415                                 if (req->l_policy_data.l_extent.gid ==
416                                     extent->gid)
417                                         RETURN(2);
418                         }
419
420                         if (tree->lit_mode == LCK_GROUP) {
421                                 if (*flags & LDLM_FL_BLOCK_NOWAIT) {
422                                         compat = -EWOULDBLOCK;
423                                         goto destroylock;
424                                 }
425
426                                 *flags |= LDLM_FL_NO_TIMEOUT;
427                                 if (!work_list)
428                                         RETURN(0);
429
430                                 /* if work list is not NULL, add all
431                                    locks in the tree to work list */
432                                 compat = 0;
433                                 interval_iterate(tree->lit_root,
434                                                  ldlm_extent_compat_cb, &data);
435                                 continue;
436                         }
437
438                         if (!work_list) {
439                                 rc = interval_is_overlapped(tree->lit_root,&ex);
440                                 if (rc)
441                                         RETURN(0);
442                         } else {
443                                 interval_search(tree->lit_root, &ex,
444                                                 ldlm_extent_compat_cb, &data);
445                                 if (!list_empty(work_list) && compat)
446                                         compat = 0;
447                         }
448                 }
449         } else { /* for waiting queue */
450                 list_for_each_entry(lock, queue, l_res_link) {
451                         check_contention = 1;
452
453                         /* We stop walking the queue if we hit ourselves so
454                          * we don't take conflicting locks enqueued after us
455                          * into account, or we'd wait forever. */
456                         if (req == lock)
457                                 break;
458
459                         if (unlikely(scan)) {
460                                 /* We only get here if we are queuing a GROUP lock
461                                    and met some incompatible one. The main idea of this
462                                    code is to insert the GROUP lock past any compatible
463                                    GROUP lock in the waiting queue or, if there is none,
464                                    then in front of the first non-GROUP lock */
465                                 if (lock->l_req_mode != LCK_GROUP) {
466                                         /* OK, we hit a non-GROUP lock; there should
467                                          * be no more GROUP locks later on, so queue
468                                          * in front of the first non-GROUP lock */
469
470                                         ldlm_resource_insert_lock_after(lock, req);
471                                         list_del_init(&lock->l_res_link);
472                                         ldlm_resource_insert_lock_after(req, lock);
473                                         compat = 0;
474                                         break;
475                                 }
476                                 if (req->l_policy_data.l_extent.gid ==
477                                     lock->l_policy_data.l_extent.gid) {
478                                         /* found it */
479                                         ldlm_resource_insert_lock_after(lock, req);
480                                         compat = 0;
481                                         break;
482                                 }
483                                 continue;
484                         }
485
486                         /* locks are compatible, overlap doesn't matter */
487                         if (lockmode_compat(lock->l_req_mode, req_mode)) {
488                                 if (req_mode == LCK_PR &&
489                                     ((lock->l_policy_data.l_extent.start <=
490                                       req->l_policy_data.l_extent.start) &&
491                                      (lock->l_policy_data.l_extent.end >=
492                                       req->l_policy_data.l_extent.end))) {
493                                         /* If we met a PR lock just like us or
494                                            wider, and nobody down the list
495                                            conflicted with it, that means we
496                                            can skip processing of the rest of
497                                            the list and safely place ourselves
498                                            at the end of the list, or grant
499                                            (depending on whether we met
500                                            conflicting locks earlier in the
501                                            list).  In case of a first enqueue
502                                            only, we continue traversing if
503                                            there is something conflicting down
504                                            the list because we need to make
505                                            sure something is marked AST_SENT
506                                            as well; with an empty worklist we
507                                            would exit on the first conflict. */
508                                         /* There IS a case where such a flag is
509                                            not set for a lock, yet it blocks
510                                            something. Luckily for us this is
511                                            only during destroy, so the lock is
512                                            exclusive. So here we are safe */
513                                         if (!ldlm_is_ast_sent(lock))
514                                                 RETURN(compat);
515                                 }
516
517                                 /* non-group locks are compatible, overlap doesn't
518                                    matter */
519                                 if (likely(req_mode != LCK_GROUP))
520                                         continue;
521
522                                 /* If we are trying to get a GROUP lock and there is
523                                    another one of this kind, we need to compare gid */
524                                 if (req->l_policy_data.l_extent.gid ==
525                                     lock->l_policy_data.l_extent.gid) {
526                                         /* If existing lock with matched gid is granted,
527                                            we grant new one too. */
528                                         if (lock->l_req_mode == lock->l_granted_mode)
529                                                 RETURN(2);
530
531                                         /* Otherwise we are scanning the queue of
532                                          * waiting locks and it means the current
533                                          * request would block along with the existing
534                                          * lock (that is already blocked).
535                                          * If we are in nonblocking mode, return
536                                          * immediately. */
537                                         if (*flags & LDLM_FL_BLOCK_NOWAIT) {
538                                                 compat = -EWOULDBLOCK;
539                                                 goto destroylock;
540                                         }
541                                         /* If this group lock is compatible with another
542                                          * group lock on the waiting list, they must be
543                                          * together in the list, so they can be granted
544                                          * at the same time.  Otherwise the later lock
545                                          * can get stuck behind another, incompatible,
546                                          * lock. */
547                                         ldlm_resource_insert_lock_after(lock, req);
548                                         /* Because 'lock' is not granted, we can stop
549                                          * processing this queue and return immediately.
550                                          * There is no need to check the rest of the
551                                          * list. */
552                                         RETURN(0);
553                                 }
554                         }
555
556                         if (unlikely(req_mode == LCK_GROUP &&
557                                      (lock->l_req_mode != lock->l_granted_mode))) {
558                                 scan = 1;
559                                 compat = 0;
560                                 if (lock->l_req_mode != LCK_GROUP) {
561                                         /* OK, we hit a non-GROUP lock; there should be
562                                            no more GROUP locks later on, so queue in
563                                            front of the first non-GROUP lock */
564
565                                         ldlm_resource_insert_lock_after(lock, req);
566                                         list_del_init(&lock->l_res_link);
567                                         ldlm_resource_insert_lock_after(req, lock);
568                                         break;
569                                 }
570                                 if (req->l_policy_data.l_extent.gid ==
571                                     lock->l_policy_data.l_extent.gid) {
572                                         /* found it */
573                                         ldlm_resource_insert_lock_after(lock, req);
574                                         break;
575                                 }
576                                 continue;
577                         }
578
579                         if (unlikely(lock->l_req_mode == LCK_GROUP)) {
580                                 /* If the compared lock is GROUP, then the requested
581                                  * one is PR/PW, so this is not compatible; the extent
582                                  * range does not matter */
583                                 if (*flags & LDLM_FL_BLOCK_NOWAIT) {
584                                         compat = -EWOULDBLOCK;
585                                         goto destroylock;
586                                 } else {
587                                         *flags |= LDLM_FL_NO_TIMEOUT;
588                                 }
589                         } else if (lock->l_policy_data.l_extent.end < req_start ||
590                                    lock->l_policy_data.l_extent.start > req_end) {
591                                 /* if a non-group lock doesn't overlap, skip it */
592                                 continue;
593                         } else if (lock->l_req_extent.end < req_start ||
594                                    lock->l_req_extent.start > req_end) {
595                                 /* false contention, the requests don't really overlap */
596                                 check_contention = 0;
597                         }
598
599                         if (!work_list)
600                                 RETURN(0);
601
602                         /* don't count conflicting glimpse locks */
603                         if (lock->l_req_mode == LCK_PR &&
604                             lock->l_policy_data.l_extent.start == 0 &&
605                             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
606                                 check_contention = 0;
607
608                         *contended_locks += check_contention;
609
610                         compat = 0;
611                         if (lock->l_blocking_ast)
612                                 ldlm_add_ast_work_item(lock, req, work_list);
613                 }
614         }
615
616         if (ldlm_check_contention(req, *contended_locks) &&
617             compat == 0 &&
618             (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
619             req->l_req_mode != LCK_GROUP &&
620             req_end - req_start <=
621             ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
622                 GOTO(destroylock, compat = -EUSERS);
623
624         RETURN(compat);
625 destroylock:
626         list_del_init(&req->l_res_link);
627         ldlm_lock_destroy_nolock(req);
628         *err = compat;
629         RETURN(compat);
630 }
631
632 /**
633  * Discard all AST work items from list.
634  *
635  * If for whatever reason we do not want to send ASTs to conflicting locks
636  * anymore, disassemble the list with this function.
637  */
638 static void discard_bl_list(struct list_head *bl_list)
639 {
640         struct list_head *tmp, *pos;
641         ENTRY;
642
643         list_for_each_safe(pos, tmp, bl_list) {
644                 struct ldlm_lock *lock =
645                         list_entry(pos, struct ldlm_lock, l_bl_ast);
646
647                 list_del_init(&lock->l_bl_ast);
648                 LASSERT(ldlm_is_ast_sent(lock));
649                 ldlm_clear_ast_sent(lock);
650                 LASSERT(lock->l_bl_ast_run == 0);
651                 LASSERT(lock->l_blocking_lock);
652                 LDLM_LOCK_RELEASE(lock->l_blocking_lock);
653                 lock->l_blocking_lock = NULL;
654                 LDLM_LOCK_RELEASE(lock);
655         }
656         EXIT;
657 }
658
659 /**
660  * Process a granting attempt for extent lock.
661  * Must be called with ns lock held.
662  *
663  * This function looks for any conflicts for \a lock in the granted or
664  * waiting queues. The lock is granted if no conflicts are found in
665  * either queue.
666  *
667  * If \a first_enq is 0 (i.e., called from ldlm_reprocess_queue):
668  *   - blocking ASTs have already been sent
669  *
670  * If \a first_enq is 1 (i.e., called from ldlm_lock_enqueue):
671  *   - blocking ASTs have not been sent yet, so the list of conflicting locks
672  *     is collected and ASTs are sent.
673  */
674 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
675                              int first_enq, ldlm_error_t *err,
676                              struct list_head *work_list)
677 {
678         struct ldlm_resource *res = lock->l_resource;
679         struct list_head rpc_list;
680         int rc, rc2;
681         int contended_locks = 0;
682         ENTRY;
683
684         LASSERT(lock->l_granted_mode != lock->l_req_mode);
685         LASSERT(list_empty(&res->lr_converting));
686         LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
687                 !ldlm_is_ast_discard_data(lock));
688         INIT_LIST_HEAD(&rpc_list);
689         check_res_locked(res);
690         *err = ELDLM_OK;
691
692         if (!first_enq) {
693                 /* Careful observers will note that we don't handle -EWOULDBLOCK
694                  * here, but it's ok for a non-obvious reason -- compat_queue
695                  * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
696                  * flags should always be zero here, and if that ever stops
697                  * being true, we want to find out. */
698                 LASSERT(*flags == 0);
699                 rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
700                                               err, NULL, &contended_locks);
701                 if (rc == 1) {
702                         rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
703                                                       flags, err, NULL,
704                                                       &contended_locks);
705                 }
706                 if (rc == 0)
707                         RETURN(LDLM_ITER_STOP);
708
709                 ldlm_resource_unlink_lock(lock);
710
711                 if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
712                         ldlm_extent_policy(res, lock, flags);
713                 ldlm_grant_lock(lock, work_list);
714                 RETURN(LDLM_ITER_CONTINUE);
715         }
716
717  restart:
718         contended_locks = 0;
719         rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
720                                       &rpc_list, &contended_locks);
721         if (rc < 0)
722                 GOTO(out, rc); /* lock was destroyed */
723         if (rc == 2)
724                 goto grant;
725
726         rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
727                                        &rpc_list, &contended_locks);
728         if (rc2 < 0)
729                 GOTO(out, rc = rc2); /* lock was destroyed */
730
731         if (rc + rc2 == 2) {
732         grant:
733                 ldlm_extent_policy(res, lock, flags);
734                 ldlm_resource_unlink_lock(lock);
735                 ldlm_grant_lock(lock, NULL);
736         } else {
737                 /* If either of the compat_queue()s returned failure, then we
738                  * have ASTs to send and must go onto the waiting list.
739                  *
740                  * bug 2322: we used to unlink and re-add here, which was a
741                  * terrible folly -- if we goto restart, we could get
742                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
743                 if (list_empty(&lock->l_res_link))
744                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
745                 unlock_res(res);
746                 rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
747                                        LDLM_WORK_BL_AST);
748
749                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
750                     !ns_is_client(ldlm_res_to_ns(res)))
751                         class_fail_export(lock->l_export);
752
753                 lock_res(res);
754                 if (rc == -ERESTART) {
755                         /* 15715: The lock was granted and destroyed after
756                          * resource lock was dropped. Interval node was freed
757                          * in ldlm_lock_destroy. Anyway, this always happens
758                          * when a client is being evicted. So it would be
759                          * ok to return an error. -jay */
760                         if (ldlm_is_destroyed(lock)) {
761                                 *err = -EAGAIN;
762                                 GOTO(out, rc = -EAGAIN);
763                         }
764
765                         /* lock was granted while resource was unlocked. */
766                         if (lock->l_granted_mode == lock->l_req_mode) {
767                                 /* bug 11300: if the lock has been granted,
768                                  * break earlier because otherwise, we will go
769                                  * to restart and ldlm_resource_unlink will be
770                                  * called and it causes the interval node to be
771                                  * freed. Then we will fail at
772                                  * ldlm_extent_add_lock() */
773                                 *flags &= ~LDLM_FL_BLOCKED_MASK;
774                                 GOTO(out, rc = 0);
775                         }
776
777                         GOTO(restart, rc);
778                 }
779
780                 /* this way we force client to wait for the lock
781                  * endlessly once the lock is enqueued -bzzz */
782                 *flags |= LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT;
783
784         }
785         RETURN(0);
786 out:
787         if (!list_empty(&rpc_list)) {
788                 LASSERT(!ldlm_is_ast_discard_data(lock));
789                 discard_bl_list(&rpc_list);
790         }
791         RETURN(rc);
792 }
793 #endif /* HAVE_SERVER_SUPPORT */
794
795 /* When a lock is cancelled by a client, the KMS may undergo change if this
796  * is the "highest lock".  This function returns the new KMS value.
797  * Caller must hold lr_lock already.
798  *
799  * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
800 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
801 {
802         struct ldlm_resource *res = lock->l_resource;
803         struct list_head *tmp;
804         struct ldlm_lock *lck;
805         __u64 kms = 0;
806         ENTRY;
807
808         /* don't let another thread in ldlm_extent_shift_kms race in
809          * just after we finish and take our lock into account in its
810          * calculation of the kms */
811         ldlm_set_kms_ignore(lock);
812
813         list_for_each(tmp, &res->lr_granted) {
814                 lck = list_entry(tmp, struct ldlm_lock, l_res_link);
815
816                 if (ldlm_is_kms_ignore(lck))
817                         continue;
818
819                 if (lck->l_policy_data.l_extent.end >= old_kms)
820                         RETURN(old_kms);
821
822                 /* This extent _has_ to be smaller than old_kms (checked above)
823                  * so kms can only ever be smaller or the same as old_kms. */
824                 if (lck->l_policy_data.l_extent.end + 1 > kms)
825                         kms = lck->l_policy_data.l_extent.end + 1;
826         }
827         LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);
828
829         RETURN(kms);
830 }
831 EXPORT_SYMBOL(ldlm_extent_shift_kms);
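
/*
 * For example (illustrative values): if the cancelled lock covered
 * [524288, 1048575] and old_kms was 1048576, and the only remaining granted
 * lock not marked kms_ignore covers [0, 65535], the new KMS is 65536, i.e.
 * that lock's end + 1.
 */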
832
833 struct kmem_cache *ldlm_interval_slab;
834 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
835 {
836         struct ldlm_interval *node;
837         ENTRY;
838
839         LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
840         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
841         if (node == NULL)
842                 RETURN(NULL);
843
844         INIT_LIST_HEAD(&node->li_group);
845         ldlm_interval_attach(node, lock);
846         RETURN(node);
847 }
848
849 void ldlm_interval_free(struct ldlm_interval *node)
850 {
851         if (node) {
852                 LASSERT(list_empty(&node->li_group));
853                 LASSERT(!interval_is_intree(&node->li_node));
854                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
855         }
856 }
857
858 /* interval tree, for LDLM_EXTENT. */
859 void ldlm_interval_attach(struct ldlm_interval *n,
860                           struct ldlm_lock *l)
861 {
862         LASSERT(l->l_tree_node == NULL);
863         LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
864
865         list_add_tail(&l->l_sl_policy, &n->li_group);
866         l->l_tree_node = n;
867 }
868
869 struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
870 {
871         struct ldlm_interval *n = l->l_tree_node;
872
873         if (n == NULL)
874                 return NULL;
875
876         LASSERT(!list_empty(&n->li_group));
877         l->l_tree_node = NULL;
878         list_del_init(&l->l_sl_policy);
879
880         return list_empty(&n->li_group) ? n : NULL;
881 }
882
883 static inline int lock_mode_to_index(ldlm_mode_t mode)
884 {
885         int index;
886
887         LASSERT(mode != 0);
888         LASSERT(IS_PO2(mode));
889         for (index = -1; mode; index++, mode >>= 1) ;
890         LASSERT(index < LCK_MODE_NUM);
891         return index;
892 }
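
/*
 * lock_mode_to_index() is simply log2 of the (power-of-two) mode bit: for
 * example, a mode value of 0x8 maps to index 3 and 0x1 maps to index 0.
 */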
893
894 /** Add newly granted lock into interval tree for the resource. */
895 void ldlm_extent_add_lock(struct ldlm_resource *res,
896                           struct ldlm_lock *lock)
897 {
898         struct interval_node *found, **root;
899         struct ldlm_interval *node;
900         struct ldlm_extent *extent;
901         int idx;
902
903         LASSERT(lock->l_granted_mode == lock->l_req_mode);
904
905         node = lock->l_tree_node;
906         LASSERT(node != NULL);
907         LASSERT(!interval_is_intree(&node->li_node));
908
909         idx = lock_mode_to_index(lock->l_granted_mode);
910         LASSERT(lock->l_granted_mode == 1 << idx);
911         LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
912
913         /* node extent initialize */
914         extent = &lock->l_policy_data.l_extent;
915         interval_set(&node->li_node, extent->start, extent->end);
916
917         root = &res->lr_itree[idx].lit_root;
918         found = interval_insert(&node->li_node, root);
919         if (found) { /* The policy group found. */
920                 struct ldlm_interval *tmp = ldlm_interval_detach(lock);
921                 LASSERT(tmp != NULL);
922                 ldlm_interval_free(tmp);
923                 ldlm_interval_attach(to_ldlm_interval(found), lock);
924         }
925         res->lr_itree[idx].lit_size++;
926
927         /* even though we use the interval tree to manage extent locks, we
928          * also add the locks to the granted list for debugging purposes */
929         ldlm_resource_add_lock(res, &res->lr_granted, lock);
930 }
931
932 /** Remove cancelled lock from resource interval tree. */
933 void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
934 {
935         struct ldlm_resource *res = lock->l_resource;
936         struct ldlm_interval *node = lock->l_tree_node;
937         struct ldlm_interval_tree *tree;
938         int idx;
939
940         if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
941                 return;
942
943         idx = lock_mode_to_index(lock->l_granted_mode);
944         LASSERT(lock->l_granted_mode == 1 << idx);
945         tree = &res->lr_itree[idx];
946
947         LASSERT(tree->lit_root != NULL); /* assure the tree is not null */
948
949         tree->lit_size--;
950         node = ldlm_interval_detach(lock);
951         if (node) {
952                 interval_erase(&node->li_node, &tree->lit_root);
953                 ldlm_interval_free(node);
954         }
955 }
956
957 void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
958                                      ldlm_policy_data_t *lpolicy)
959 {
960         memset(lpolicy, 0, sizeof(*lpolicy));
961         lpolicy->l_extent.start = wpolicy->l_extent.start;
962         lpolicy->l_extent.end = wpolicy->l_extent.end;
963         lpolicy->l_extent.gid = wpolicy->l_extent.gid;
964 }
965
966 void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
967                                      ldlm_wire_policy_data_t *wpolicy)
968 {
969         memset(wpolicy, 0, sizeof(*wpolicy));
970         wpolicy->l_extent.start = lpolicy->l_extent.start;
971         wpolicy->l_extent.end = lpolicy->l_extent.end;
972         wpolicy->l_extent.gid = lpolicy->l_extent.gid;
973 }
974