1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2013, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_extent.c
33  *
34  * Author: Peter Braam <braam@clusterfs.com>
35  * Author: Phil Schwan <phil@clusterfs.com>
36  */
37
38 /**
39  * This file contains the implementation of the EXTENT lock type
40  *
41  * EXTENT lock type is for locking a contiguous range of values, represented
42  * by 64-bit starting and ending offsets (inclusive). There are several extent
43  * lock modes, some of which may be mutually incompatible. Extent locks are
44  * considered incompatible if their modes are incompatible and their extents
45  * intersect.  See the lock mode compatibility matrix in lustre_dlm.h.
46  */
47
48 #define DEBUG_SUBSYSTEM S_LDLM
49
50 #include <libcfs/libcfs.h>
51 #include <lustre_dlm.h>
52 #include <obd_support.h>
53 #include <obd.h>
54 #include <obd_class.h>
55 #include <lustre_lib.h>
56
57 #include "ldlm_internal.h"
58
59 #ifdef HAVE_SERVER_SUPPORT
60 # define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
61
62 /**
63  * Fix up the ldlm_extent after expanding it.
64  *
65  * After expansion has been done, we might still want to do certain adjusting
66  * based on overall contention of the resource and the like to avoid granting
67  * overly wide locks.
68  */
69 static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
70                                               struct ldlm_extent *new_ex,
71                                               int conflicting)
72 {
73         enum ldlm_mode req_mode = req->l_req_mode;
74         __u64 req_start = req->l_req_extent.start;
75         __u64 req_end = req->l_req_extent.end;
76         __u64 req_align, mask;
77
78         if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
79                 if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
80                         new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
81                                           new_ex->end);
82         }
83
84         if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
85                 EXIT;
86                 return;
87         }
88
89         /* We need to ensure that the lock extent is properly aligned to what
90          * the client requested.  We also need to make sure it is server
91          * page size aligned, otherwise a single server page could be covered
92          * by two write locks. */
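        /* For example, with PAGE_SIZE = 4096, req_start = 0x100000 and
         * req_end = 0x1fffff, req_align = 0x200000 | 0x100000 = 0x300000,
         * so the loop below grows mask to 0x100000 and the expanded extent
         * gets trimmed to 1MB boundaries, still containing the already
         * 1MB-aligned request. */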
93         mask = PAGE_SIZE;
94         req_align = (req_end + 1) | req_start;
95         if (req_align != 0 && (req_align & (mask - 1)) == 0) {
96                 while ((req_align & mask) == 0)
97                         mask <<= 1;
98         }
99         mask -= 1;
100         /* We can only shrink the lock, not grow it.
101          * This should never cause the lock to be smaller than requested,
102          * since the requested lock was already aligned on these boundaries. */
103         new_ex->start = ((new_ex->start - 1) | mask) + 1;
104         new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
105         LASSERTF(new_ex->start <= req_start,
106                  "mask %#llx grant start %llu req start %llu\n",
107                  mask, new_ex->start, req_start);
108         LASSERTF(new_ex->end >= req_end,
109                  "mask %#llx grant end %llu req end %llu\n",
110                  mask, new_ex->end, req_end);
111 }
112
113 /**
114  * Return the maximum extent that:
115  * - contains the requested extent
116  * - does not overlap existing conflicting extents outside the requested one
117  *
118  * This allows clients to request a small required extent range, but if there
119  * is no contention on the lock the full lock can be granted to the client.
120  * This avoids the need for many smaller lock requests to be granted in the
121  * common (uncontended) case.
122  *
123  * Use interval tree to expand the lock extent for granted lock.
124  */
125 static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
126                                                 struct ldlm_extent *new_ex)
127 {
128         struct ldlm_resource *res = req->l_resource;
129         enum ldlm_mode req_mode = req->l_req_mode;
130         __u64 req_start = req->l_req_extent.start;
131         __u64 req_end = req->l_req_extent.end;
132         struct ldlm_interval_tree *tree;
133         struct interval_node_extent limiter = {
134                 .start  = new_ex->start,
135                 .end    = new_ex->end,
136         };
137         int conflicting = 0;
138         int idx;
139         ENTRY;
140
141         lockmode_verify(req_mode);
142
143         /* Walk the per-mode interval trees of granted LDLM extent locks. */
144         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
145                 struct interval_node_extent ext = {
146                         .start  = req_start,
147                         .end    = req_end,
148                 };
149
150                 tree = &res->lr_itree[idx];
151                 if (lockmode_compat(tree->lit_mode, req_mode))
152                         continue;
153
154                 conflicting += tree->lit_size;
155                 if (conflicting > 4)
156                         limiter.start = req_start;
157
158                 if (interval_is_overlapped(tree->lit_root, &ext))
159                         CDEBUG(D_INFO,
160                                "req_mode = %d, tree->lit_mode = %d, "
161                                "tree->lit_size = %d\n",
162                                req_mode, tree->lit_mode, tree->lit_size);
163                 interval_expand(tree->lit_root, &ext, &limiter);
164                 limiter.start = max(limiter.start, ext.start);
165                 limiter.end = min(limiter.end, ext.end);
166                 if (limiter.start == req_start && limiter.end == req_end)
167                         break;
168         }
169
170         new_ex->start = limiter.start;
171         new_ex->end = limiter.end;
172         LASSERT(new_ex->start <= req_start);
173         LASSERT(new_ex->end >= req_end);
174
175         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
176         EXIT;
177 }
178
179 /* The purpose of this function is to return the maximum extent that:
180  * - contains the requested extent
181  * - does not overlap existing conflicting extents outside the requested one
182  * (like ldlm_extent_internal_policy_granted(), but scanning the waiting list)
183  */
184 static void
185 ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
186                                     struct ldlm_extent *new_ex)
187 {
188         struct ldlm_resource *res = req->l_resource;
189         enum ldlm_mode req_mode = req->l_req_mode;
190         __u64 req_start = req->l_req_extent.start;
191         __u64 req_end = req->l_req_extent.end;
192         struct ldlm_lock *lock;
193         int conflicting = 0;
194         ENTRY;
195
196         lockmode_verify(req_mode);
197
198         /* for waiting locks */
199         list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
200                 struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;
201
202                 /* We already hit the minimum requested size, search no more */
203                 if (new_ex->start == req_start && new_ex->end == req_end) {
204                         EXIT;
205                         return;
206                 }
207
208                 /* Don't conflict with ourselves */
209                 if (req == lock)
210                         continue;
211
212                 /* Locks are compatible, overlap doesn't matter */
213                 /* Until bug 20 is fixed, try to avoid granting overlapping
214                  * locks on one client (they take a long time to cancel) */
215                 if (lockmode_compat(lock->l_req_mode, req_mode) &&
216                     lock->l_export != req->l_export)
217                         continue;
218
219                 /* If this is a high-traffic lock, don't grow downwards at all
220                  * or grow upwards too much */
221                 ++conflicting;
222                 if (conflicting > 4)
223                         new_ex->start = req_start;
224
225                 /* If lock doesn't overlap new_ex, skip it. */
226                 if (!ldlm_extent_overlap(l_extent, new_ex))
227                         continue;
228
229                 /* The locks conflict in their requested extents and we can't
230                  * satisfy both, so ignore it.  Either we will ping-pong this
231                  * extent (we would regardless of what extent we granted) or
232                  * the lock is unused and shouldn't limit our extent growth. */
233                 if (ldlm_extent_overlap(&lock->l_req_extent,&req->l_req_extent))
234                         continue;
235
236                 /* We grow extents downwards only as far as they don't overlap
237                  * with already-granted locks, on the assumption that clients
238                  * will be writing beyond the initial requested end and would
239                  * then need to enqueue a new lock beyond the previous request.
240                  * l_req_extent->end strictly < req_start, checked above. */
241                 if (l_extent->start < req_start && new_ex->start != req_start) {
242                         if (l_extent->end >= req_start)
243                                 new_ex->start = req_start;
244                         else
245                                 new_ex->start = min(l_extent->end+1, req_start);
246                 }
247
248                 /* If we need to cancel this lock anyway because our request
249                  * overlaps the granted lock, we grow up to its requested
250                  * extent start instead of limiting this extent, assuming that
251                  * clients are writing forwards and the lock had overgrown
252                  * its extent downwards before we enqueued our request. */
253                 if (l_extent->end > req_end) {
254                         if (l_extent->start <= req_end)
255                                 new_ex->end = max(lock->l_req_extent.start - 1,
256                                                   req_end);
257                         else
258                                 new_ex->end = max(l_extent->start - 1, req_end);
259                 }
260         }
261
262         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
263         EXIT;
264 }
265
266
267 /* In order to determine the largest possible extent we can grant, we need
268  * to scan all of the queues. */
269 static void ldlm_extent_policy(struct ldlm_resource *res,
270                                struct ldlm_lock *lock, __u64 *flags)
271 {
272         struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
273
274         if (lock->l_export == NULL)
275                 /*
276                  * this is a local lock taken by server (e.g., as a part of
277                  * OST-side locking, or unlink handling). Expansion doesn't
278                  * make a lot of sense for local locks, because they are
279                  * dropped immediately on operation completion and would only
280                  * conflict with other threads.
281                  */
282                 return;
283
284         if (lock->l_policy_data.l_extent.start == 0 &&
285             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
286                 /* fast-path whole file locks */
287                 return;
288
289         /* Because reprocess_queue zeroes flags and uses it to return
290          * LDLM_FL_LOCK_CHANGED, we must check for the NO_EXPANSION flag
291          * in the lock flags rather than the 'flags' argument */
292         if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
293                 ldlm_extent_internal_policy_granted(lock, &new_ex);
294                 ldlm_extent_internal_policy_waiting(lock, &new_ex);
295         } else {
296                 LDLM_DEBUG(lock, "Not expanding manually requested lock.\n");
297                 new_ex.start = lock->l_policy_data.l_extent.start;
298                 new_ex.end = lock->l_policy_data.l_extent.end;
299                 /* In case the request is not on correct boundaries, we call
300                  * fixup. (normally called in ldlm_extent_internal_policy_*) */
301                 ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
302         }
303
304         if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
305                 *flags |= LDLM_FL_LOCK_CHANGED;
306                 lock->l_policy_data.l_extent.start = new_ex.start;
307                 lock->l_policy_data.l_extent.end = new_ex.end;
308         }
309 }
310
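/* Decide whether the resource should be treated as contended: if the number
 * of conflicting locks exceeds the per-namespace threshold, refresh the
 * resource contention timestamp, then report contention until the configured
 * contention interval has elapsed. */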
311 static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
312 {
313         struct ldlm_resource *res = lock->l_resource;
314         time64_t now = ktime_get_seconds();
315
316         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
317                 return 1;
318
319         CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
320         if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
321                 res->lr_contention_time = now;
322
323         return now < res->lr_contention_time +
324                      ldlm_res_to_ns(res)->ns_contention_time;
325 }
326
327 struct ldlm_extent_compat_args {
328         struct list_head *work_list;
329         struct ldlm_lock *lock;
330         enum ldlm_mode mode;
331         int *locks;
332         int *compat;
333 };
334
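/* Interval tree callback used when scanning the granted tree: every lock in
 * the overlapping interval conflicts with the enqueued lock, so add a
 * blocking AST work item for each lock that has one (except GROUP locks),
 * count the conflicts towards contention (whole-file PR glimpse locks are
 * not counted), and mark the request incompatible. */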
335 static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
336                                                 void *data)
337 {
338         struct ldlm_extent_compat_args *priv = data;
339         struct ldlm_interval *node = to_ldlm_interval(n);
340         struct ldlm_extent *extent;
341         struct list_head *work_list = priv->work_list;
342         struct ldlm_lock *lock, *enq = priv->lock;
343         enum ldlm_mode mode = priv->mode;
344         int count = 0;
345         ENTRY;
346
347         LASSERT(!list_empty(&node->li_group));
348
349         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
350                 /* interval tree is for granted lock */
351                 LASSERTF(mode == lock->l_granted_mode,
352                          "mode = %s, lock->l_granted_mode = %s\n",
353                          ldlm_lockname[mode],
354                          ldlm_lockname[lock->l_granted_mode]);
355                 count++;
356                 if (lock->l_blocking_ast &&
357                     lock->l_granted_mode != LCK_GROUP)
358                         ldlm_add_ast_work_item(lock, enq, work_list);
359         }
360
361         /* don't count conflicting glimpse locks */
362         extent = ldlm_interval_extent(node);
363         if (!(mode == LCK_PR &&
364             extent->start == 0 && extent->end == OBD_OBJECT_EOF))
365                 *priv->locks += count;
366
367         if (priv->compat)
368                 *priv->compat = 0;
369
370         RETURN(INTERVAL_ITER_CONT);
371 }
372
373 /**
374  * Determine if the lock is compatible with all locks on the queue.
375  *
376  * If \a work_list is provided, conflicting locks are linked there.
377  * If \a work_list is not provided, we exit this function on first conflict.
378  *
379  * \retval 0 if the lock is not compatible
380  * \retval 1 if the lock is compatible
381  * \retval 2 if \a req is a group lock and it is compatible and requires
382  *           no further checking
383  * \retval negative error, such as EWOULDBLOCK for group locks
384  */
385 static int
386 ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
387                          __u64 *flags, enum ldlm_error *err,
388                          struct list_head *work_list, int *contended_locks)
389 {
390         struct ldlm_resource *res = req->l_resource;
391         enum ldlm_mode req_mode = req->l_req_mode;
392         __u64 req_start = req->l_req_extent.start;
393         __u64 req_end = req->l_req_extent.end;
394         struct ldlm_lock *lock;
395         int check_contention;
396         int compat = 1;
397         int scan = 0;
398         ENTRY;
399
400         lockmode_verify(req_mode);
401
402         /* Use the interval trees for the granted queue */
403         if (queue == &res->lr_granted) {
404                 struct ldlm_interval_tree *tree;
405                 struct ldlm_extent_compat_args data = {.work_list = work_list,
406                                                .lock = req,
407                                                .locks = contended_locks,
408                                                .compat = &compat };
409                 struct interval_node_extent ex = { .start = req_start,
410                                                    .end = req_end };
411                 int idx, rc;
412
413                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
414                         tree = &res->lr_itree[idx];
415                         if (tree->lit_root == NULL) /* empty tree, skipped */
416                                 continue;
417
418                         data.mode = tree->lit_mode;
419                         if (lockmode_compat(req_mode, tree->lit_mode)) {
420                                 struct ldlm_interval *node;
421                                 struct ldlm_extent *extent;
422
423                                 if (req_mode != LCK_GROUP)
424                                         continue;
425
426                                 /* group lock, grant it immediately if
427                                  * compatible */
428                                 node = to_ldlm_interval(tree->lit_root);
429                                 extent = ldlm_interval_extent(node);
430                                 if (req->l_policy_data.l_extent.gid ==
431                                     extent->gid)
432                                         RETURN(2);
433                         }
434
435                         if (tree->lit_mode == LCK_GROUP) {
436                                 if (*flags & (LDLM_FL_BLOCK_NOWAIT |
437                                               LDLM_FL_SPECULATIVE)) {
438                                         compat = -EWOULDBLOCK;
439                                         goto destroylock;
440                                 }
441
442                                 *flags |= LDLM_FL_NO_TIMEOUT;
443                                 if (!work_list)
444                                         RETURN(0);
445
446                                 /* if the work list is not NULL, add all
447                                    locks in the tree to the work list */
448                                 compat = 0;
449                                 interval_iterate(tree->lit_root,
450                                                  ldlm_extent_compat_cb, &data);
451                                 continue;
452                         }
453
454                         /* We've found a potentially blocking lock, check
455                          * compatibility.  This handles locks other than GROUP
456                          * locks, which are handled separately above.
457                          *
458                          * Locks with FL_SPECULATIVE are asynchronous requests
459                          * which must never wait behind another lock, so they
460                          * fail if any conflicting lock is found. */
461                         if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
462                                 rc = interval_is_overlapped(tree->lit_root,
463                                                             &ex);
464                                 if (rc) {
465                                         if (!work_list) {
466                                                 RETURN(0);
467                                         } else {
468                                                 compat = -EWOULDBLOCK;
469                                                 goto destroylock;
470                                         }
471                                 }
472                         } else {
473                                 interval_search(tree->lit_root, &ex,
474                                                 ldlm_extent_compat_cb, &data);
475                                 if (!list_empty(work_list) && compat)
476                                         compat = 0;
477                         }
478                 }
479         } else { /* for waiting queue */
480                 list_for_each_entry(lock, queue, l_res_link) {
481                         check_contention = 1;
482
483                         /* We stop walking the queue if we hit ourselves so
484                          * we don't take conflicting locks enqueued after us
485                          * into account, or we'd wait forever. */
486                         if (req == lock)
487                                 break;
488
489                         if (unlikely(scan)) {
490                                 /* We only get here if we are queueing a GROUP lock
491                                    and have met an incompatible one. The main idea of
492                                    this code is to insert the GROUP lock after a
493                                    compatible GROUP lock in the waiting queue or, if
494                                    there is none, in front of the first non-GROUP lock */
495                                 if (lock->l_req_mode != LCK_GROUP) {
496                                         /* Ok, we hit a non-GROUP lock; there should
497                                          * be no more GROUP locks later on, so queue in
498                                          * front of the first non-GROUP lock */
499
500                                         ldlm_resource_insert_lock_after(lock, req);
501                                         list_del_init(&lock->l_res_link);
502                                         ldlm_resource_insert_lock_after(req, lock);
503                                         compat = 0;
504                                         break;
505                                 }
506                                 if (req->l_policy_data.l_extent.gid ==
507                                     lock->l_policy_data.l_extent.gid) {
508                                         /* found it */
509                                         ldlm_resource_insert_lock_after(lock, req);
510                                         compat = 0;
511                                         break;
512                                 }
513                                 continue;
514                         }
515
516                         /* locks are compatible, overlap doesn't matter */
517                         if (lockmode_compat(lock->l_req_mode, req_mode)) {
518                                 if (req_mode == LCK_PR &&
519                                     ((lock->l_policy_data.l_extent.start <=
520                                       req->l_policy_data.l_extent.start) &&
521                                      (lock->l_policy_data.l_extent.end >=
522                                       req->l_policy_data.l_extent.end))) {
523                                         /* If we met a PR lock just like us or
524                                            wider, and nobody down the list
525                                            conflicted with it, that means we can
526                                            skip processing the rest of the list
527                                            and safely place ourselves at the end
528                                            of the list, or grant (depending on
529                                            whether we met a conflicting lock
530                                            earlier in the list).  In case of a
531                                            1st enqueue only, we continue
532                                            traversing if there is something
533                                            conflicting down the list because we
534                                            need to make sure that something is
535                                            marked as AST_SENT as well; in case
536                                            of an empty worklist we would exit on
537                                            the first conflict met. */
538                                         /* There IS a case where such a flag
539                                            is not set for a lock, yet it blocks
540                                            something. Luckily for us this is
541                                            only during destroy, so the lock is
542                                            exclusive. So here we are safe */
543                                         if (!ldlm_is_ast_sent(lock))
544                                                 RETURN(compat);
545                                 }
546
547                                 /* non-group locks are compatible, overlap doesn't
548                                    matter */
549                                 if (likely(req_mode != LCK_GROUP))
550                                         continue;
551
552                                 /* If we are trying to get a GROUP lock and there is
553                                    another one of this kind, we need to compare gid */
554                                 if (req->l_policy_data.l_extent.gid ==
555                                     lock->l_policy_data.l_extent.gid) {
556                                         /* If existing lock with matched gid is granted,
557                                            we grant new one too. */
558                                         if (lock->l_req_mode == lock->l_granted_mode)
559                                                 RETURN(2);
560
561                                         /* Otherwise we are scanning the queue of
562                                          * waiting locks and it means the current
563                                          * request would block along with the
564                                          * existing lock (that is already blocked).
565                                          * If we are in nonblocking mode, return
566                                          * immediately. */
567                                         if (*flags & (LDLM_FL_BLOCK_NOWAIT
568                                                       | LDLM_FL_SPECULATIVE)) {
569                                                 compat = -EWOULDBLOCK;
570                                                 goto destroylock;
571                                         }
572                                         /* If this group lock is compatible with another
573                                          * group lock on the waiting list, they must be
574                                          * together in the list, so they can be granted
575                                          * at the same time.  Otherwise the later lock
576                                          * can get stuck behind another, incompatible,
577                                          * lock. */
578                                         ldlm_resource_insert_lock_after(lock, req);
579                                         /* Because 'lock' is not granted, we can stop
580                                          * processing this queue and return immediately.
581                                          * There is no need to check the rest of the
582                                          * list. */
583                                         RETURN(0);
584                                 }
585                         }
586
587                         if (unlikely(req_mode == LCK_GROUP &&
588                                      (lock->l_req_mode != lock->l_granted_mode))) {
589                                 scan = 1;
590                                 compat = 0;
591                                 if (lock->l_req_mode != LCK_GROUP) {
592                                         /* Ok, we hit a non-GROUP lock; there should be
593                                            no more GROUP locks later on, so queue in
594                                            front of the first non-GROUP lock */
595
596                                         ldlm_resource_insert_lock_after(lock, req);
597                                         list_del_init(&lock->l_res_link);
598                                         ldlm_resource_insert_lock_after(req, lock);
599                                         break;
600                                 }
601                                 if (req->l_policy_data.l_extent.gid ==
602                                     lock->l_policy_data.l_extent.gid) {
603                                         /* found it */
604                                         ldlm_resource_insert_lock_after(lock, req);
605                                         break;
606                                 }
607                                 continue;
608                         }
609
610                         if (unlikely(lock->l_req_mode == LCK_GROUP)) {
611                                 /* If compared lock is GROUP, then requested is
612                                  * PR/PW so this is not compatible; extent
613                                  * range does not matter */
614                                 if (*flags & (LDLM_FL_BLOCK_NOWAIT
615                                               | LDLM_FL_SPECULATIVE)) {
616                                         compat = -EWOULDBLOCK;
617                                         goto destroylock;
618                                 } else {
619                                         *flags |= LDLM_FL_NO_TIMEOUT;
620                                 }
621                         } else if (lock->l_policy_data.l_extent.end < req_start ||
622                                    lock->l_policy_data.l_extent.start > req_end) {
623                                 /* if a non-group lock doesn't overlap, skip it */
624                                 continue;
625                         } else if (lock->l_req_extent.end < req_start ||
626                                    lock->l_req_extent.start > req_end) {
627                                 /* false contention, the requests don't really overlap */
628                                 check_contention = 0;
629                         }
630
631                         if (!work_list)
632                                 RETURN(0);
633
634                         if (*flags & LDLM_FL_SPECULATIVE) {
635                                 compat = -EWOULDBLOCK;
636                                 goto destroylock;
637                         }
638
639                         /* don't count conflicting glimpse locks */
640                         if (lock->l_req_mode == LCK_PR &&
641                             lock->l_policy_data.l_extent.start == 0 &&
642                             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
643                                 check_contention = 0;
644
645                         *contended_locks += check_contention;
646
647                         compat = 0;
648                         if (lock->l_blocking_ast &&
649                             lock->l_req_mode != LCK_GROUP)
650                                 ldlm_add_ast_work_item(lock, req, work_list);
651                 }
652         }
653
654         if (ldlm_check_contention(req, *contended_locks) &&
655             compat == 0 &&
656             (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
657             req->l_req_mode != LCK_GROUP &&
658             req_end - req_start <=
659             ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
660                 GOTO(destroylock, compat = -EUSERS);
661
662         RETURN(compat);
663 destroylock:
664         list_del_init(&req->l_res_link);
665         ldlm_lock_destroy_nolock(req);
666         *err = compat;
667         RETURN(compat);
668 }
669
670 /**
671  * This function refreshes the eviction timer for a cancelled lock.
672  * \param[in] lock              ldlm lock to refresh
673  * \param[in] arg               ldlm prolong arguments; the timeout, export,
674  *                              extent and counter fields are used
675  */
676 void ldlm_lock_prolong_one(struct ldlm_lock *lock,
677                            struct ldlm_prolong_args *arg)
678 {
679         time64_t timeout;
680
681         if (arg->lpa_export != lock->l_export ||
682             lock->l_flags & LDLM_FL_DESTROYED)
683                 /* ignore unrelated locks */
684                 return;
685
686         arg->lpa_locks_cnt++;
687
688         if (!(lock->l_flags & LDLM_FL_AST_SENT))
689                 /* ignore locks not being cancelled */
690                 return;
691
692         /* We are in the middle of the process - BL AST is sent, CANCEL
693          * is ahead. Take half of BL AT + IO AT process time.
694          */
695         timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);
696
697         LDLM_DEBUG(lock, "refreshed to %llds.\n", timeout);
698
699         arg->lpa_blocks_cnt++;
700
701         /* OK, this may be a lock the user holds while doing I/O;
702          * let's refresh the eviction timer for it.
703          */
704         ldlm_refresh_waiting_lock(lock, timeout);
705 }
706 EXPORT_SYMBOL(ldlm_lock_prolong_one);
707
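/* Interval tree callback for ldlm_resource_prolong(): prolong every lock
 * attached to the overlapping interval node. */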
708 static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
709                                                    void *data)
710 {
711         struct ldlm_prolong_args *arg = data;
712         struct ldlm_interval *node = to_ldlm_interval(n);
713         struct ldlm_lock *lock;
714
715         ENTRY;
716
717         LASSERT(!list_empty(&node->li_group));
718
719         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
720                 ldlm_lock_prolong_one(lock, arg);
721         }
722
723         RETURN(INTERVAL_ITER_CONT);
724 }
725
726 /**
727  * Walk through the granted trees and prolong locks if they overlap the extent.
728  *
729  * \param[in] arg               prolong args
730  */
731 void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
732 {
733         struct ldlm_interval_tree *tree;
734         struct ldlm_resource *res;
735         struct interval_node_extent ex = { .start = arg->lpa_extent.start,
736                                            .end = arg->lpa_extent.end };
737         int idx;
738
739         ENTRY;
740
741         res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
742                                 &arg->lpa_resid, LDLM_EXTENT, 0);
743         if (IS_ERR(res)) {
744                 CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
745                        arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
746                 RETURN_EXIT;
747         }
748
749         lock_res(res);
750         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
751                 tree = &res->lr_itree[idx];
752                 if (tree->lit_root == NULL) /* empty tree, skipped */
753                         continue;
754
755                 /* There is no way to check for the groupID here,
756                  * so all the group locks are considered valid,
757                  * especially because the client is supposed to
758                  * check that it has such a lock before sending an RPC.
759                  */
760                 if (!(tree->lit_mode & arg->lpa_mode))
761                         continue;
762
763                 interval_search(tree->lit_root, &ex,
764                                 ldlm_resource_prolong_cb, arg);
765         }
766
767         unlock_res(res);
768         ldlm_resource_putref(res);
769
770         EXIT;
771 }
772 EXPORT_SYMBOL(ldlm_resource_prolong);
773
774 /**
775  * Process a granting attempt for extent lock.
776  * Must be called with ns lock held.
777  *
778  * This function looks for any conflicts for \a lock in the granted or
779  * waiting queues. The lock is granted if no conflicts are found in
780  * either queue.
781  */
782 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
783                              enum ldlm_process_intention intention,
784                              enum ldlm_error *err, struct list_head *work_list)
785 {
786         struct ldlm_resource *res = lock->l_resource;
787         struct list_head rpc_list;
788         int rc, rc2;
789         int contended_locks = 0;
790         ENTRY;
791
792         LASSERT(lock->l_granted_mode != lock->l_req_mode);
793         LASSERT(list_empty(&res->lr_converting));
794         LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
795                 !ldlm_is_ast_discard_data(lock));
796         INIT_LIST_HEAD(&rpc_list);
797         check_res_locked(res);
798         *err = ELDLM_OK;
799
800         if (intention == LDLM_PROCESS_RESCAN) {
801                 /* Careful observers will note that we don't handle -EWOULDBLOCK
802                  * here, but it's ok for a non-obvious reason -- compat_queue
803                  * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT |
804                  * SPECULATIVE). flags should always be zero here, and if that
805                  * ever stops being true, we want to find out. */
806                 LASSERT(*flags == 0);
807                 rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
808                                               err, NULL, &contended_locks);
809                 if (rc == 1) {
810                         rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
811                                                       flags, err, NULL,
812                                                       &contended_locks);
813                 }
814                 if (rc == 0)
815                         RETURN(LDLM_ITER_STOP);
816
817                 ldlm_resource_unlink_lock(lock);
818
819                 if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
820                         ldlm_extent_policy(res, lock, flags);
821                 ldlm_grant_lock(lock, work_list);
822                 RETURN(LDLM_ITER_CONTINUE);
823         }
824
825         LASSERT((intention == LDLM_PROCESS_ENQUEUE && work_list == NULL) ||
826                 (intention == LDLM_PROCESS_RECOVERY && work_list != NULL));
827  restart:
828         contended_locks = 0;
829         rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
830                                       &rpc_list, &contended_locks);
831         if (rc < 0)
832                 GOTO(out_rpc_list, rc);
833
834         rc2 = 0;
835         if (rc != 2) {
836                 rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
837                                                flags, err, &rpc_list,
838                                                &contended_locks);
839                 if (rc2 < 0)
840                         GOTO(out_rpc_list, rc = rc2);
841         }
842
843         if (rc + rc2 != 2) {
844                 /* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to force
845                  * client to wait for the lock endlessly once the lock is
846                  * enqueued -bzzz */
847                 rc = ldlm_handle_conflict_lock(lock, flags, &rpc_list,
848                                                LDLM_FL_NO_TIMEOUT);
849                 if (rc == -ERESTART)
850                         GOTO(restart, rc);
851                 *err = rc;
852         } else {
853                 ldlm_extent_policy(res, lock, flags);
854                 ldlm_resource_unlink_lock(lock);
855                 ldlm_grant_lock(lock, work_list);
856                 rc = 0;
857         }
858
859 out_rpc_list:
860         if (!list_empty(&rpc_list)) {
861                 LASSERT(!ldlm_is_ast_discard_data(lock));
862                 ldlm_discard_bl_list(&rpc_list);
863         }
864         RETURN(rc);
865 }
866 #endif /* HAVE_SERVER_SUPPORT */
867
868 struct ldlm_kms_shift_args {
869         __u64   old_kms;
870         __u64   kms;
871         bool    complete;
872 };
873
874 /* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
875 static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
876                                             void *args)
877 {
878         struct ldlm_kms_shift_args *arg = args;
879         struct ldlm_interval *node = to_ldlm_interval(n);
880         struct ldlm_lock *tmplock;
881         struct ldlm_lock *lock = NULL;
882
883         ENTRY;
884
885         /* Since all locks in an interval have the same extent, we can just
886          * use the first lock without kms_ignore set. */
887         list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
888                 if (ldlm_is_kms_ignore(tmplock))
889                         continue;
890
891                 lock = tmplock;
892
893                 break;
894         }
895
896         /* No locks in this interval without kms_ignore set */
897         if (!lock)
898                 RETURN(INTERVAL_ITER_CONT);
899
900         /* If we find a lock with a greater or equal kms, we are not the
901          * highest lock (or we share that distinction with another lock), and
902          * don't need to update KMS.  Return old_kms and stop looking. */
903         if (lock->l_policy_data.l_extent.end >= arg->old_kms) {
904                 arg->kms = arg->old_kms;
905                 arg->complete = true;
906                 RETURN(INTERVAL_ITER_STOP);
907         }
908
909         if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
910                 arg->kms = lock->l_policy_data.l_extent.end + 1;
911
912         /* Since interval_iterate_reverse starts with the highest lock and
913          * works down, for PW locks, we only need to check if we should update
914          * the kms, then stop walking the tree.  PR locks are not exclusive, so
915          * the highest start does not imply the highest end and we must
916          * continue. (Only one group lock is allowed per resource, so this is
917          * irrelevant for group locks.) */
918         if (lock->l_granted_mode == LCK_PW)
919                 RETURN(INTERVAL_ITER_STOP);
920         else
921                 RETURN(INTERVAL_ITER_CONT);
922 }
923
924 /* When a lock is cancelled by a client, the KMS may undergo change if this
925  * is the "highest lock".  This function returns the new KMS value, updating
926  * it only if we were the highest lock.
927  *
928  * Caller must hold lr_lock already.
929  *
930  * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
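/* For example, if a PW lock on [0, 4095] is cancelled while old_kms is 4096
 * and the only other granted lock (without kms_ignore) covers [0, 1023], the
 * KMS shifts down to 1024, i.e. the highest remaining end + 1. */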
931 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
932 {
933         struct ldlm_resource *res = lock->l_resource;
934         struct ldlm_interval_tree *tree;
935         struct ldlm_kms_shift_args args;
936         int idx = 0;
937
938         ENTRY;
939
940         args.old_kms = old_kms;
941         args.kms = 0;
942         args.complete = false;
943
944         /* don't let another thread in ldlm_extent_shift_kms race in
945          * just after we finish and take our lock into account in its
946          * calculation of the kms */
947         ldlm_set_kms_ignore(lock);
948
949         /* We iterate over the lock trees, looking for the largest kms smaller
950          * than the current one. */
951         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
952                 tree = &res->lr_itree[idx];
953
954                 /* If our already known kms is >= than the highest 'end' in
955                  * this tree, we don't need to check this tree, because
956                  * the kms from a tree can be lower than in_max_high (due to
957                  * kms_ignore), but it can never be higher. */
958                 if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
959                         continue;
960
961                 interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
962                                          &args);
963
964                 /* this tells us we're not the highest lock, so we don't need
965                  * to check the remaining trees */
966                 if (args.complete)
967                         break;
968         }
969
970         LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
971                  args.old_kms);
972
973         RETURN(args.kms);
974 }
975 EXPORT_SYMBOL(ldlm_extent_shift_kms);
976
977 struct kmem_cache *ldlm_interval_slab;
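/* Allocate an interval node from the slab cache and attach the lock to it. */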
978 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
979 {
980         struct ldlm_interval *node;
981         ENTRY;
982
983         LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
984         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
985         if (node == NULL)
986                 RETURN(NULL);
987
988         INIT_LIST_HEAD(&node->li_group);
989         ldlm_interval_attach(node, lock);
990         RETURN(node);
991 }
992
993 void ldlm_interval_free(struct ldlm_interval *node)
994 {
995         if (node) {
996                 LASSERT(list_empty(&node->li_group));
997                 LASSERT(!interval_is_intree(&node->li_node));
998                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
999         }
1000 }
1001
1002 /* interval tree, for LDLM_EXTENT. */
1003 void ldlm_interval_attach(struct ldlm_interval *n,
1004                           struct ldlm_lock *l)
1005 {
1006         LASSERT(l->l_tree_node == NULL);
1007         LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
1008
1009         list_add_tail(&l->l_sl_policy, &n->li_group);
1010         l->l_tree_node = n;
1011 }
1012
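/* Detach the lock from its interval node; return the node if it has become
 * empty (so the caller can free or reuse it), NULL otherwise. */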
1013 struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
1014 {
1015         struct ldlm_interval *n = l->l_tree_node;
1016
1017         if (n == NULL)
1018                 return NULL;
1019
1020         LASSERT(!list_empty(&n->li_group));
1021         l->l_tree_node = NULL;
1022         list_del_init(&l->l_sl_policy);
1023
1024         return list_empty(&n->li_group) ? n : NULL;
1025 }
1026
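/* Convert a lock mode (a single power-of-two bit) into the index of the
 * matching interval tree in lr_itree[]. */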
1027 static inline int ldlm_mode_to_index(enum ldlm_mode mode)
1028 {
1029         int index;
1030
1031         LASSERT(mode != 0);
1032         LASSERT(is_power_of_2(mode));
1033         for (index = -1; mode != 0; index++, mode >>= 1)
1034                 /* do nothing */;
1035         LASSERT(index < LCK_MODE_NUM);
1036         return index;
1037 }
1038
1039 /** Add newly granted lock into interval tree for the resource. */
1040 void ldlm_extent_add_lock(struct ldlm_resource *res,
1041                           struct ldlm_lock *lock)
1042 {
1043         struct interval_node *found, **root;
1044         struct ldlm_interval *node;
1045         struct ldlm_extent *extent;
1046         int idx, rc;
1047
1048         LASSERT(lock->l_granted_mode == lock->l_req_mode);
1049
1050         node = lock->l_tree_node;
1051         LASSERT(node != NULL);
1052         LASSERT(!interval_is_intree(&node->li_node));
1053
1054         idx = ldlm_mode_to_index(lock->l_granted_mode);
1055         LASSERT(lock->l_granted_mode == 1 << idx);
1056         LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
1057
1058         /* initialize the node extent */
1059         extent = &lock->l_policy_data.l_extent;
1060
1061         rc = interval_set(&node->li_node, extent->start, extent->end);
1062         LASSERT(!rc);
1063
1064         root = &res->lr_itree[idx].lit_root;
1065         found = interval_insert(&node->li_node, root);
1066         if (found) { /* The policy group found. */
1067                 struct ldlm_interval *tmp = ldlm_interval_detach(lock);
1068                 LASSERT(tmp != NULL);
1069                 ldlm_interval_free(tmp);
1070                 ldlm_interval_attach(to_ldlm_interval(found), lock);
1071         }
1072         res->lr_itree[idx].lit_size++;
1073
1074         /* even though we use the interval tree to manage the extent locks, we
1075          * also add the locks to the granted list for debugging purposes */
1076         ldlm_resource_add_lock(res, &res->lr_granted, lock);
1077
1078         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
1079                 struct ldlm_lock *lck;
1080
1081                 list_for_each_entry_reverse(lck, &res->lr_granted,
1082                                             l_res_link) {
1083                         if (lck == lock)
1084                                 continue;
1085                         if (lockmode_compat(lck->l_granted_mode,
1086                                             lock->l_granted_mode))
1087                                 continue;
1088                         if (ldlm_extent_overlap(&lck->l_req_extent,
1089                                                 &lock->l_req_extent)) {
1090                                 CDEBUG(D_ERROR, "granting conflicting lock %p "
1091                                                 "%p\n", lck, lock);
1092                                 ldlm_resource_dump(D_ERROR, res);
1093                                 LBUG();
1094                         }
1095                 }
1096         }
1097 }
1098
1099 /** Remove cancelled lock from resource interval tree. */
1100 void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
1101 {
1102         struct ldlm_resource *res = lock->l_resource;
1103         struct ldlm_interval *node = lock->l_tree_node;
1104         struct ldlm_interval_tree *tree;
1105         int idx;
1106
1107         if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
1108                 return;
1109
1110         idx = ldlm_mode_to_index(lock->l_granted_mode);
1111         LASSERT(lock->l_granted_mode == 1 << idx);
1112         tree = &res->lr_itree[idx];
1113
1114         LASSERT(tree->lit_root != NULL); /* ensure the tree is not empty */
1115
1116         tree->lit_size--;
1117         node = ldlm_interval_detach(lock);
1118         if (node) {
1119                 interval_erase(&node->li_node, &tree->lit_root);
1120                 ldlm_interval_free(node);
1121         }
1122 }
1123
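/* Convert extent lock policy data from its on-wire form to the local form. */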
1124 void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
1125                                       union ldlm_policy_data *lpolicy)
1126 {
1127         lpolicy->l_extent.start = wpolicy->l_extent.start;
1128         lpolicy->l_extent.end = wpolicy->l_extent.end;
1129         lpolicy->l_extent.gid = wpolicy->l_extent.gid;
1130 }
1131
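/* Convert extent lock policy data from the local form to the on-wire form,
 * zeroing any unused fields in the wire structure. */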
1132 void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
1133                                       union ldlm_wire_policy_data *wpolicy)
1134 {
1135         memset(wpolicy, 0, sizeof(*wpolicy));
1136         wpolicy->l_extent.start = lpolicy->l_extent.start;
1137         wpolicy->l_extent.end = lpolicy->l_extent.end;
1138         wpolicy->l_extent.gid = lpolicy->l_extent.gid;
1139 }
1140