1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2013, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_extent.c
33  *
34  * Author: Peter Braam <braam@clusterfs.com>
35  * Author: Phil Schwan <phil@clusterfs.com>
36  */
37
38 /**
39  * This file contains the implementation of the EXTENT lock type.
40  *
41  * The EXTENT lock type is for locking a contiguous range of values, represented
42  * by 64-bit starting and ending offsets (inclusive). There are several extent
43  * lock modes, some of which may be mutually incompatible. Extent locks are
44  * considered incompatible if their modes are incompatible and their extents
45  * intersect.  See the lock mode compatibility matrix in lustre_dlm.h.
46  */
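/*
 * For illustration: a PW lock on [0, 4095] conflicts with a PR lock on
 * [4000, 8191] (the modes are incompatible and the extents intersect), but
 * not with a PR lock on [8192, 12287], whose extent does not intersect.
 */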
47
48 #define DEBUG_SUBSYSTEM S_LDLM
49
50 #include <libcfs/libcfs.h>
51 #include <lustre_dlm.h>
52 #include <obd_support.h>
53 #include <obd.h>
54 #include <obd_class.h>
55 #include <lustre_lib.h>
56
57 #include "ldlm_internal.h"
58
59 #ifdef HAVE_SERVER_SUPPORT
60 # define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
61
62 /**
63  * Fix up the ldlm_extent after expanding it.
64  *
65  * After expansion has been done, we might still want to do certain adjusting
66  * based on overall contention of the resource and the like to avoid granting
67  * overly wide locks.
68  */
69 static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
70                                               struct ldlm_extent *new_ex,
71                                               int conflicting)
72 {
73         enum ldlm_mode req_mode = req->l_req_mode;
74         __u64 req_start = req->l_req_extent.start;
75         __u64 req_end = req->l_req_extent.end;
76         __u64 req_align, mask;
77
78         if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
79                 if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
80                         new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
81                                           new_ex->end);
82         }
83
84         if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
85                 EXIT;
86                 return;
87         }
88
89         /* We need to ensure that the lock extent is properly aligned to what
90          * the client requested.  It must also be aligned to the server page
91          * size, otherwise a single server page could be covered by two
92          * write locks. */
93         mask = PAGE_SIZE;
94         req_align = (req_end + 1) | req_start;
95         if (req_align != 0 && (req_align & (mask - 1)) == 0) {
96                 while ((req_align & mask) == 0)
97                         mask <<= 1;
98         }
99         mask -= 1;
100         /* We can only shrink the lock, not grow it.
101          * This should never cause the lock to be smaller than requested,
102          * since the requested lock was already aligned on these boundaries. */
103         new_ex->start = ((new_ex->start - 1) | mask) + 1;
104         new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
105         LASSERTF(new_ex->start <= req_start,
106                  "mask %#llx grant start %llu req start %llu\n",
107                  mask, new_ex->start, req_start);
108         LASSERTF(new_ex->end >= req_end,
109                  "mask %#llx grant end %llu req end %llu\n",
110                  mask, new_ex->end, req_end);
111 }
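/*
 * A worked example of the fixup above, assuming a 4096-byte PAGE_SIZE: if the
 * client requested [0, 4095] and expansion produced new_ex = [0, 10000], then
 * req_align = 4096, the mask stays at PAGE_SIZE and becomes 4095 after the
 * decrement, and new_ex is trimmed to the page-aligned extent [0, 8191].  The
 * requested range itself is never shrunk, since it was already aligned to
 * these boundaries.
 */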
112
113 /**
114  * Return the maximum extent that:
115  * - contains the requested extent
116  * - does not overlap existing conflicting extents outside the requested one
117  *
118  * This allows clients to request a small required extent range, but if there
119  * is no contention on the lock the full lock can be granted to the client.
120  * This avoids the need for many smaller lock requests to be granted in the
121  * common (uncontended) case.
122  *
123  * Use interval tree to expand the lock extent for granted lock.
124  */
125 static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
126                                                 struct ldlm_extent *new_ex)
127 {
128         struct ldlm_resource *res = req->l_resource;
129         enum ldlm_mode req_mode = req->l_req_mode;
130         __u64 req_start = req->l_req_extent.start;
131         __u64 req_end = req->l_req_extent.end;
132         struct ldlm_interval_tree *tree;
133         struct interval_node_extent limiter = {
134                 .start  = new_ex->start,
135                 .end    = new_ex->end,
136         };
137         int conflicting = 0;
138         int idx;
139         ENTRY;
140
141         lockmode_verify(req_mode);
142
143         /* Use the interval trees to scan the granted LDLM extent locks. */
144         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
145                 struct interval_node_extent ext = {
146                         .start  = req_start,
147                         .end    = req_end,
148                 };
149
150                 tree = &res->lr_itree[idx];
151                 if (lockmode_compat(tree->lit_mode, req_mode))
152                         continue;
153
154                 conflicting += tree->lit_size;
155                 if (conflicting > 4)
156                         limiter.start = req_start;
157
158                 if (interval_is_overlapped(tree->lit_root, &ext))
159                         CDEBUG(D_INFO,
160                                "req_mode = %d, tree->lit_mode = %d, "
161                                "tree->lit_size = %d\n",
162                                req_mode, tree->lit_mode, tree->lit_size);
163                 interval_expand(tree->lit_root, &ext, &limiter);
164                 limiter.start = max(limiter.start, ext.start);
165                 limiter.end = min(limiter.end, ext.end);
166                 if (limiter.start == req_start && limiter.end == req_end)
167                         break;
168         }
169
170         new_ex->start = limiter.start;
171         new_ex->end = limiter.end;
172         LASSERT(new_ex->start <= req_start);
173         LASSERT(new_ex->end >= req_end);
174
175         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
176         EXIT;
177 }
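/*
 * For illustration, assuming 4096-byte pages and an otherwise empty waiting
 * queue: a PW request for [0, 4095] on an uncontended resource is expanded to
 * the whole object, [0, OBD_OBJECT_EOF].  If another client already holds a
 * conflicting granted lock on [1048576, OBD_OBJECT_EOF], the expansion is
 * limited so that the grant becomes [0, 1048575] instead.
 */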
178
179 /* The purpose of this function is to return the maximum extent that:
180  * - contains the requested extent
181  * - does not overlap existing conflicting extents outside the requested
182  *   one
183  */
184 static void
185 ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
186                                     struct ldlm_extent *new_ex)
187 {
188         struct ldlm_resource *res = req->l_resource;
189         enum ldlm_mode req_mode = req->l_req_mode;
190         __u64 req_start = req->l_req_extent.start;
191         __u64 req_end = req->l_req_extent.end;
192         struct ldlm_lock *lock;
193         int conflicting = 0;
194         ENTRY;
195
196         lockmode_verify(req_mode);
197
198         /* for waiting locks */
199         list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
200                 struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;
201
202                 /* We already hit the minimum requested size, search no more */
203                 if (new_ex->start == req_start && new_ex->end == req_end) {
204                         EXIT;
205                         return;
206                 }
207
208                 /* Don't conflict with ourselves */
209                 if (req == lock)
210                         continue;
211
212                 /* Locks are compatible, overlap doesn't matter */
213                 /* Until bug 20 is fixed, try to avoid granting overlapping
214                  * locks on one client (they take a long time to cancel) */
215                 if (lockmode_compat(lock->l_req_mode, req_mode) &&
216                     lock->l_export != req->l_export)
217                         continue;
218
219                 /* If this is a high-traffic lock, don't grow downwards at all
220                  * or grow upwards too much */
221                 ++conflicting;
222                 if (conflicting > 4)
223                         new_ex->start = req_start;
224
225                 /* If lock doesn't overlap new_ex, skip it. */
226                 if (!ldlm_extent_overlap(l_extent, new_ex))
227                         continue;
228
229                 /* The locks conflict in their requested extents and we can't
230                  * satisfy both, so ignore it.  Either we will ping-pong this
231                  * extent (we would regardless of what extent we granted) or the
232                  * lock is unused and it shouldn't limit our extent growth. */
233                 if (ldlm_extent_overlap(&lock->l_req_extent,&req->l_req_extent))
234                         continue;
235
236                 /* We grow extents downwards only as far as they don't overlap
237                  * with already-granted locks, on the assumption that clients
238                  * will be writing beyond the initial requested end and would
239                  * then need to enqueue a new lock beyond previous request.
240                  * l_req_extent->end strictly < req_start, checked above. */
241                 if (l_extent->start < req_start && new_ex->start != req_start) {
242                         if (l_extent->end >= req_start)
243                                 new_ex->start = req_start;
244                         else
245                                 new_ex->start = min(l_extent->end+1, req_start);
246                 }
247
248                 /* If we need to cancel this lock anyway because our request
249                  * overlaps the granted lock, we grow up to its requested
250                  * extent start instead of limiting this extent, assuming that
251                  * clients are writing forwards and the lock had overgrown
252                  * its extent downwards before we enqueued our request. */
253                 if (l_extent->end > req_end) {
254                         if (l_extent->start <= req_end)
255                                 new_ex->end = max(lock->l_req_extent.start - 1,
256                                                   req_end);
257                         else
258                                 new_ex->end = max(l_extent->start - 1, req_end);
259                 }
260         }
261
262         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
263         EXIT;
264 }
265
266
267 /* In order to determine the largest possible extent we can grant, we need
268  * to scan all of the queues. */
269 static void ldlm_extent_policy(struct ldlm_resource *res,
270                                struct ldlm_lock *lock, __u64 *flags)
271 {
272         struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
273
274         if (lock->l_export == NULL)
275                 /*
276                  * this is a local lock taken by server (e.g., as a part of
277                  * OST-side locking, or unlink handling). Expansion doesn't
278                  * make a lot of sense for local locks, because they are
279                  * dropped immediately on operation completion and would only
280                  * conflict with other threads.
281                  */
282                 return;
283
284         if (lock->l_policy_data.l_extent.start == 0 &&
285             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
286                 /* fast-path whole file locks */
287                 return;
288
289         /* Because reprocess_queue zeroes the flags and uses them to return
290          * LDLM_FL_LOCK_CHANGED, we must check for the NO_EXPANSION flag
291          * in the lock flags rather than the 'flags' argument. */
292         if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
293                 ldlm_extent_internal_policy_granted(lock, &new_ex);
294                 ldlm_extent_internal_policy_waiting(lock, &new_ex);
295         } else {
296                 LDLM_DEBUG(lock, "Not expanding manually requested lock.\n");
297                 new_ex.start = lock->l_policy_data.l_extent.start;
298                 new_ex.end = lock->l_policy_data.l_extent.end;
299                 /* In case the request is not on correct boundaries, we call
300                  * fixup. (normally called in ldlm_extent_internal_policy_*) */
301                 ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
302         }
303
304         if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
305                 *flags |= LDLM_FL_LOCK_CHANGED;
306                 lock->l_policy_data.l_extent.start = new_ex.start;
307                 lock->l_policy_data.l_extent.end = new_ex.end;
308         }
309 }
310
311 static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
312 {
313         struct ldlm_resource *res = lock->l_resource;
314         cfs_time_t now = cfs_time_current();
315
316         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
317                 return 1;
318
319         CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
320         if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
321                 res->lr_contention_time = now;
322         return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
323                 cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
324 }
325
326 struct ldlm_extent_compat_args {
327         struct list_head *work_list;
328         struct ldlm_lock *lock;
329         enum ldlm_mode mode;
330         int *locks;
331         int *compat;
332 };
333
334 static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
335                                                 void *data)
336 {
337         struct ldlm_extent_compat_args *priv = data;
338         struct ldlm_interval *node = to_ldlm_interval(n);
339         struct ldlm_extent *extent;
340         struct list_head *work_list = priv->work_list;
341         struct ldlm_lock *lock, *enq = priv->lock;
342         enum ldlm_mode mode = priv->mode;
343         int count = 0;
344         ENTRY;
345
346         LASSERT(!list_empty(&node->li_group));
347
348         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
349                 /* the interval tree only holds granted locks */
350                 LASSERTF(mode == lock->l_granted_mode,
351                          "mode = %s, lock->l_granted_mode = %s\n",
352                          ldlm_lockname[mode],
353                          ldlm_lockname[lock->l_granted_mode]);
354                 count++;
355                 if (lock->l_blocking_ast &&
356                     lock->l_granted_mode != LCK_GROUP)
357                         ldlm_add_ast_work_item(lock, enq, work_list);
358         }
359
360         /* don't count conflicting glimpse locks */
361         extent = ldlm_interval_extent(node);
362         if (!(mode == LCK_PR &&
363             extent->start == 0 && extent->end == OBD_OBJECT_EOF))
364                 *priv->locks += count;
365
366         if (priv->compat)
367                 *priv->compat = 0;
368
369         RETURN(INTERVAL_ITER_CONT);
370 }
371
372 /**
373  * Determine if the lock is compatible with all locks on the queue.
374  *
375  * If \a work_list is provided, conflicting locks are linked there.
376  * If \a work_list is not provided, we exit this function on first conflict.
377  *
378  * \retval 0 if the lock is not compatible
379  * \retval 1 if the lock is compatible
380  * \retval 2 if \a req is a group lock and it is compatible and requires
381  *           no further checking
382  * \retval negative error, such as EWOULDBLOCK for group locks
383  */
384 static int
385 ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
386                          __u64 *flags, enum ldlm_error *err,
387                          struct list_head *work_list, int *contended_locks)
388 {
389         struct ldlm_resource *res = req->l_resource;
390         enum ldlm_mode req_mode = req->l_req_mode;
391         __u64 req_start = req->l_req_extent.start;
392         __u64 req_end = req->l_req_extent.end;
393         struct ldlm_lock *lock;
394         int check_contention;
395         int compat = 1;
396         int scan = 0;
397         ENTRY;
398
399         lockmode_verify(req_mode);
400
401         /* Use the interval trees for the granted locks */
402         if (queue == &res->lr_granted) {
403                 struct ldlm_interval_tree *tree;
404                 struct ldlm_extent_compat_args data = {.work_list = work_list,
405                                                .lock = req,
406                                                .locks = contended_locks,
407                                                .compat = &compat };
408                 struct interval_node_extent ex = { .start = req_start,
409                                                    .end = req_end };
410                 int idx, rc;
411
412                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
413                         tree = &res->lr_itree[idx];
414                         if (tree->lit_root == NULL) /* empty tree, skipped */
415                                 continue;
416
417                         data.mode = tree->lit_mode;
418                         if (lockmode_compat(req_mode, tree->lit_mode)) {
419                                 struct ldlm_interval *node;
420                                 struct ldlm_extent *extent;
421
422                                 if (req_mode != LCK_GROUP)
423                                         continue;
424
425                                 /* group lock, grant it immediately if
426                                  * compatible */
427                                 node = to_ldlm_interval(tree->lit_root);
428                                 extent = ldlm_interval_extent(node);
429                                 if (req->l_policy_data.l_extent.gid ==
430                                     extent->gid)
431                                         RETURN(2);
432                         }
433
434                         if (tree->lit_mode == LCK_GROUP) {
435                                 if (*flags & (LDLM_FL_BLOCK_NOWAIT |
436                                               LDLM_FL_SPECULATIVE)) {
437                                         compat = -EWOULDBLOCK;
438                                         goto destroylock;
439                                 }
440
441                                 *flags |= LDLM_FL_NO_TIMEOUT;
442                                 if (!work_list)
443                                         RETURN(0);
444
445                                 /* if the work list is not NULL, add
446                                    all locks in the tree to it */
447                                 compat = 0;
448                                 interval_iterate(tree->lit_root,
449                                                  ldlm_extent_compat_cb, &data);
450                                 continue;
451                         }
452
453                         /* We've found a potentially blocking lock, check
454                          * compatibility.  This handles locks other than GROUP
455                          * locks, which are handled separately above.
456                          *
457                          * Locks with FL_SPECULATIVE are asynchronous requests
458                          * which must never wait behind another lock, so they
459                          * fail if any conflicting lock is found. */
460                         if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
461                                 rc = interval_is_overlapped(tree->lit_root,
462                                                             &ex);
463                                 if (rc) {
464                                         if (!work_list) {
465                                                 RETURN(0);
466                                         } else {
467                                                 compat = -EWOULDBLOCK;
468                                                 goto destroylock;
469                                         }
470                                 }
471                         } else {
472                                 interval_search(tree->lit_root, &ex,
473                                                 ldlm_extent_compat_cb, &data);
474                                 if (!list_empty(work_list) && compat)
475                                         compat = 0;
476                         }
477                 }
478         } else { /* for waiting queue */
479                 list_for_each_entry(lock, queue, l_res_link) {
480                         check_contention = 1;
481
482                         /* We stop walking the queue if we hit ourselves so
483                          * we don't take conflicting locks enqueued after us
484                          * into account, or we'd wait forever. */
485                         if (req == lock)
486                                 break;
487
488                         if (unlikely(scan)) {
489                                 /* We only get here if we are queuing a GROUP lock
490                                    and met an incompatible one. The main idea of this
491                                    code is to insert the GROUP lock after a compatible
492                                    GROUP lock in the waiting queue or, if there is none,
493                                    in front of the first non-GROUP lock. */
494                                 if (lock->l_req_mode != LCK_GROUP) {
495                                         /* Ok, we hit a non-GROUP lock; there should
496                                          * be no more GROUP locks later on, so queue in
497                                          * front of the first non-GROUP lock. */
498
499                                         ldlm_resource_insert_lock_after(lock, req);
500                                         list_del_init(&lock->l_res_link);
501                                         ldlm_resource_insert_lock_after(req, lock);
502                                         compat = 0;
503                                         break;
504                                 }
505                                 if (req->l_policy_data.l_extent.gid ==
506                                     lock->l_policy_data.l_extent.gid) {
507                                         /* found it */
508                                         ldlm_resource_insert_lock_after(lock, req);
509                                         compat = 0;
510                                         break;
511                                 }
512                                 continue;
513                         }
514
515                         /* locks are compatible, overlap doesn't matter */
516                         if (lockmode_compat(lock->l_req_mode, req_mode)) {
517                                 if (req_mode == LCK_PR &&
518                                     ((lock->l_policy_data.l_extent.start <=
519                                       req->l_policy_data.l_extent.start) &&
520                                      (lock->l_policy_data.l_extent.end >=
521                                       req->l_policy_data.l_extent.end))) {
522                                         /* If we met a PR lock just like us or
523                                            wider, and nobody down the list
524                                            conflicted with it, that means we
525                                            can skip processing the rest of
526                                            the list and safely place ourselves
527                                            at the end of the list, or grant
528                                            it (depending on whether we met a
529                                            conflicting lock earlier in the
530                                            list).  On the first enqueue only,
531                                            we keep traversing if there is
532                                            something conflicting down the
533                                            list, because we need to make sure
534                                            that something is marked AST_SENT
535                                            as well; with an empty worklist we
536                                            would exit on the first conflict. */
537                                         /* There IS a case where such a flag is
538                                            not set for a lock, yet it blocks
539                                            something. Luckily for us this only
540                                            happens during destroy, so the lock
541                                            is exclusive. So here we are safe. */
542                                         if (!ldlm_is_ast_sent(lock))
543                                                 RETURN(compat);
544                                 }
545
546                                 /* non-group locks are compatible, overlap doesn't
547                                    matter */
548                                 if (likely(req_mode != LCK_GROUP))
549                                         continue;
550
551                                 /* If we are trying to get a GROUP lock and there is
552                                    another one of this kind, we need to compare gid */
553                                 if (req->l_policy_data.l_extent.gid ==
554                                     lock->l_policy_data.l_extent.gid) {
555                                         /* If the existing lock with matching gid is
556                                            granted, we grant the new one too. */
557                                         if (lock->l_req_mode == lock->l_granted_mode)
558                                                 RETURN(2);
559
560                                         /* Otherwise we are scanning the queue of waiting
561                                          * locks and it means the current request would
562                                          * block along with the existing lock (which is
563                                          * already blocked).
564                                          * If we are in nonblocking mode, return
565                                          * immediately. */
566                                         if (*flags & (LDLM_FL_BLOCK_NOWAIT
567                                                       | LDLM_FL_SPECULATIVE)) {
568                                                 compat = -EWOULDBLOCK;
569                                                 goto destroylock;
570                                         }
571                                         /* If this group lock is compatible with another
572                                          * group lock on the waiting list, they must be
573                                          * together in the list, so they can be granted
574                                          * at the same time.  Otherwise the later lock
575                                          * can get stuck behind another, incompatible,
576                                          * lock. */
577                                         ldlm_resource_insert_lock_after(lock, req);
578                                         /* Because 'lock' is not granted, we can stop
579                                          * processing this queue and return immediately.
580                                          * There is no need to check the rest of the
581                                          * list. */
582                                         RETURN(0);
583                                 }
584                         }
585
586                         if (unlikely(req_mode == LCK_GROUP &&
587                                      (lock->l_req_mode != lock->l_granted_mode))) {
588                                 scan = 1;
589                                 compat = 0;
590                                 if (lock->l_req_mode != LCK_GROUP) {
591                                         /* Ok, we hit a non-GROUP lock; there should be no
592                                            more GROUP locks later on, so queue in front of
593                                            the first non-GROUP lock. */
594
595                                         ldlm_resource_insert_lock_after(lock, req);
596                                         list_del_init(&lock->l_res_link);
597                                         ldlm_resource_insert_lock_after(req, lock);
598                                         break;
599                                 }
600                                 if (req->l_policy_data.l_extent.gid ==
601                                     lock->l_policy_data.l_extent.gid) {
602                                         /* found it */
603                                         ldlm_resource_insert_lock_after(lock, req);
604                                         break;
605                                 }
606                                 continue;
607                         }
608
609                         if (unlikely(lock->l_req_mode == LCK_GROUP)) {
610                                 /* If the compared lock is GROUP, then the requested
611                                  * one is PR/PW, so they are not compatible; the
612                                  * extent range does not matter. */
613                                 if (*flags & (LDLM_FL_BLOCK_NOWAIT
614                                               | LDLM_FL_SPECULATIVE)) {
615                                         compat = -EWOULDBLOCK;
616                                         goto destroylock;
617                                 } else {
618                                         *flags |= LDLM_FL_NO_TIMEOUT;
619                                 }
620                         } else if (lock->l_policy_data.l_extent.end < req_start ||
621                                    lock->l_policy_data.l_extent.start > req_end) {
622                                 /* if a non-group lock doesn't overlap, skip it */
623                                 continue;
624                         } else if (lock->l_req_extent.end < req_start ||
625                                    lock->l_req_extent.start > req_end) {
626                                 /* false contention, the requests don't really overlap */
627                                 check_contention = 0;
628                         }
629
630                         if (!work_list)
631                                 RETURN(0);
632
633                         if (*flags & LDLM_FL_SPECULATIVE) {
634                                 compat = -EWOULDBLOCK;
635                                 goto destroylock;
636                         }
637
638                         /* don't count conflicting glimpse locks */
639                         if (lock->l_req_mode == LCK_PR &&
640                             lock->l_policy_data.l_extent.start == 0 &&
641                             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
642                                 check_contention = 0;
643
644                         *contended_locks += check_contention;
645
646                         compat = 0;
647                         if (lock->l_blocking_ast &&
648                             lock->l_req_mode != LCK_GROUP)
649                                 ldlm_add_ast_work_item(lock, req, work_list);
650                 }
651         }
652
653         if (ldlm_check_contention(req, *contended_locks) &&
654             compat == 0 &&
655             (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
656             req->l_req_mode != LCK_GROUP &&
657             req_end - req_start <=
658             ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
659                 GOTO(destroylock, compat = -EUSERS);
660
661         RETURN(compat);
662 destroylock:
663         list_del_init(&req->l_res_link);
664         ldlm_lock_destroy_nolock(req);
665         *err = compat;
666         RETURN(compat);
667 }
668
669 /**
670  * This function refreshes the eviction timer for a cancelled lock.
671  * \param[in] lock              ldlm lock to refresh
672  * \param[in] arg               ldlm prolong arguments; the timeout, export, extent
673  *                              and counter are used
674  */
675 void ldlm_lock_prolong_one(struct ldlm_lock *lock,
676                            struct ldlm_prolong_args *arg)
677 {
678         int timeout;
679
680         if (arg->lpa_export != lock->l_export ||
681             lock->l_flags & LDLM_FL_DESTROYED)
682                 /* ignore unrelated locks */
683                 return;
684
685         arg->lpa_locks_cnt++;
686
687         if (!(lock->l_flags & LDLM_FL_AST_SENT))
688                 /* ignore locks not being cancelled */
689                 return;
690
691         /* We are in the middle of the process - the BL AST has been sent and
692          * the CANCEL is still ahead. Take half of the BL AT + IO AT process time.
693          */
694         timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);
695
696         LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);
697
698         arg->lpa_blocks_cnt++;
699
700         /* OK, this is possibly a lock the user holds while doing I/O;
701          * let's refresh the eviction timer for it.
702          */
703         ldlm_refresh_waiting_lock(lock, timeout);
704 }
705 EXPORT_SYMBOL(ldlm_lock_prolong_one);
706
707 static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
708                                                    void *data)
709 {
710         struct ldlm_prolong_args *arg = data;
711         struct ldlm_interval *node = to_ldlm_interval(n);
712         struct ldlm_lock *lock;
713
714         ENTRY;
715
716         LASSERT(!list_empty(&node->li_group));
717
718         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
719                 ldlm_lock_prolong_one(lock, arg);
720         }
721
722         RETURN(INTERVAL_ITER_CONT);
723 }
724
725 /**
726  * Walk through the granted trees and prolong locks if they overlap the extent.
727  *
728  * \param[in] arg               prolong args
729  */
730 void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
731 {
732         struct ldlm_interval_tree *tree;
733         struct ldlm_resource *res;
734         struct interval_node_extent ex = { .start = arg->lpa_extent.start,
735                                            .end = arg->lpa_extent.end };
736         int idx;
737
738         ENTRY;
739
740         res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
741                                 &arg->lpa_resid, LDLM_EXTENT, 0);
742         if (IS_ERR(res)) {
743                 CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
744                        arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
745                 RETURN_EXIT;
746         }
747
748         lock_res(res);
749         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
750                 tree = &res->lr_itree[idx];
751                 if (tree->lit_root == NULL) /* empty tree, skipped */
752                         continue;
753
754                 /* There is no way to check for the group ID,
755                  * so all group locks are considered valid
756                  * here, especially because the client is supposed
757                  * to check that it has such a lock before sending an RPC.
758                  */
759                 if (!(tree->lit_mode & arg->lpa_mode))
760                         continue;
761
762                 interval_search(tree->lit_root, &ex,
763                                 ldlm_resource_prolong_cb, arg);
764         }
765
766         unlock_res(res);
767         ldlm_resource_putref(res);
768
769         EXIT;
770 }
771 EXPORT_SYMBOL(ldlm_resource_prolong);
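/*
 * A minimal usage sketch with a hypothetical server-side helper: a handler
 * that wants to keep a client's PW extent locks from being evicted while an
 * I/O is still in flight could fill in ldlm_prolong_args and call
 * ldlm_resource_prolong() roughly like this (the function and parameter
 * names below are illustrative only):
 *
 *	static void example_prolong_io_locks(struct obd_export *exp,
 *					     const struct ldlm_res_id *resid,
 *					     __u64 start, __u64 end, int timeout)
 *	{
 *		struct ldlm_prolong_args arg = {
 *			.lpa_export	= exp,
 *			.lpa_resid	= *resid,
 *			.lpa_extent	= { .start = start, .end = end },
 *			.lpa_mode	= LCK_PW,
 *			.lpa_timeout	= timeout,
 *		};
 *
 *		ldlm_resource_prolong(&arg);
 *		(arg.lpa_blocks_cnt now counts the locks whose timers
 *		 were refreshed)
 *	}
 */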
772
773 /**
774  * Process a granting attempt for extent lock.
775  * Must be called with ns lock held.
776  *
777  * This function looks for any conflicts for \a lock in the granted or
778  * waiting queues. The lock is granted if no conflicts are found in
779  * either queue.
780  */
781 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
782                              enum ldlm_process_intention intention,
783                              enum ldlm_error *err, struct list_head *work_list)
784 {
785         struct ldlm_resource *res = lock->l_resource;
786         struct list_head rpc_list;
787         int rc, rc2;
788         int contended_locks = 0;
789         ENTRY;
790
791         LASSERT(lock->l_granted_mode != lock->l_req_mode);
792         LASSERT(list_empty(&res->lr_converting));
793         LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
794                 !ldlm_is_ast_discard_data(lock));
795         INIT_LIST_HEAD(&rpc_list);
796         check_res_locked(res);
797         *err = ELDLM_OK;
798
799         if (intention == LDLM_PROCESS_RESCAN) {
800                 /* Careful observers will note that we don't handle -EWOULDBLOCK
801                  * here, but it's ok for a non-obvious reason -- compat_queue
802                  * can only return -EWOULDBLOCK if (flags & (BLOCK_NOWAIT |
803                  * SPECULATIVE)). flags should always be zero here, and if that
804                  * ever stops being true, we want to find out. */
805                 LASSERT(*flags == 0);
806                 rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
807                                               err, NULL, &contended_locks);
808                 if (rc == 1) {
809                         rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
810                                                       flags, err, NULL,
811                                                       &contended_locks);
812                 }
813                 if (rc == 0)
814                         RETURN(LDLM_ITER_STOP);
815
816                 ldlm_resource_unlink_lock(lock);
817
818                 if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
819                         ldlm_extent_policy(res, lock, flags);
820                 ldlm_grant_lock(lock, work_list);
821                 RETURN(LDLM_ITER_CONTINUE);
822         }
823
824         LASSERT((intention == LDLM_PROCESS_ENQUEUE && work_list == NULL) ||
825                 (intention == LDLM_PROCESS_RECOVERY && work_list != NULL));
826  restart:
827         contended_locks = 0;
828         rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
829                                       &rpc_list, &contended_locks);
830         if (rc < 0)
831                 GOTO(out_rpc_list, rc);
832
833         rc2 = 0;
834         if (rc != 2) {
835                 rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
836                                                flags, err, &rpc_list,
837                                                &contended_locks);
838                 if (rc2 < 0)
839                         GOTO(out_rpc_list, rc = rc2);
840         }
841
842         if (rc + rc2 != 2) {
843                 /* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to force
844                  * client to wait for the lock endlessly once the lock is
845                  * enqueued -bzzz */
846                 rc = ldlm_handle_conflict_lock(lock, flags, &rpc_list,
847                                                LDLM_FL_NO_TIMEOUT);
848                 if (rc == -ERESTART)
849                         GOTO(restart, rc);
850                 *err = rc;
851         } else {
852                 ldlm_extent_policy(res, lock, flags);
853                 ldlm_resource_unlink_lock(lock);
854                 ldlm_grant_lock(lock, work_list);
855                 rc = 0;
856         }
857
858 out_rpc_list:
859         if (!list_empty(&rpc_list)) {
860                 LASSERT(!ldlm_is_ast_discard_data(lock));
861                 ldlm_discard_bl_list(&rpc_list);
862         }
863         RETURN(rc);
864 }
865 #endif /* HAVE_SERVER_SUPPORT */
866
867 struct ldlm_kms_shift_args {
868         __u64   old_kms;
869         __u64   kms;
870         bool    complete;
871 };
872
873 /* Callback for the interval_iterate functions, used by ldlm_extent_shift_kms() */
874 static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
875                                             void *args)
876 {
877         struct ldlm_kms_shift_args *arg = args;
878         struct ldlm_interval *node = to_ldlm_interval(n);
879         struct ldlm_lock *tmplock;
880         struct ldlm_lock *lock = NULL;
881
882         ENTRY;
883
884         /* Since all locks in an interval have the same extent, we can just
885          * use the first lock without kms_ignore set. */
886         list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
887                 if (ldlm_is_kms_ignore(tmplock))
888                         continue;
889
890                 lock = tmplock;
891
892                 break;
893         }
894
895         /* No locks in this interval without kms_ignore set */
896         if (!lock)
897                 RETURN(INTERVAL_ITER_CONT);
898
899         /* If we find a lock with a greater or equal kms, we are not the
900          * highest lock (or we share that distinction with another lock), and
901          * don't need to update KMS.  Return old_kms and stop looking. */
902         if (lock->l_policy_data.l_extent.end >= arg->old_kms) {
903                 arg->kms = arg->old_kms;
904                 arg->complete = true;
905                 RETURN(INTERVAL_ITER_STOP);
906         }
907
908         if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
909                 arg->kms = lock->l_policy_data.l_extent.end + 1;
910
911         /* Since interval_iterate_reverse starts with the highest lock and
912          * works down, for PW locks, we only need to check if we should update
913          * the kms, then stop walking the tree.  PR locks are not exclusive, so
914          * the highest start does not imply the highest end and we must
915          * continue. (Only one group lock is allowed per resource, so this is
916          * irrelevant for group locks.) */
917         if (lock->l_granted_mode == LCK_PW)
918                 RETURN(INTERVAL_ITER_STOP);
919         else
920                 RETURN(INTERVAL_ITER_CONT);
921 }
922
923 /* When a lock is cancelled by a client, the KMS may undergo change if this
924  * is the "highest lock".  This function returns the new KMS value, updating
925  * it only if we were the highest lock.
926  *
927  * Caller must hold lr_lock already.
928  *
929  * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
930 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
931 {
932         struct ldlm_resource *res = lock->l_resource;
933         struct ldlm_interval_tree *tree;
934         struct ldlm_kms_shift_args args;
935         int idx = 0;
936
937         ENTRY;
938
939         args.old_kms = old_kms;
940         args.kms = 0;
941         args.complete = false;
942
943         /* don't let another thread in ldlm_extent_shift_kms race in
944          * just after we finish and take our lock into account in its
945          * calculation of the kms */
946         ldlm_set_kms_ignore(lock);
947
948         /* We iterate over the lock trees, looking for the largest kms smaller
949          * than the current one. */
950         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
951                 tree = &res->lr_itree[idx];
952
953                 /* If our already known kms is >= the highest 'end' in
954                  * this tree, we don't need to check this tree, because
955                  * the kms from a tree can be lower than in_max_high (due to
956                  * kms_ignore), but it can never be higher. */
957                 if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
958                         continue;
959
960                 interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
961                                          &args);
962
963                 /* this tells us we're not the highest lock, so we don't need
964                  * to check the remaining trees */
965                 if (args.complete)
966                         break;
967         }
968
969         LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
970                  args.old_kms);
971
972         RETURN(args.kms);
973 }
974 EXPORT_SYMBOL(ldlm_extent_shift_kms);
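/*
 * For illustration, assuming these are the only granted locks on the resource
 * and none has kms_ignore set: with PW locks on [0, 4095] and [4096, 8191]
 * and an old_kms of 8192, cancelling the [4096, 8191] lock leaves [0, 4095]
 * as the highest remaining lock, so ldlm_extent_shift_kms() returns 4096.
 * Cancelling [0, 4095] instead leaves a lock whose end + 1 equals old_kms,
 * so 8192 is returned unchanged.
 */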
975
976 struct kmem_cache *ldlm_interval_slab;
977 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
978 {
979         struct ldlm_interval *node;
980         ENTRY;
981
982         LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
983         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
984         if (node == NULL)
985                 RETURN(NULL);
986
987         INIT_LIST_HEAD(&node->li_group);
988         ldlm_interval_attach(node, lock);
989         RETURN(node);
990 }
991
992 void ldlm_interval_free(struct ldlm_interval *node)
993 {
994         if (node) {
995                 LASSERT(list_empty(&node->li_group));
996                 LASSERT(!interval_is_intree(&node->li_node));
997                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
998         }
999 }
1000
1001 /* interval tree, for LDLM_EXTENT. */
1002 void ldlm_interval_attach(struct ldlm_interval *n,
1003                           struct ldlm_lock *l)
1004 {
1005         LASSERT(l->l_tree_node == NULL);
1006         LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
1007
1008         list_add_tail(&l->l_sl_policy, &n->li_group);
1009         l->l_tree_node = n;
1010 }
1011
1012 struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
1013 {
1014         struct ldlm_interval *n = l->l_tree_node;
1015
1016         if (n == NULL)
1017                 return NULL;
1018
1019         LASSERT(!list_empty(&n->li_group));
1020         l->l_tree_node = NULL;
1021         list_del_init(&l->l_sl_policy);
1022
1023         return list_empty(&n->li_group) ? n : NULL;
1024 }
1025
1026 static inline int ldlm_mode_to_index(enum ldlm_mode mode)
1027 {
1028         int index;
1029
1030         LASSERT(mode != 0);
1031         LASSERT(is_power_of_2(mode));
1032         for (index = -1; mode != 0; index++, mode >>= 1)
1033                 /* do nothing */;
1034         LASSERT(index < LCK_MODE_NUM);
1035         return index;
1036 }
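/*
 * For example, ldlm_mode_to_index(1 << 2) returns 2: because the mode is a
 * single bit, the loop effectively computes log2(mode), which is then used
 * to index the per-mode interval trees in res->lr_itree[].
 */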
1037
1038 /** Add newly granted lock into interval tree for the resource. */
1039 void ldlm_extent_add_lock(struct ldlm_resource *res,
1040                           struct ldlm_lock *lock)
1041 {
1042         struct interval_node *found, **root;
1043         struct ldlm_interval *node;
1044         struct ldlm_extent *extent;
1045         int idx, rc;
1046
1047         LASSERT(lock->l_granted_mode == lock->l_req_mode);
1048
1049         node = lock->l_tree_node;
1050         LASSERT(node != NULL);
1051         LASSERT(!interval_is_intree(&node->li_node));
1052
1053         idx = ldlm_mode_to_index(lock->l_granted_mode);
1054         LASSERT(lock->l_granted_mode == 1 << idx);
1055         LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
1056
1057         /* initialize the node extent */
1058         extent = &lock->l_policy_data.l_extent;
1059
1060         rc = interval_set(&node->li_node, extent->start, extent->end);
1061         LASSERT(!rc);
1062
1063         root = &res->lr_itree[idx].lit_root;
1064         found = interval_insert(&node->li_node, root);
1065         if (found) { /* The policy group found. */
1066                 struct ldlm_interval *tmp = ldlm_interval_detach(lock);
1067                 LASSERT(tmp != NULL);
1068                 ldlm_interval_free(tmp);
1069                 ldlm_interval_attach(to_ldlm_interval(found), lock);
1070         }
1071         res->lr_itree[idx].lit_size++;
1072
1073         /* even though we use an interval tree to manage the extent locks, we
1074          * also add the locks to the granted list, for debugging purposes */
1075         ldlm_resource_add_lock(res, &res->lr_granted, lock);
1076
1077         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
1078                 struct ldlm_lock *lck;
1079
1080                 list_for_each_entry_reverse(lck, &res->lr_granted,
1081                                             l_res_link) {
1082                         if (lck == lock)
1083                                 continue;
1084                         if (lockmode_compat(lck->l_granted_mode,
1085                                             lock->l_granted_mode))
1086                                 continue;
1087                         if (ldlm_extent_overlap(&lck->l_req_extent,
1088                                                 &lock->l_req_extent)) {
1089                                 CDEBUG(D_ERROR, "granting conflicting lock %p "
1090                                                 "%p\n", lck, lock);
1091                                 ldlm_resource_dump(D_ERROR, res);
1092                                 LBUG();
1093                         }
1094                 }
1095         }
1096 }
1097
1098 /** Remove cancelled lock from resource interval tree. */
1099 void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
1100 {
1101         struct ldlm_resource *res = lock->l_resource;
1102         struct ldlm_interval *node = lock->l_tree_node;
1103         struct ldlm_interval_tree *tree;
1104         int idx;
1105
1106         if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
1107                 return;
1108
1109         idx = ldlm_mode_to_index(lock->l_granted_mode);
1110         LASSERT(lock->l_granted_mode == 1 << idx);
1111         tree = &res->lr_itree[idx];
1112
1113         LASSERT(tree->lit_root != NULL); /* ensure the tree is not empty */
1114
1115         tree->lit_size--;
1116         node = ldlm_interval_detach(lock);
1117         if (node) {
1118                 interval_erase(&node->li_node, &tree->lit_root);
1119                 ldlm_interval_free(node);
1120         }
1121 }
1122
1123 void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
1124                                       union ldlm_policy_data *lpolicy)
1125 {
1126         lpolicy->l_extent.start = wpolicy->l_extent.start;
1127         lpolicy->l_extent.end = wpolicy->l_extent.end;
1128         lpolicy->l_extent.gid = wpolicy->l_extent.gid;
1129 }
1130
1131 void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
1132                                       union ldlm_wire_policy_data *wpolicy)
1133 {
1134         memset(wpolicy, 0, sizeof(*wpolicy));
1135         wpolicy->l_extent.start = lpolicy->l_extent.start;
1136         wpolicy->l_extent.end = lpolicy->l_extent.end;
1137         wpolicy->l_extent.gid = lpolicy->l_extent.gid;
1138 }
1139