/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

/**
 * This file contains the implementation of the EXTENT lock type.
 *
 * The EXTENT lock type is for locking a contiguous range of values,
 * represented by 64-bit starting and ending offsets (inclusive). There are
 * several extent lock modes, some of which may be mutually incompatible.
 * Extent locks are considered incompatible if their modes are incompatible
 * and their extents intersect.  See the lock mode compatibility matrix in
 * lustre_dlm.h.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#include <libcfs/libcfs.h>
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_lib.h>

#include "ldlm_internal.h"

#ifdef HAVE_SERVER_SUPPORT
# define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)

/**
 * Fix up the ldlm_extent after expanding it.
 *
 * After expansion has been done, we might still want to do certain adjusting
 * based on overall contention of the resource and the like to avoid granting
 * overly wide locks.
 */
static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
                                              struct ldlm_extent *new_ex,
                                              int conflicting)
{
        enum ldlm_mode req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        __u64 req_align, mask;

        if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
                if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
                        new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
                                          new_ex->end);
        }

        if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
                EXIT;
                return;
        }

        /* We need to ensure that the lock extent is properly aligned to what
         * the client requested. We also need to make sure it is server page
         * size aligned, otherwise a server page can be covered by two
         * write locks. */
        mask = PAGE_SIZE;
        req_align = (req_end + 1) | req_start;
        if (req_align != 0 && (req_align & (mask - 1)) == 0) {
                while ((req_align & mask) == 0)
                        mask <<= 1;
        }
        mask -= 1;
        /* We can only shrink the lock, not grow it.
         * This should never cause the lock to be smaller than requested,
         * since the requested lock was already aligned on these boundaries. */
        new_ex->start = ((new_ex->start - 1) | mask) + 1;
        new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
        LASSERTF(new_ex->start <= req_start,
                 "mask %#llx grant start %llu req start %llu\n",
                 mask, new_ex->start, req_start);
        LASSERTF(new_ex->end >= req_end,
                 "mask %#llx grant end %llu req end %llu\n",
                 mask, new_ex->end, req_end);
}
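
/*
 * Worked example (editor's illustration, not part of the original source,
 * assuming PAGE_SIZE == 4096): a client requests [0x4000, 0x7fff] and the
 * policy tentatively expands it to [0x1000, 0xbfff].  Then
 * req_align = 0x8000 | 0x4000 = 0xc000, which is page aligned, so the mask
 * grows while the tested bit of req_align is clear: 0x1000 -> 0x2000 ->
 * 0x4000, giving mask = 0x3fff after the final decrement.  The grant is then
 * aligned to 16 KiB boundaries: start moves up from 0x1000 to
 * ((0x1000 - 1) | 0x3fff) + 1 = 0x4000 and end stays at
 * ((0xbfff + 1) & ~0x3fff) - 1 = 0xbfff, which still covers the requested
 * [0x4000, 0x7fff] range, as the LASSERTFs above require.
 */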

/**
 * Return the maximum extent that:
 * - contains the requested extent
 * - does not overlap existing conflicting extents outside the requested one
 *
 * This allows clients to request a small required extent range, but if there
 * is no contention on the lock the full lock can be granted to the client.
 * This avoids the need for many smaller lock requests to be granted in the
 * common (uncontended) case.
 *
 * Use interval tree to expand the lock extent for granted lock.
 */
static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
                                                struct ldlm_extent *new_ex)
{
        struct ldlm_resource *res = req->l_resource;
        enum ldlm_mode req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_interval_tree *tree;
        struct interval_node_extent limiter = {
                .start  = new_ex->start,
                .end    = new_ex->end,
        };
        int conflicting = 0;
        int idx;
        ENTRY;

        lockmode_verify(req_mode);

        /* Using interval tree to handle the LDLM extent granted locks. */
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                struct interval_node_extent ext = {
                        .start  = req_start,
                        .end    = req_end,
                };

                tree = &res->lr_itree[idx];
                if (lockmode_compat(tree->lit_mode, req_mode))
                        continue;

                conflicting += tree->lit_size;
                if (conflicting > 4)
                        limiter.start = req_start;

                if (interval_is_overlapped(tree->lit_root, &ext))
                        CDEBUG(D_INFO,
                               "req_mode = %d, tree->lit_mode = %d, "
                               "tree->lit_size = %d\n",
                               req_mode, tree->lit_mode, tree->lit_size);
                interval_expand(tree->lit_root, &ext, &limiter);
                limiter.start = max(limiter.start, ext.start);
                limiter.end = min(limiter.end, ext.end);
                if (limiter.start == req_start && limiter.end == req_end)
                        break;
        }

        new_ex->start = limiter.start;
        new_ex->end = limiter.end;
        LASSERT(new_ex->start <= req_start);
        LASSERT(new_ex->end >= req_end);

        ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
        EXIT;
}

/* Return the maximum extent that:
 * - contains the requested extent
 * - does not overlap existing conflicting extents outside the requested one
 */
static void
ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
                                    struct ldlm_extent *new_ex)
{
        struct ldlm_resource *res = req->l_resource;
        enum ldlm_mode req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_lock *lock;
        int conflicting = 0;
        ENTRY;

        lockmode_verify(req_mode);

        /* for waiting locks */
        list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
                struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;

                /* We already hit the minimum requested size, search no more */
                if (new_ex->start == req_start && new_ex->end == req_end) {
                        EXIT;
                        return;
                }

                /* Don't conflict with ourselves */
                if (req == lock)
                        continue;

                /* Locks are compatible, overlap doesn't matter */
                /* Until bug 20 is fixed, try to avoid granting overlapping
                 * locks on one client (they take a long time to cancel) */
                if (lockmode_compat(lock->l_req_mode, req_mode) &&
                    lock->l_export != req->l_export)
                        continue;

                /* If this is a high-traffic lock, don't grow downwards at all
                 * or grow upwards too much */
                ++conflicting;
                if (conflicting > 4)
                        new_ex->start = req_start;

                /* If lock doesn't overlap new_ex, skip it. */
                if (!ldlm_extent_overlap(l_extent, new_ex))
                        continue;

                /* The locks conflict in their requested extents and we can't
                 * satisfy both, so ignore this lock.  Either we will ping-pong
                 * this extent (we would regardless of what extent we granted)
                 * or the lock is unused and it shouldn't limit our extent
                 * growth. */
                if (ldlm_extent_overlap(&lock->l_req_extent, &req->l_req_extent))
                        continue;

                /* We grow extents downwards only as far as they don't overlap
                 * with already-granted locks, on the assumption that clients
                 * will be writing beyond the initial requested end and would
                 * then need to enqueue a new lock beyond the previous request.
                 * l_req_extent->end strictly < req_start, checked above. */
                if (l_extent->start < req_start && new_ex->start != req_start) {
                        if (l_extent->end >= req_start)
                                new_ex->start = req_start;
                        else
                                new_ex->start = min(l_extent->end + 1, req_start);
                }

                /* If we need to cancel this lock anyway because our request
                 * overlaps the granted lock, we grow up to its requested
                 * extent start instead of limiting this extent, assuming that
                 * clients are writing forwards and the lock had overgrown
                 * its extent downwards before we enqueued our request. */
                if (l_extent->end > req_end) {
                        if (l_extent->start <= req_end)
                                new_ex->end = max(lock->l_req_extent.start - 1,
                                                  req_end);
                        else
                                new_ex->end = max(l_extent->start - 1, req_end);
                }
        }

        ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
        EXIT;
}
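
/*
 * Illustrative example (editor's note, not part of the original source):
 * suppose a conflicting waiting lock covers [0, 999] (and also only requested
 * [0, 999]), while the new request asks for [4096, 8191] and new_ex starts
 * out as [0, OBD_OBJECT_EOF].  The two requested extents do not overlap, so
 * the waiting lock is not skipped; since its end (999) is below req_start,
 * the downward growth of new_ex is clipped so that it starts at 1000, while
 * the upward growth is left untouched.  The result is then passed to
 * ldlm_extent_internal_policy_fixup() for alignment.
 */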


/* In order to determine the largest possible extent we can grant, we need
 * to scan all of the queues. */
static void ldlm_extent_policy(struct ldlm_resource *res,
                               struct ldlm_lock *lock, __u64 *flags)
{
        struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };

        if (lock->l_export == NULL)
                /*
                 * this is a local lock taken by server (e.g., as a part of
                 * OST-side locking, or unlink handling). Expansion doesn't
                 * make a lot of sense for local locks, because they are
                 * dropped immediately on operation completion and would only
                 * conflict with other threads.
                 */
                return;

        if (lock->l_policy_data.l_extent.start == 0 &&
            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                /* fast-path whole file locks */
                return;

        /* Because reprocess_queue zeroes flags and uses it to return
         * LDLM_FL_LOCK_CHANGED, we must check for the NO_EXPANSION flag
         * in the lock flags rather than the 'flags' argument */
        if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
                ldlm_extent_internal_policy_granted(lock, &new_ex);
                ldlm_extent_internal_policy_waiting(lock, &new_ex);
        } else {
                LDLM_DEBUG(lock, "Not expanding manually requested lock.\n");
                new_ex.start = lock->l_policy_data.l_extent.start;
                new_ex.end = lock->l_policy_data.l_extent.end;
                /* In case the request is not on correct boundaries, we call
                 * fixup. (normally called in ldlm_extent_internal_policy_*) */
                ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
        }

        if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
                *flags |= LDLM_FL_LOCK_CHANGED;
                lock->l_policy_data.l_extent.start = new_ex.start;
                lock->l_policy_data.l_extent.end = new_ex.end;
        }
}

static bool ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
{
        struct ldlm_resource *res = lock->l_resource;
        time64_t now = ktime_get_seconds();

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
                return true;

        CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
        if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
                res->lr_contention_time = now;

        return now < res->lr_contention_time +
                     ldlm_res_to_ns(res)->ns_contention_time;
}
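
/*
 * Editor's note (illustration, not part of the original source): if more than
 * ns_contended_locks conflicting locks are seen while processing a request,
 * the resource is marked contended "now", and ldlm_check_contention() keeps
 * returning true for the following ns_contention_time seconds.  Together with
 * LDLM_FL_DENY_ON_CONTENTION and ns_max_nolock_size (see the end of
 * ldlm_extent_compat_queue() below), this lets the server refuse small extent
 * locks on hot resources with -EUSERS instead of queueing them.
 */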

struct ldlm_extent_compat_args {
        struct list_head *work_list;
        struct ldlm_lock *lock;
        enum ldlm_mode mode;
        int *locks;
        int *compat;
};

static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
                                                void *data)
{
        struct ldlm_extent_compat_args *priv = data;
        struct ldlm_interval *node = to_ldlm_interval(n);
        struct ldlm_extent *extent;
        struct list_head *work_list = priv->work_list;
        struct ldlm_lock *lock, *enq = priv->lock;
        enum ldlm_mode mode = priv->mode;
        int count = 0;
        ENTRY;

        LASSERT(!list_empty(&node->li_group));

        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                /* interval tree is for granted lock */
                LASSERTF(mode == lock->l_granted_mode,
                         "mode = %s, lock->l_granted_mode = %s\n",
                         ldlm_lockname[mode],
                         ldlm_lockname[lock->l_granted_mode]);
                count++;
                if (lock->l_blocking_ast &&
                    lock->l_granted_mode != LCK_GROUP)
                        ldlm_add_ast_work_item(lock, enq, work_list);
        }

        /* don't count conflicting glimpse locks */
        extent = ldlm_interval_extent(node);
        if (!(mode == LCK_PR &&
            extent->start == 0 && extent->end == OBD_OBJECT_EOF))
                *priv->locks += count;

        if (priv->compat)
                *priv->compat = 0;

        RETURN(INTERVAL_ITER_CONT);
}

/**
 * Determine if the lock is compatible with all locks on the queue.
 *
 * If \a work_list is provided, conflicting locks are linked there.
 * If \a work_list is not provided, we exit this function on first conflict.
 *
 * \retval 0 if the lock is not compatible
 * \retval 1 if the lock is compatible
 * \retval 2 if \a req is a group lock and it is compatible and requires
 *           no further checking
 * \retval negative error, such as EAGAIN for group locks
 */
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                         __u64 *flags, struct list_head *work_list,
                         int *contended_locks)
{
        struct ldlm_resource *res = req->l_resource;
        enum ldlm_mode req_mode = req->l_req_mode;
        __u64 req_start = req->l_req_extent.start;
        __u64 req_end = req->l_req_extent.end;
        struct ldlm_lock *lock;
        int check_contention;
        int compat = 1;
        ENTRY;

        lockmode_verify(req_mode);

        /* Using interval tree for granted lock */
        if (queue == &res->lr_granted) {
                struct ldlm_interval_tree *tree;
                struct ldlm_extent_compat_args data = {.work_list = work_list,
                                               .lock = req,
                                               .locks = contended_locks,
                                               .compat = &compat };
                struct interval_node_extent ex = { .start = req_start,
                                                   .end = req_end };
                int idx, rc;

                for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                        tree = &res->lr_itree[idx];
                        if (tree->lit_root == NULL) /* empty tree, skipped */
                                continue;

                        data.mode = tree->lit_mode;
                        if (lockmode_compat(req_mode, tree->lit_mode)) {
                                struct ldlm_interval *node;
                                struct ldlm_extent *extent;

                                if (req_mode != LCK_GROUP)
                                        continue;

                                /* group lock, grant it immediately if
                                 * compatible */
                                node = to_ldlm_interval(tree->lit_root);
                                extent = ldlm_interval_extent(node);
                                if (req->l_policy_data.l_extent.gid ==
                                    extent->gid)
                                        RETURN(2);
                        }

                        if (tree->lit_mode == LCK_GROUP) {
                                if (*flags & (LDLM_FL_BLOCK_NOWAIT |
                                              LDLM_FL_SPECULATIVE)) {
                                        compat = -EAGAIN;
                                        goto destroylock;
                                }

                                if (!work_list)
                                        RETURN(0);

                                /* if the work list is not NULL, add all locks
                                 * in the tree to the work list */
                                compat = 0;
                                interval_iterate(tree->lit_root,
                                                 ldlm_extent_compat_cb, &data);
                                continue;
                        }

                        /* We've found a potentially blocking lock, check
                         * compatibility.  This handles locks other than GROUP
                         * locks, which are handled separately above.
                         *
                         * Locks with FL_SPECULATIVE are asynchronous requests
                         * which must never wait behind another lock, so they
                         * fail if any conflicting lock is found. */
                        if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
                                rc = interval_is_overlapped(tree->lit_root,
                                                            &ex);
                                if (rc) {
                                        if (!work_list) {
                                                RETURN(0);
                                        } else {
                                                compat = -EAGAIN;
                                                goto destroylock;
                                        }
                                }
                        } else {
                                interval_search(tree->lit_root, &ex,
                                                ldlm_extent_compat_cb, &data);
                                if (!list_empty(work_list) && compat)
                                        compat = 0;
                        }
                }
        } else { /* for waiting queue */
                list_for_each_entry(lock, queue, l_res_link) {
                        check_contention = 1;

                        /* We stop walking the queue if we hit ourselves so
                         * we don't take conflicting locks enqueued after us
                         * into account, or we'd wait forever. */
                        if (req == lock)
                                break;

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_req_mode, req_mode)) {
                                if (req_mode == LCK_PR &&
                                    ((lock->l_policy_data.l_extent.start <=
                                      req->l_policy_data.l_extent.start) &&
                                     (lock->l_policy_data.l_extent.end >=
                                      req->l_policy_data.l_extent.end))) {
                                        /* If we met a PR lock just like us or
                                         * wider, and nobody down the list
                                         * conflicted with it, that means we
                                         * can skip processing of the rest of
                                         * the list and safely place ourselves
                                         * at the end of the list, or grant
                                         * (depending on whether we met
                                         * conflicting locks earlier in the
                                         * list).  In case of a first enqueue
                                         * only, we continue traversing if
                                         * there is something conflicting down
                                         * the list because we need to make
                                         * sure that something is marked as
                                         * AST_SENT as well; in case of an
                                         * empty work list we would exit on
                                         * the first conflict met. */
                                        /* There IS a case where such flag is
                                         * not set for a lock, yet it blocks
                                         * something. Luckily for us this is
                                         * only during destroy, so the lock is
                                         * exclusive. So here we are safe. */
                                        if (!ldlm_is_ast_sent(lock))
                                                RETURN(compat);
                                }

                                /* non-group locks are compatible, overlap
                                 * doesn't matter */
                                if (likely(req_mode != LCK_GROUP))
                                        continue;

                                /* If we are trying to get a GROUP lock and
                                 * there is another one of this kind, we need
                                 * to compare gid */
                                if (req->l_policy_data.l_extent.gid ==
                                    lock->l_policy_data.l_extent.gid) {
                                        /* If an existing lock with matched
                                         * gid is granted, we grant the new
                                         * one too. */
                                        if (ldlm_is_granted(lock))
                                                RETURN(2);

                                        /* Otherwise we are scanning the queue
                                         * of waiting locks and it means the
                                         * current request would block along
                                         * with the existing lock (that is
                                         * already blocked).
                                         * If we are in nonblocking mode -
                                         * return immediately */
                                        if (*flags & (LDLM_FL_BLOCK_NOWAIT
                                                      | LDLM_FL_SPECULATIVE)) {
                                                compat = -EAGAIN;
                                                goto destroylock;
                                        }
                                        /* If this group lock is compatible with another
                                         * group lock on the waiting list, they must be
                                         * together in the list, so they can be granted
                                         * at the same time.  Otherwise the later lock
                                         * can get stuck behind another, incompatible,
                                         * lock. */
                                        ldlm_resource_insert_lock_after(lock, req);
                                        /* Because 'lock' is not granted, we can stop
                                         * processing this queue and return immediately.
                                         * There is no need to check the rest of the
                                         * list. */
                                        RETURN(0);
                                }
                        }

                        if (unlikely(req_mode == LCK_GROUP &&
                                     !ldlm_is_granted(lock))) {
                                compat = 0;
                                if (lock->l_req_mode != LCK_GROUP) {
                                        /* Ok, we hit a non-GROUP lock, there
                                         * should be no more GROUP locks later
                                         * on, queue in front of the first
                                         * non-GROUP lock */

                                        ldlm_resource_insert_lock_before(lock, req);
                                        break;
                                }
                                LASSERT(req->l_policy_data.l_extent.gid !=
                                        lock->l_policy_data.l_extent.gid);
                                continue;
                        }

                        if (unlikely(lock->l_req_mode == LCK_GROUP)) {
                                /* If compared lock is GROUP, then requested is
                                 * PR/PW so this is not compatible; extent
                                 * range does not matter */
                                if (*flags & (LDLM_FL_BLOCK_NOWAIT
                                              | LDLM_FL_SPECULATIVE)) {
                                        compat = -EAGAIN;
                                        goto destroylock;
                                }
                        } else if (lock->l_policy_data.l_extent.end < req_start ||
                                   lock->l_policy_data.l_extent.start > req_end) {
                                /* if a non-group lock doesn't overlap, skip it */
                                continue;
                        } else if (lock->l_req_extent.end < req_start ||
                                   lock->l_req_extent.start > req_end) {
                                /* false contention, the requests don't really
                                 * overlap */
                                check_contention = 0;
                        }

                        if (!work_list)
                                RETURN(0);

                        if (*flags & LDLM_FL_SPECULATIVE) {
                                compat = -EAGAIN;
                                goto destroylock;
                        }

                        /* don't count conflicting glimpse locks */
                        if (lock->l_req_mode == LCK_PR &&
                            lock->l_policy_data.l_extent.start == 0 &&
                            lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
                                check_contention = 0;

                        *contended_locks += check_contention;

                        compat = 0;
                        if (lock->l_blocking_ast &&
                            lock->l_req_mode != LCK_GROUP)
                                ldlm_add_ast_work_item(lock, req, work_list);
                }
        }

        if (ldlm_check_contention(req, *contended_locks) &&
            compat == 0 &&
            (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
            req->l_req_mode != LCK_GROUP &&
            req_end - req_start <=
            ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
                GOTO(destroylock, compat = -EUSERS);

        RETURN(compat);
destroylock:
        list_del_init(&req->l_res_link);
        ldlm_lock_destroy_nolock(req);
        RETURN(compat);
}

/**
 * This function refreshes the eviction timer for a cancelled lock.
 * \param[in] lock              ldlm lock for refresh
 * \param[in] arg               ldlm prolong arguments, timeout, export, extent
 *                              and counter are used
 */
void ldlm_lock_prolong_one(struct ldlm_lock *lock,
                           struct ldlm_prolong_args *arg)
{
        timeout_t timeout;

        OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PROLONG_PAUSE, 3);

        if (arg->lpa_export != lock->l_export ||
            lock->l_flags & LDLM_FL_DESTROYED)
                /* ignore unrelated locks */
                return;

        arg->lpa_locks_cnt++;

        if (!(lock->l_flags & LDLM_FL_AST_SENT))
                /* ignore locks not being cancelled */
                return;

        /* We are in the middle of the process - the BL AST is sent, the
         * CANCEL is ahead. Take half of the BL AT + IO AT process time.
         */
        timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);

        arg->lpa_blocks_cnt++;

        /* OK, this is possibly a lock the user holds doing I/O,
         * so refresh its eviction timer.
         */
        ldlm_refresh_waiting_lock(lock, timeout);
}
EXPORT_SYMBOL(ldlm_lock_prolong_one);
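
/*
 * Editor's note (illustration with made-up numbers, not part of the original
 * source): with arg->lpa_timeout == 30 seconds and ldlm_bl_timeout(lock)
 * returning 20 seconds, a lock that is in the middle of being cancelled would
 * have its waiting-lock timer refreshed with a timeout of 30 + 20/2 = 40
 * seconds, giving the client's in-flight I/O time to complete before the
 * server considers evicting it.
 */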

static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
                                                   void *data)
{
        struct ldlm_prolong_args *arg = data;
        struct ldlm_interval *node = to_ldlm_interval(n);
        struct ldlm_lock *lock;

        ENTRY;

        LASSERT(!list_empty(&node->li_group));

        list_for_each_entry(lock, &node->li_group, l_sl_policy) {
                ldlm_lock_prolong_one(lock, arg);
        }

        RETURN(INTERVAL_ITER_CONT);
}

/**
 * Walk through the granted tree and prolong locks if they overlap the extent.
 *
 * \param[in] arg               prolong args
 */
void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
{
        struct ldlm_interval_tree *tree;
        struct ldlm_resource *res;
        struct interval_node_extent ex = { .start = arg->lpa_extent.start,
                                           .end = arg->lpa_extent.end };
        int idx;

        ENTRY;

        res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
                                &arg->lpa_resid, LDLM_EXTENT, 0);
        if (IS_ERR(res)) {
                CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
                       arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
                RETURN_EXIT;
        }

        lock_res(res);
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                tree = &res->lr_itree[idx];
                if (tree->lit_root == NULL) /* empty tree, skipped */
                        continue;

                /* There is no possibility to check for the groupID
                 * so all the group locks are considered as valid
                 * here, especially because the client is supposed
                 * to check it has such a lock before sending an RPC.
                 */
                if (!(tree->lit_mode & arg->lpa_mode))
                        continue;

                interval_search(tree->lit_root, &ex,
                                ldlm_resource_prolong_cb, arg);
        }

        unlock_res(res);
        ldlm_resource_putref(res);

        EXIT;
}
EXPORT_SYMBOL(ldlm_resource_prolong);

/**
 * Process a granting attempt for extent lock.
 * Must be called with ns lock held.
 *
 * This function looks for any conflicts for \a lock in the granted or
 * waiting queues. The lock is granted if no conflicts are found in
 * either queue.
 */
int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
                             enum ldlm_process_intention intention,
                             enum ldlm_error *err, struct list_head *work_list)
{
        struct ldlm_resource *res = lock->l_resource;
        int rc, rc2 = 0;
        int contended_locks = 0;
        struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
                                                        NULL : work_list;
        ENTRY;

        LASSERT(!ldlm_is_granted(lock));
        LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
                !ldlm_is_ast_discard_data(lock));
        check_res_locked(res);
        *err = ELDLM_OK;

        if (intention == LDLM_PROCESS_RESCAN) {
                /* Careful observers will note that we don't handle -EAGAIN
                 * here, but it's ok for a non-obvious reason -- compat_queue
                 * can only return -EAGAIN if (flags & BLOCK_NOWAIT |
                 * SPECULATIVE). flags should always be zero here, and if that
                 * ever stops being true, we want to find out. */
                LASSERT(*flags == 0);
                rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                              NULL, &contended_locks);
                if (rc == 1) {
                        rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                                      flags, NULL,
                                                      &contended_locks);
                }
                if (rc == 0)
                        RETURN(LDLM_ITER_STOP);

                ldlm_resource_unlink_lock(lock);

                if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
                        ldlm_extent_policy(res, lock, flags);
                ldlm_grant_lock(lock, grant_work);
                RETURN(LDLM_ITER_CONTINUE);
        }

        contended_locks = 0;
        rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
                                      work_list, &contended_locks);
        if (rc < 0)
                GOTO(out, *err = rc);

        if (rc != 2) {
                rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
                                               flags, work_list,
                                               &contended_locks);
                if (rc2 < 0)
                        GOTO(out, *err = rc = rc2);
        }

        if (rc + rc2 == 2) {
                ldlm_extent_policy(res, lock, flags);
                ldlm_resource_unlink_lock(lock);
                ldlm_grant_lock(lock, grant_work);
        } else {
                /* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to
                 * force client to wait for the lock endlessly once
                 * the lock is enqueued -bzzz */
                *flags |= LDLM_FL_NO_TIMEOUT;
        }

        RETURN(LDLM_ITER_CONTINUE);
out:
        return rc;
}
#endif /* HAVE_SERVER_SUPPORT */

struct ldlm_kms_shift_args {
        __u64   old_kms;
        __u64   kms;
        bool    complete;
};

/* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
                                            void *args)
{
        struct ldlm_kms_shift_args *arg = args;
        struct ldlm_interval *node = to_ldlm_interval(n);
        struct ldlm_lock *tmplock;
        struct ldlm_lock *lock = NULL;

        ENTRY;

        /* Since all locks in an interval have the same extent, we can just
         * use the first lock without kms_ignore set. */
        list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
                if (ldlm_is_kms_ignore(tmplock))
                        continue;

                lock = tmplock;

                break;
        }

        /* No locks in this interval without kms_ignore set */
        if (!lock)
                RETURN(INTERVAL_ITER_CONT);

        /* If we find a lock with a greater or equal kms, we are not the
         * highest lock (or we share that distinction with another lock), and
         * don't need to update KMS.  Return old_kms and stop looking. */
        if (lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF ||
            lock->l_policy_data.l_extent.end + 1 >= arg->old_kms) {
                arg->kms = arg->old_kms;
                arg->complete = true;
                RETURN(INTERVAL_ITER_STOP);
        }

        if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
                arg->kms = lock->l_policy_data.l_extent.end + 1;

        /* Since interval_iterate_reverse starts with the highest lock and
         * works down, for PW locks, we only need to check if we should update
         * the kms, then stop walking the tree.  PR locks are not exclusive, so
         * the highest start does not imply the highest end and we must
         * continue. (Only one group lock is allowed per resource, so this is
         * irrelevant for group locks.) */
        if (lock->l_granted_mode == LCK_PW)
                RETURN(INTERVAL_ITER_STOP);
        else
                RETURN(INTERVAL_ITER_CONT);
}

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock".  This function returns the new KMS value, updating
 * it only if we were the highest lock.
 *
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_interval_tree *tree;
        struct ldlm_kms_shift_args args;
        int idx = 0;

        ENTRY;

        args.old_kms = old_kms;
        args.kms = 0;
        args.complete = false;

        /* don't let another thread in ldlm_extent_shift_kms race in
         * just after we finish and take our lock into account in its
         * calculation of the kms */
        ldlm_set_kms_ignore(lock);

        /* We iterate over the lock trees, looking for the largest kms smaller
         * than the current one. */
        for (idx = 0; idx < LCK_MODE_NUM; idx++) {
                tree = &res->lr_itree[idx];

                /* If our already known kms is >= than the highest 'end' in
                 * this tree, we don't need to check this tree, because
                 * the kms from a tree can be lower than in_max_high (due to
                 * kms_ignore), but it can never be higher. */
                if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
                        continue;

                interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
                                         &args);

                /* this tells us we're not the highest lock, so we don't need
                 * to check the remaining trees */
                if (args.complete)
                        break;
        }

        LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
                 args.old_kms);

        RETURN(args.kms);
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
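
/*
 * Editor's note (worked example, not part of the original source): suppose
 * two PW locks are granted on [0, 4095] and [4096, 8191] and the current KMS
 * is 8192.  When the client cancels the [4096, 8191] lock, that lock is
 * marked kms_ignore above, the remaining highest lock ends at 4095, and
 * ldlm_extent_shift_kms() returns 4096.  Had the cancelled lock not been the
 * highest one, the callback would have found another lock with
 * end + 1 >= old_kms and returned old_kms unchanged.
 */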

struct kmem_cache *ldlm_interval_slab;
static struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
        struct ldlm_interval *node;
        ENTRY;

        LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
        OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
        if (node == NULL)
                RETURN(NULL);

        INIT_LIST_HEAD(&node->li_group);
        ldlm_interval_attach(node, lock);
        RETURN(node);
}

void ldlm_interval_free(struct ldlm_interval *node)
{
        if (node) {
                LASSERT(list_empty(&node->li_group));
                LASSERT(!interval_is_intree(&node->li_node));
                OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
        }
}

/* interval tree, for LDLM_EXTENT. */
void ldlm_interval_attach(struct ldlm_interval *n,
                          struct ldlm_lock *l)
{
        LASSERT(l->l_tree_node == NULL);
        LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

        list_add_tail(&l->l_sl_policy, &n->li_group);
        l->l_tree_node = n;
}

struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
        struct ldlm_interval *n = l->l_tree_node;

        if (n == NULL)
                return NULL;

        LASSERT(!list_empty(&n->li_group));
        l->l_tree_node = NULL;
        list_del_init(&l->l_sl_policy);

        return list_empty(&n->li_group) ? n : NULL;
}

static inline int ldlm_mode_to_index(enum ldlm_mode mode)
{
        int index;

        LASSERT(mode != 0);
        LASSERT(is_power_of_2(mode));
        index = ilog2(mode);
        LASSERT(index < LCK_MODE_NUM);
        return index;
}
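
/*
 * Editor's note (illustration, not part of the original source, assuming the
 * usual power-of-two lock mode values from lustre_dlm.h): because each mode
 * is a distinct bit, ldlm_mode_to_index() simply maps a mode to the slot of
 * its per-mode interval tree in lr_itree[], e.g. LCK_EX (value 1) maps to
 * index 0, with one tree per mode and LCK_MODE_NUM trees in total.  The
 * reverse mapping is the BIT(idx) check used in ldlm_extent_add_lock() and
 * ldlm_extent_unlink_lock() below.
 */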

int ldlm_extent_alloc_lock(struct ldlm_lock *lock)
{
        lock->l_tree_node = NULL;
        if (ldlm_interval_alloc(lock) == NULL)
                return -ENOMEM;
        return 0;
}

/** Add newly granted lock into interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
                          struct ldlm_lock *lock)
{
        struct interval_node *found, **root;
        struct ldlm_interval *node;
        struct ldlm_extent *extent;
        int idx, rc;

        LASSERT(ldlm_is_granted(lock));

        node = lock->l_tree_node;
        LASSERT(node != NULL);
        LASSERT(!interval_is_intree(&node->li_node));

        idx = ldlm_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == BIT(idx));
        LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

        /* node extent initialize */
        extent = &lock->l_policy_data.l_extent;

        rc = interval_set(&node->li_node, extent->start, extent->end);
        LASSERT(!rc);

        root = &res->lr_itree[idx].lit_root;
        found = interval_insert(&node->li_node, root);
        if (found) { /* The policy group found. */
                struct ldlm_interval *tmp = ldlm_interval_detach(lock);

                LASSERT(tmp != NULL);
                ldlm_interval_free(tmp);
                ldlm_interval_attach(to_ldlm_interval(found), lock);
        }
        res->lr_itree[idx].lit_size++;

        /* even though we use the interval tree to manage the extent locks,
         * we also add the locks into the grant list, for debugging purposes */
        ldlm_resource_add_lock(res, &res->lr_granted, lock);

        if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
                struct ldlm_lock *lck;

                list_for_each_entry_reverse(lck, &res->lr_granted,
                                            l_res_link) {
                        if (lck == lock)
                                continue;
                        if (lockmode_compat(lck->l_granted_mode,
                                            lock->l_granted_mode))
                                continue;
                        if (ldlm_extent_overlap(&lck->l_req_extent,
                                                &lock->l_req_extent)) {
                                CDEBUG(D_ERROR, "granting conflicting lock %p "
                                                "%p\n", lck, lock);
                                ldlm_resource_dump(D_ERROR, res);
                                LBUG();
                        }
                }
        }
}

/** Remove cancelled lock from resource interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
        struct ldlm_resource *res = lock->l_resource;
        struct ldlm_interval *node = lock->l_tree_node;
        struct ldlm_interval_tree *tree;
        int idx;

        if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
                return;

        idx = ldlm_mode_to_index(lock->l_granted_mode);
        LASSERT(lock->l_granted_mode == BIT(idx));
        tree = &res->lr_itree[idx];

        LASSERT(tree->lit_root != NULL); /* assure the tree is not null */

        tree->lit_size--;
        node = ldlm_interval_detach(lock);
        if (node) {
                interval_erase(&node->li_node, &tree->lit_root);
                ldlm_interval_free(node);
        }
}

void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
                                      union ldlm_policy_data *lpolicy)
{
        lpolicy->l_extent.start = wpolicy->l_extent.start;
        lpolicy->l_extent.end = wpolicy->l_extent.end;
        lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}

void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
                                      union ldlm_wire_policy_data *wpolicy)
{
        memset(wpolicy, 0, sizeof(*wpolicy));
        wpolicy->l_extent.start = lpolicy->l_extent.start;
        wpolicy->l_extent.end = lpolicy->l_extent.end;
        wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}