1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_extent.c
33  *
34  * Author: Peter Braam <braam@clusterfs.com>
35  * Author: Phil Schwan <phil@clusterfs.com>
36  */
37
38 /**
39  * This file contains the implementation of the EXTENT lock type
40  *
41  * EXTENT lock type is for locking a contiguous range of values, represented
42  * by 64-bit starting and ending offsets (inclusive). There are several extent
43  * lock modes, some of which may be mutually incompatible. Extent locks are
44  * considered incompatible if their modes are incompatible and their extents
45  * intersect.  See the lock mode compatibility matrix in lustre_dlm.h.
46  */
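
/*
 * Editorial illustration (not part of the original source): two extent locks
 * conflict only when both their modes are incompatible and their ranges
 * intersect.  For example:
 *
 *   PW [0, 4095]            vs  PW [4096, 8191]  -> compatible (no intersection)
 *   PW [0, 8191]            vs  PR [4096, 16383] -> conflict (modes clash, ranges overlap)
 *   PR [0, OBD_OBJECT_EOF]  vs  PR [0, 4095]     -> compatible (PR vs PR)
 *
 * The authoritative mode compatibility matrix is in lustre_dlm.h.
 */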
47
48 #define DEBUG_SUBSYSTEM S_LDLM
49
50 #include <libcfs/libcfs.h>
51 #include <lustre_dlm.h>
52 #include <obd_support.h>
53 #include <obd.h>
54 #include <obd_class.h>
55 #include <lustre_lib.h>
56
57 #include "ldlm_internal.h"
58
59 #ifdef HAVE_SERVER_SUPPORT
60 # define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
61
62 /**
63  * Fix up the ldlm_extent after expanding it.
64  *
65  * After expansion has been done, we might still want to make certain
66  * adjustments based on the overall contention of the resource and the like,
67  * to avoid granting overly wide locks.
68  */
69 static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
70                                               struct ldlm_extent *new_ex,
71                                               int conflicting)
72 {
73         enum ldlm_mode req_mode = req->l_req_mode;
74         __u64 req_start = req->l_req_extent.start;
75         __u64 req_end = req->l_req_extent.end;
76         __u64 req_align, mask;
77
78         if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
79                 if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
80                         new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
81                                           new_ex->end);
82         }
83
84         if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
85                 EXIT;
86                 return;
87         }
88
89         /* We need to ensure that the lock extent is properly aligned to
90          * what the client requested.  Also we need to make sure it is
91          * server page size aligned, otherwise a server page can be covered
92          * by two write locks. */
93         mask = PAGE_SIZE;
94         req_align = (req_end + 1) | req_start;
95         if (req_align != 0 && (req_align & (mask - 1)) == 0) {
96                 while ((req_align & mask) == 0)
97                         mask <<= 1;
98         }
99         mask -= 1;
100         /* We can only shrink the lock, not grow it.
101          * This should never cause the lock to be smaller than requested,
102          * since the requested lock was already aligned on these boundaries. */
103         new_ex->start = ((new_ex->start - 1) | mask) + 1;
104         new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
105         LASSERTF(new_ex->start <= req_start,
106                  "mask %#llx grant start %llu req start %llu\n",
107                  mask, new_ex->start, req_start);
108         LASSERTF(new_ex->end >= req_end,
109                  "mask %#llx grant end %llu req end %llu\n",
110                  mask, new_ex->end, req_end);
111 }
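
/*
 * Worked example for the fixup above (editorial, not from the original
 * source), assuming PAGE_SIZE == 4096:
 *
 *   req_start = 0x10000, req_end = 0x1ffff  (a 64 KiB aligned, 64 KiB request)
 *   req_align = (req_end + 1) | req_start = 0x20000 | 0x10000 = 0x30000
 *   mask grows from 0x1000 while (req_align & mask) == 0 and stops at 0x10000,
 *   then mask -= 1 leaves 0xffff.
 *
 * If expansion proposed new_ex = [1000, 1000000], the rounding gives
 *   new_ex->start = ((1000 - 1) | 0xffff) + 1     = 0x10000 (65536)
 *   new_ex->end   = ((1000000 + 1) & ~0xffff) - 1 = 0xeffff (983039)
 * i.e. the grant is shrunk to 64 KiB boundaries while still covering the
 * requested range [0x10000, 0x1ffff].
 */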
112
113 /**
114  * Return the maximum extent that:
115  * - contains the requested extent
116  * - does not overlap existing conflicting extents outside the requested one
117  *
118  * This allows clients to request a small required extent range, but if there
119  * is no contention on the lock the full lock can be granted to the client.
120  * This avoids the need for many smaller lock requests to be granted in the
121  * common (uncontended) case.
122  *
123  * Use interval tree to expand the lock extent for granted lock.
124  */
125 static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
126                                                 struct ldlm_extent *new_ex)
127 {
128         struct ldlm_resource *res = req->l_resource;
129         enum ldlm_mode req_mode = req->l_req_mode;
130         __u64 req_start = req->l_req_extent.start;
131         __u64 req_end = req->l_req_extent.end;
132         struct ldlm_interval_tree *tree;
133         struct interval_node_extent limiter = {
134                 .start  = new_ex->start,
135                 .end    = new_ex->end,
136         };
137         int conflicting = 0;
138         int idx;
139         ENTRY;
140
141         lockmode_verify(req_mode);
142
143         /* Using interval tree to handle the LDLM extent granted locks. */
144         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
145                 struct interval_node_extent ext = {
146                         .start  = req_start,
147                         .end    = req_end,
148                 };
149
150                 tree = &res->lr_itree[idx];
151                 if (lockmode_compat(tree->lit_mode, req_mode))
152                         continue;
153
154                 conflicting += tree->lit_size;
155                 if (conflicting > 4)
156                         limiter.start = req_start;
157
158                 if (interval_is_overlapped(tree->lit_root, &ext))
159                         CDEBUG(D_INFO,
160                                "req_mode = %d, tree->lit_mode = %d, "
161                                "tree->lit_size = %d\n",
162                                req_mode, tree->lit_mode, tree->lit_size);
163                 interval_expand(tree->lit_root, &ext, &limiter);
164                 limiter.start = max(limiter.start, ext.start);
165                 limiter.end = min(limiter.end, ext.end);
166                 if (limiter.start == req_start && limiter.end == req_end)
167                         break;
168         }
169
170         new_ex->start = limiter.start;
171         new_ex->end = limiter.end;
172         LASSERT(new_ex->start <= req_start);
173         LASSERT(new_ex->end >= req_end);
174
175         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
176         EXIT;
177 }
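
/*
 * Editorial illustration (not part of the original source): suppose a PW
 * request for [100, 199] arrives while the PR tree already holds granted
 * extents [0, 49] and [500, 599].  interval_expand() finds the largest gap
 * around the request that avoids those extents, so the limiter shrinks from
 * [0, OBD_OBJECT_EOF] to [50, 499] and the lock can be granted over that
 * whole range instead of just [100, 199].  Once a tree contributes more than
 * 4 conflicting locks, downward growth is disabled (limiter.start is pinned
 * to req_start).
 */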
178
179 /* The purpose of this function is to return:
180  * - the maximum extent
181  * - that contains the requested extent
182  * - and does not overlap existing conflicting extents outside the requested one
183  */
184 static void
185 ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
186                                     struct ldlm_extent *new_ex)
187 {
188         struct ldlm_resource *res = req->l_resource;
189         enum ldlm_mode req_mode = req->l_req_mode;
190         __u64 req_start = req->l_req_extent.start;
191         __u64 req_end = req->l_req_extent.end;
192         struct ldlm_lock *lock;
193         int conflicting = 0;
194         ENTRY;
195
196         lockmode_verify(req_mode);
197
198         /* for waiting locks */
199         list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
200                 struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;
201
202                 /* We already hit the minimum requested size, search no more */
203                 if (new_ex->start == req_start && new_ex->end == req_end) {
204                         EXIT;
205                         return;
206                 }
207
208                 /* Don't conflict with ourselves */
209                 if (req == lock)
210                         continue;
211
212                 /* Locks are compatible, overlap doesn't matter */
213                 /* Until bug 20 is fixed, try to avoid granting overlapping
214                  * locks on one client (they take a long time to cancel) */
215                 if (lockmode_compat(lock->l_req_mode, req_mode) &&
216                     lock->l_export != req->l_export)
217                         continue;
218
219                 /* If this is a high-traffic lock, don't grow downwards at all
220                  * or grow upwards too much */
221                 ++conflicting;
222                 if (conflicting > 4)
223                         new_ex->start = req_start;
224
225                 /* If lock doesn't overlap new_ex, skip it. */
226                 if (!ldlm_extent_overlap(l_extent, new_ex))
227                         continue;
228
229                 /* The locks conflict in their requested extents and we can't
230                  * satisfy both, so ignore it.  Either we will ping-pong this
231                  * extent (we would regardless of what extent we granted) or
232                  * the lock is unused and shouldn't limit our extent growth. */
233                 if (ldlm_extent_overlap(&lock->l_req_extent, &req->l_req_extent))
234                         continue;
235
236                 /* We grow extents downwards only as far as they don't overlap
237                  * with already-granted locks, on the assumption that clients
238                  * will be writing beyond the initial requested end and would
239                  * then need to enqueue a new lock beyond previous request.
240                  * l_req_extent->end strictly < req_start, checked above. */
241                 if (l_extent->start < req_start && new_ex->start != req_start) {
242                         if (l_extent->end >= req_start)
243                                 new_ex->start = req_start;
244                         else
245                                 new_ex->start = min(l_extent->end+1, req_start);
246                 }
247
248                 /* If we need to cancel this lock anyway because our request
249                  * overlaps the granted lock, we grow up to its requested
250                  * extent start instead of limiting this extent, assuming that
251                  * clients are writing forwards and the lock had overgrown
252                  * its extent downwards before we enqueued our request. */
253                 if (l_extent->end > req_end) {
254                         if (l_extent->start <= req_end)
255                                 new_ex->end = max(lock->l_req_extent.start - 1,
256                                                   req_end);
257                         else
258                                 new_ex->end = max(l_extent->start - 1, req_end);
259                 }
260         }
261
262         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
263         EXIT;
264 }
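
/*
 * Editorial illustration (not part of the original source), for a PW request
 * over [1000, 1999] walking the waiting queue above:
 *
 * - A conflicting waiting lock whose extent is [0, 499] limits downward
 *   growth to new_ex->start = min(499 + 1, 1000) = 500.
 * - A conflicting waiting lock whose grown extent [1500, 9999] overlaps our
 *   request, but whose requested extent starts at 3000, will be cancelled
 *   anyway, so we grow upwards to new_ex->end = max(3000 - 1, 1999) = 2999
 *   instead of stopping below 1500.
 */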
265
266
267 /* In order to determine the largest possible extent we can grant, we need
268  * to scan all of the queues. */
269 static void ldlm_extent_policy(struct ldlm_resource *res,
270                                struct ldlm_lock *lock, __u64 *flags)
271 {
272         struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
273
274         if (lock->l_export == NULL)
275                 /*
276                  * this is a local lock taken by server (e.g., as a part of
277                  * OST-side locking, or unlink handling). Expansion doesn't
278                  * make a lot of sense for local locks, because they are
279                  * dropped immediately on operation completion and would only
280                  * conflict with other threads.
281                  */
282                 return;
283
284         if (lock->l_policy_data.l_extent.start == 0 &&
285             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
286                 /* fast-path whole file locks */
287                 return;
288
289         /* Because reprocess_queue zeroes flags and uses it to return
290          * LDLM_FL_LOCK_CHANGED, we must check for the NO_EXPANSION flag
291          * in the lock flags rather than the 'flags' argument */
292         if (likely(!(lock->l_flags & LDLM_FL_NO_EXPANSION))) {
293                 ldlm_extent_internal_policy_granted(lock, &new_ex);
294                 ldlm_extent_internal_policy_waiting(lock, &new_ex);
295         } else {
296                 LDLM_DEBUG(lock, "Not expanding manually requested lock.\n");
297                 new_ex.start = lock->l_policy_data.l_extent.start;
298                 new_ex.end = lock->l_policy_data.l_extent.end;
299                 /* In case the request is not on correct boundaries, we call
300                  * fixup. (normally called in ldlm_extent_internal_policy_*) */
301                 ldlm_extent_internal_policy_fixup(lock, &new_ex, 0);
302         }
303
304         if (!ldlm_extent_equal(&new_ex, &lock->l_policy_data.l_extent)) {
305                 *flags |= LDLM_FL_LOCK_CHANGED;
306                 lock->l_policy_data.l_extent.start = new_ex.start;
307                 lock->l_policy_data.l_extent.end = new_ex.end;
308         }
309 }
310
311 static bool ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
312 {
313         struct ldlm_resource *res = lock->l_resource;
314         time64_t now = ktime_get_seconds();
315
316         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
317                 return true;
318
319         CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
320         if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
321                 res->lr_contention_time = now;
322
323         return now < res->lr_contention_time +
324                      ldlm_res_to_ns(res)->ns_contention_time;
325 }
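
/*
 * Editorial note (not from the original source): once a request sees more
 * conflicting locks than ns_contended_locks, lr_contention_time is refreshed
 * to "now" and the resource is treated as contended for the following
 * ns_contention_time seconds.  For example, with ns_contended_locks = 32 and
 * ns_contention_time = 2, the 33rd conflicting lock marks the resource
 * contended until now + 2s, during which small requests may be denied with
 * -EUSERS (see ldlm_extent_compat_queue() below).
 */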
326
327 struct ldlm_extent_compat_args {
328         struct list_head *work_list;
329         struct ldlm_lock *lock;
330         enum ldlm_mode mode;
331         int *locks;
332         int *compat;
333 };
334
335 static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
336                                                 void *data)
337 {
338         struct ldlm_extent_compat_args *priv = data;
339         struct ldlm_interval *node = to_ldlm_interval(n);
340         struct ldlm_extent *extent;
341         struct list_head *work_list = priv->work_list;
342         struct ldlm_lock *lock, *enq = priv->lock;
343         enum ldlm_mode mode = priv->mode;
344         int count = 0;
345         ENTRY;
346
347         LASSERT(!list_empty(&node->li_group));
348
349         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
350                 /* interval tree is for granted lock */
351                 LASSERTF(mode == lock->l_granted_mode,
352                          "mode = %s, lock->l_granted_mode = %s\n",
353                          ldlm_lockname[mode],
354                          ldlm_lockname[lock->l_granted_mode]);
355                 count++;
356                 if (lock->l_blocking_ast &&
357                     lock->l_granted_mode != LCK_GROUP)
358                         ldlm_add_ast_work_item(lock, enq, work_list);
359         }
360
361         /* don't count conflicting glimpse locks */
362         extent = ldlm_interval_extent(node);
363         if (!(mode == LCK_PR &&
364             extent->start == 0 && extent->end == OBD_OBJECT_EOF))
365                 *priv->locks += count;
366
367         if (priv->compat)
368                 *priv->compat = 0;
369
370         RETURN(INTERVAL_ITER_CONT);
371 }
372
373 /**
374  * Determine if the lock is compatible with all locks on the queue.
375  *
376  * If \a work_list is provided, conflicting locks are linked there.
377  * If \a work_list is not provided, we exit this function on first conflict.
378  *
379  * \retval 0 if the lock is not compatible
380  * \retval 1 if the lock is compatible
381  * \retval 2 if \a req is a group lock and it is compatible and requires
382  *           no further checking
383  * \retval negative error, such as -EWOULDBLOCK for group locks
384  */
385 static int
386 ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
387                          __u64 *flags, struct list_head *work_list,
388                          int *contended_locks)
389 {
390         struct ldlm_resource *res = req->l_resource;
391         enum ldlm_mode req_mode = req->l_req_mode;
392         __u64 req_start = req->l_req_extent.start;
393         __u64 req_end = req->l_req_extent.end;
394         struct ldlm_lock *lock;
395         int check_contention;
396         int compat = 1;
397         ENTRY;
398
399         lockmode_verify(req_mode);
400
401         /* Using interval tree for granted lock */
402         if (queue == &res->lr_granted) {
403                 struct ldlm_interval_tree *tree;
404                 struct ldlm_extent_compat_args data = {.work_list = work_list,
405                                                .lock = req,
406                                                .locks = contended_locks,
407                                                .compat = &compat };
408                 struct interval_node_extent ex = { .start = req_start,
409                                                    .end = req_end };
410                 int idx, rc;
411
412                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
413                         tree = &res->lr_itree[idx];
414                         if (tree->lit_root == NULL) /* empty tree, skipped */
415                                 continue;
416
417                         data.mode = tree->lit_mode;
418                         if (lockmode_compat(req_mode, tree->lit_mode)) {
419                                 struct ldlm_interval *node;
420                                 struct ldlm_extent *extent;
421
422                                 if (req_mode != LCK_GROUP)
423                                         continue;
424
425                                 /* group lock, grant it immediately if
426                                  * compatible */
427                                 node = to_ldlm_interval(tree->lit_root);
428                                 extent = ldlm_interval_extent(node);
429                                 if (req->l_policy_data.l_extent.gid ==
430                                     extent->gid)
431                                         RETURN(2);
432                         }
433
434                         if (tree->lit_mode == LCK_GROUP) {
435                                 if (*flags & (LDLM_FL_BLOCK_NOWAIT |
436                                               LDLM_FL_SPECULATIVE)) {
437                                         compat = -EWOULDBLOCK;
438                                         goto destroylock;
439                                 }
440
441                                 if (!work_list)
442                                         RETURN(0);
443
444                                 /* if work list is not NULL, add all
445                                  * locks in the tree to the work list */
446                                 compat = 0;
447                                 interval_iterate(tree->lit_root,
448                                                  ldlm_extent_compat_cb, &data);
449                                 continue;
450                         }
451
452                         /* We've found a potentially blocking lock, check
453                          * compatibility.  This handles locks other than GROUP
454                          * locks, which are handled separately above.
455                          *
456                          * Locks with FL_SPECULATIVE are asynchronous requests
457                          * which must never wait behind another lock, so they
458                          * fail if any conflicting lock is found. */
459                         if (!work_list || (*flags & LDLM_FL_SPECULATIVE)) {
460                                 rc = interval_is_overlapped(tree->lit_root,
461                                                             &ex);
462                                 if (rc) {
463                                         if (!work_list) {
464                                                 RETURN(0);
465                                         } else {
466                                                 compat = -EWOULDBLOCK;
467                                                 goto destroylock;
468                                         }
469                                 }
470                         } else {
471                                 interval_search(tree->lit_root, &ex,
472                                                 ldlm_extent_compat_cb, &data);
473                                 if (!list_empty(work_list) && compat)
474                                         compat = 0;
475                         }
476                 }
477         } else { /* for waiting queue */
478                 list_for_each_entry(lock, queue, l_res_link) {
479                         check_contention = 1;
480
481                         /* We stop walking the queue if we hit ourselves so
482                          * we don't take conflicting locks enqueued after us
483                          * into account, or we'd wait forever. */
484                         if (req == lock)
485                                 break;
486
487                         /* locks are compatible, overlap doesn't matter */
488                         if (lockmode_compat(lock->l_req_mode, req_mode)) {
489                                 if (req_mode == LCK_PR &&
490                                     ((lock->l_policy_data.l_extent.start <=
491                                       req->l_policy_data.l_extent.start) &&
492                                      (lock->l_policy_data.l_extent.end >=
493                                       req->l_policy_data.l_extent.end))) {
494                                         /* If we met a PR lock just like us
495                                          * or wider, and nobody down the
496                                          * list conflicted with it, we can
497                                          * skip processing the rest of the
498                                          * list and safely place ourselves
499                                          * at its end, or grant (depending
500                                          * on whether we met a conflicting
501                                          * lock earlier in the list).  On
502                                          * the first enqueue only, we keep
503                                          * traversing if something further
504                                          * down the list conflicts, because
505                                          * we must make sure it is marked
506                                          * AST_SENT as well; with an empty
507                                          * work list we would exit on the
508                                          * first conflict met. */
509                                         /* There IS a case where such a
510                                          * flag is not set for a lock, yet
511                                          * it blocks something.  Luckily
512                                          * this is only during destroy, so
513                                          * the lock is exclusive; here we are safe. */
514                                         if (!ldlm_is_ast_sent(lock))
515                                                 RETURN(compat);
516                                 }
517
518                                 /* non-group locks are compatible, overlap doesn't
519                                    matter */
520                                 if (likely(req_mode != LCK_GROUP))
521                                         continue;
522
523                                 /* If we are trying to get a GROUP lock and there is
524                                    another one of this kind, we need to compare gid */
525                                 if (req->l_policy_data.l_extent.gid ==
526                                     lock->l_policy_data.l_extent.gid) {
527                                         /* If an existing lock with matching
528                                          * gid is granted, we grant the new one too. */
529                                         if (ldlm_is_granted(lock))
530                                                 RETURN(2);
531
532                                         /* Otherwise we are scanning the
533                                          * queue of waiting locks and the
534                                          * current request would block along
535                                          * with the existing lock (which is
536                                          * already blocked).  If we are in
537                                          * nonblocking mode, return immediately. */
538                                         if (*flags & (LDLM_FL_BLOCK_NOWAIT
539                                                       | LDLM_FL_SPECULATIVE)) {
540                                                 compat = -EWOULDBLOCK;
541                                                 goto destroylock;
542                                         }
543                                         /* If this group lock is compatible with another
544                                          * group lock on the waiting list, they must be
545                                          * together in the list, so they can be granted
546                                          * at the same time.  Otherwise the later lock
547                                          * can get stuck behind another, incompatible,
548                                          * lock. */
549                                         ldlm_resource_insert_lock_after(lock, req);
550                                         /* Because 'lock' is not granted, we can stop
551                                          * processing this queue and return immediately.
552                                          * There is no need to check the rest of the
553                                          * list. */
554                                         RETURN(0);
555                                 }
556                         }
557
558                         if (unlikely(req_mode == LCK_GROUP &&
559                                      !ldlm_is_granted(lock))) {
560                                 compat = 0;
561                                 if (lock->l_req_mode != LCK_GROUP) {
562                                         /* OK, we hit a non-GROUP lock; there
563                                          * should be no more GROUP locks later on,
564                                          * so queue in front of the first non-GROUP lock. */
565
566                                         ldlm_resource_insert_lock_before(lock, req);
567                                         break;
568                                 }
569                                 LASSERT(req->l_policy_data.l_extent.gid !=
570                                         lock->l_policy_data.l_extent.gid);
571                                 continue;
572                         }
573
574                         if (unlikely(lock->l_req_mode == LCK_GROUP)) {
575                                 /* If the compared lock is GROUP, then the
576                                  * requested one is PR/PW, so they are not
577                                  * compatible; the extent range does not matter. */
578                                 if (*flags & (LDLM_FL_BLOCK_NOWAIT
579                                               | LDLM_FL_SPECULATIVE)) {
580                                         compat = -EWOULDBLOCK;
581                                         goto destroylock;
582                                 }
583                         } else if (lock->l_policy_data.l_extent.end < req_start ||
584                                    lock->l_policy_data.l_extent.start > req_end) {
585                                 /* if a non-group lock doesn't overlap, skip it */
586                                 continue;
587                         } else if (lock->l_req_extent.end < req_start ||
588                                    lock->l_req_extent.start > req_end) {
589                                 /* false contention, the requests don't really overlap */
590                                 check_contention = 0;
591                         }
592
593                         if (!work_list)
594                                 RETURN(0);
595
596                         if (*flags & LDLM_FL_SPECULATIVE) {
597                                 compat = -EWOULDBLOCK;
598                                 goto destroylock;
599                         }
600
601                         /* don't count conflicting glimpse locks */
602                         if (lock->l_req_mode == LCK_PR &&
603                             lock->l_policy_data.l_extent.start == 0 &&
604                             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
605                                 check_contention = 0;
606
607                         *contended_locks += check_contention;
608
609                         compat = 0;
610                         if (lock->l_blocking_ast &&
611                             lock->l_req_mode != LCK_GROUP)
612                                 ldlm_add_ast_work_item(lock, req, work_list);
613                 }
614         }
615
616         if (ldlm_check_contention(req, *contended_locks) &&
617             compat == 0 &&
618             (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
619             req->l_req_mode != LCK_GROUP &&
620             req_end - req_start <=
621             ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
622                 GOTO(destroylock, compat = -EUSERS);
623
624         RETURN(compat);
625 destroylock:
626         list_del_init(&req->l_res_link);
627         ldlm_lock_destroy_nolock(req);
628         RETURN(compat);
629 }
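
/*
 * Editorial note (not from the original source): the final contention check
 * above means a conflicting (compat == 0) request that is no larger than
 * ns_max_nolock_size, on a resource currently marked contended, and enqueued
 * with LDLM_FL_DENY_ON_CONTENTION, is destroyed with -EUSERS instead of being
 * queued, presumably so the client falls back to performing the I/O without
 * a lock.
 */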
630
631 /**
632  * This function refreshes the eviction timer for a cancelled lock.
633  * \param[in] lock              ldlm lock to refresh
634  * \param[in] arg               ldlm prolong arguments; the timeout, export,
635  *                              extent and counter fields are used
636  */
637 void ldlm_lock_prolong_one(struct ldlm_lock *lock,
638                            struct ldlm_prolong_args *arg)
639 {
640         timeout_t timeout;
641
642         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PROLONG_PAUSE, 3);
643
644         if (arg->lpa_export != lock->l_export ||
645             lock->l_flags & LDLM_FL_DESTROYED)
646                 /* ignore unrelated locks */
647                 return;
648
649         arg->lpa_locks_cnt++;
650
651         if (!(lock->l_flags & LDLM_FL_AST_SENT))
652                 /* ignore locks not being cancelled */
653                 return;
654
655         /* We are in the middle of the process - BL AST is sent, CANCEL
656          * is ahead. Take half of BL AT + IO AT process time.
657          */
658         timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);
659
660         LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);
661
662         arg->lpa_blocks_cnt++;
663
664         /* OK, this is possibly a lock the user holds while doing I/O;
665          * let's refresh the eviction timer for it.
666          */
667         ldlm_refresh_waiting_lock(lock, timeout);
668 }
669 EXPORT_SYMBOL(ldlm_lock_prolong_one);
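
/*
 * Editorial illustration (not from the original source): if the prolong
 * arguments carry lpa_timeout = 30 seconds and ldlm_bl_timeout(lock) works
 * out to 20 seconds, the waiting-lock timer above is refreshed to
 * 30 + (20 >> 1) = 40 seconds, giving the client time to finish both the
 * in-flight I/O and the cancel that is already in progress.
 */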
670
671 static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
672                                                    void *data)
673 {
674         struct ldlm_prolong_args *arg = data;
675         struct ldlm_interval *node = to_ldlm_interval(n);
676         struct ldlm_lock *lock;
677
678         ENTRY;
679
680         LASSERT(!list_empty(&node->li_group));
681
682         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
683                 ldlm_lock_prolong_one(lock, arg);
684         }
685
686         RETURN(INTERVAL_ITER_CONT);
687 }
688
689 /**
690  * Walk through the granted tree and prolong locks if they overlap the extent.
691  *
692  * \param[in] arg               prolong args
693  */
694 void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
695 {
696         struct ldlm_interval_tree *tree;
697         struct ldlm_resource *res;
698         struct interval_node_extent ex = { .start = arg->lpa_extent.start,
699                                            .end = arg->lpa_extent.end };
700         int idx;
701
702         ENTRY;
703
704         res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
705                                 &arg->lpa_resid, LDLM_EXTENT, 0);
706         if (IS_ERR(res)) {
707                 CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
708                        arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
709                 RETURN_EXIT;
710         }
711
712         lock_res(res);
713         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
714                 tree = &res->lr_itree[idx];
715                 if (tree->lit_root == NULL) /* empty tree, skipped */
716                         continue;
717
718                 /* There is no way to check the groupID here,
719                  * so all group locks are considered valid,
720                  * especially because the client is supposed
721                  * to check it has such a lock before sending an RPC.
722                  */
723                 if (!(tree->lit_mode & arg->lpa_mode))
724                         continue;
725
726                 interval_search(tree->lit_root, &ex,
727                                 ldlm_resource_prolong_cb, arg);
728         }
729
730         unlock_res(res);
731         ldlm_resource_putref(res);
732
733         EXIT;
734 }
735 EXPORT_SYMBOL(ldlm_resource_prolong);
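
/*
 * Minimal usage sketch (editorial, hedged): a server-side caller wanting to
 * keep the locks that cover an in-flight write alive might fill the argument
 * block roughly as below before calling ldlm_resource_prolong().  The field
 * names are the ones used above; everything else is illustrative only.
 *
 *	struct ldlm_prolong_args arg = {
 *		.lpa_export	= exp,
 *		.lpa_resid	= resid,
 *		.lpa_extent	= { .start = start, .end = end },
 *		.lpa_mode	= LCK_PW | LCK_GROUP,
 *		.lpa_timeout	= prolong_timeout,
 *	};
 *
 *	ldlm_resource_prolong(&arg);
 *	// arg.lpa_blocks_cnt now counts the blocked locks that were refreshed
 */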
736
737 /**
738  * Process a granting attempt for extent lock.
739  * Must be called with ns lock held.
740  *
741  * This function looks for any conflicts for \a lock in the granted or
742  * waiting queues. The lock is granted if no conflicts are found in
743  * either queue.
744  */
745 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
746                              enum ldlm_process_intention intention,
747                              enum ldlm_error *err, struct list_head *work_list)
748 {
749         struct ldlm_resource *res = lock->l_resource;
750         int rc, rc2 = 0;
751         int contended_locks = 0;
752         struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
753                                                         NULL : work_list;
754         ENTRY;
755
756         LASSERT(!ldlm_is_granted(lock));
757         LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
758                 !ldlm_is_ast_discard_data(lock));
759         check_res_locked(res);
760         *err = ELDLM_OK;
761
762         if (intention == LDLM_PROCESS_RESCAN) {
763                 /* Careful observers will note that we don't handle -EWOULDBLOCK
764                  * here, but it's ok for a non-obvious reason -- compat_queue
765                  * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT |
766                  * SPECULATIVE). flags should always be zero here, and if that
767                  * ever stops being true, we want to find out. */
768                 LASSERT(*flags == 0);
769                 rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
770                                               NULL, &contended_locks);
771                 if (rc == 1) {
772                         rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
773                                                       flags, NULL,
774                                                       &contended_locks);
775                 }
776                 if (rc == 0)
777                         RETURN(LDLM_ITER_STOP);
778
779                 ldlm_resource_unlink_lock(lock);
780
781                 if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
782                         ldlm_extent_policy(res, lock, flags);
783                 ldlm_grant_lock(lock, grant_work);
784                 RETURN(LDLM_ITER_CONTINUE);
785         }
786
787         contended_locks = 0;
788         rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
789                                       work_list, &contended_locks);
790         if (rc < 0)
791                 GOTO(out, *err = rc);
792
793         if (rc != 2) {
794                 rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
795                                                flags, work_list,
796                                                &contended_locks);
797                 if (rc2 < 0)
798                         GOTO(out, *err = rc = rc2);
799         }
800
801         if (rc + rc2 == 2) {
802                 ldlm_extent_policy(res, lock, flags);
803                 ldlm_resource_unlink_lock(lock);
804                 ldlm_grant_lock(lock, grant_work);
805         } else {
806                 /* Adding LDLM_FL_NO_TIMEOUT flag to granted lock to
807                  * force client to wait for the lock endlessly once
808                  * the lock is enqueued -bzzz */
809                 *flags |= LDLM_FL_NO_TIMEOUT;
810         }
811
812         RETURN(LDLM_ITER_CONTINUE);
813 out:
814         return rc;
815 }
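
/*
 * Editorial note (not from the original source): in the enqueue path above,
 * ldlm_extent_compat_queue() returns 1 for "compatible" and 2 for a group
 * lock that can be granted immediately (in which case the waiting queue is
 * not rechecked and rc2 stays 0), so "rc + rc2 == 2" covers both "compatible
 * with both queues" and the group-lock fast path; anything else leaves the
 * lock on the waiting list with LDLM_FL_NO_TIMEOUT set.
 */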
816 #endif /* HAVE_SERVER_SUPPORT */
817
818 struct ldlm_kms_shift_args {
819         __u64   old_kms;
820         __u64   kms;
821         bool    complete;
822 };
823
824 /* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
825 static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
826                                             void *args)
827 {
828         struct ldlm_kms_shift_args *arg = args;
829         struct ldlm_interval *node = to_ldlm_interval(n);
830         struct ldlm_lock *tmplock;
831         struct ldlm_lock *lock = NULL;
832
833         ENTRY;
834
835         /* Since all locks in an interval have the same extent, we can just
836          * use the first lock without kms_ignore set. */
837         list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
838                 if (ldlm_is_kms_ignore(tmplock))
839                         continue;
840
841                 lock = tmplock;
842
843                 break;
844         }
845
846         /* No locks in this interval without kms_ignore set */
847         if (!lock)
848                 RETURN(INTERVAL_ITER_CONT);
849
850         /* If we find a lock with a greater or equal kms, we are not the
851          * highest lock (or we share that distinction with another lock), and
852          * don't need to update KMS.  Return old_kms and stop looking. */
853         if (lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF ||
854             lock->l_policy_data.l_extent.end + 1 >= arg->old_kms) {
855                 arg->kms = arg->old_kms;
856                 arg->complete = true;
857                 RETURN(INTERVAL_ITER_STOP);
858         }
859
860         if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
861                 arg->kms = lock->l_policy_data.l_extent.end + 1;
862
863         /* Since interval_iterate_reverse starts with the highest lock and
864          * works down, for PW locks, we only need to check if we should update
865          * the kms, then stop walking the tree.  PR locks are not exclusive, so
866          * the highest start does not imply the highest end and we must
867          * continue. (Only one group lock is allowed per resource, so this is
868          * irrelevant for group locks.) */
869         if (lock->l_granted_mode == LCK_PW)
870                 RETURN(INTERVAL_ITER_STOP);
871         else
872                 RETURN(INTERVAL_ITER_CONT);
873 }
874
875 /* When a lock is cancelled by a client, the KMS may undergo change if this
876  * is the "highest lock".  This function returns the new KMS value, updating
877  * it only if we were the highest lock.
878  *
879  * Caller must hold lr_lock already.
880  *
881  * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
882 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
883 {
884         struct ldlm_resource *res = lock->l_resource;
885         struct ldlm_interval_tree *tree;
886         struct ldlm_kms_shift_args args;
887         int idx = 0;
888
889         ENTRY;
890
891         args.old_kms = old_kms;
892         args.kms = 0;
893         args.complete = false;
894
895         /* don't let another thread in ldlm_extent_shift_kms race in
896          * just after we finish and take our lock into account in its
897          * calculation of the kms */
898         ldlm_set_kms_ignore(lock);
899
900         /* We iterate over the lock trees, looking for the largest kms smaller
901          * than the current one. */
902         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
903                 tree = &res->lr_itree[idx];
904
905                 /* If our already known kms is >= the highest 'end' in
906                  * this tree, we don't need to check this tree, because
907                  * the kms from a tree can be lower than in_max_high (due to
908                  * kms_ignore), but it can never be higher. */
909                 if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
910                         continue;
911
912                 interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
913                                          &args);
914
915                 /* this tells us we're not the highest lock, so we don't need
916                  * to check the remaining trees */
917                 if (args.complete)
918                         break;
919         }
920
921         LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
922                  args.old_kms);
923
924         RETURN(args.kms);
925 }
926 EXPORT_SYMBOL(ldlm_extent_shift_kms);
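
/*
 * Worked example (editorial, not from the original source): assume old_kms is
 * 131072 because a PW lock on [65536, 131071] was the highest lock, and the
 * only other granted extent lock covers [0, 4095].  When the first lock is
 * cancelled, its kms_ignore flag is set above, the callback skips it, and the
 * remaining lock yields end + 1 = 4096, so the new KMS returned is 4096.  If
 * another granted lock still covered [65536, 131071], its end + 1 would equal
 * old_kms, args.complete would be set, and old_kms (131072) would be returned
 * unchanged.
 */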
927
928 struct kmem_cache *ldlm_interval_slab;
929 static struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
930 {
931         struct ldlm_interval *node;
932         ENTRY;
933
934         LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
935         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
936         if (node == NULL)
937                 RETURN(NULL);
938
939         INIT_LIST_HEAD(&node->li_group);
940         ldlm_interval_attach(node, lock);
941         RETURN(node);
942 }
943
944 void ldlm_interval_free(struct ldlm_interval *node)
945 {
946         if (node) {
947                 LASSERT(list_empty(&node->li_group));
948                 LASSERT(!interval_is_intree(&node->li_node));
949                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
950         }
951 }
952
953 /* interval tree, for LDLM_EXTENT. */
954 void ldlm_interval_attach(struct ldlm_interval *n,
955                           struct ldlm_lock *l)
956 {
957         LASSERT(l->l_tree_node == NULL);
958         LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
959
960         list_add_tail(&l->l_sl_policy, &n->li_group);
961         l->l_tree_node = n;
962 }
963
964 struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
965 {
966         struct ldlm_interval *n = l->l_tree_node;
967
968         if (n == NULL)
969                 return NULL;
970
971         LASSERT(!list_empty(&n->li_group));
972         l->l_tree_node = NULL;
973         list_del_init(&l->l_sl_policy);
974
975         return list_empty(&n->li_group) ? n : NULL;
976 }
977
978 static inline int ldlm_mode_to_index(enum ldlm_mode mode)
979 {
980         int index;
981
982         LASSERT(mode != 0);
983         LASSERT(is_power_of_2(mode));
984         index = ilog2(mode);
985         LASSERT(index < LCK_MODE_NUM);
986         return index;
987 }
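
/*
 * Editorial illustration (not from the original source), assuming the usual
 * Lustre lock mode encoding where each mode is a distinct power of two
 * (e.g. LCK_EX == 1, LCK_PW == 2, LCK_PR == 4): ldlm_mode_to_index(LCK_PR)
 * returns ilog2(4) == 2, i.e. the slot of the PR interval tree within
 * res->lr_itree[] used by ldlm_extent_add_lock() below.
 */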
988
989 int ldlm_extent_alloc_lock(struct ldlm_lock *lock)
990 {
991         lock->l_tree_node = NULL;
992         if (ldlm_interval_alloc(lock) == NULL)
993                 return -ENOMEM;
994         return 0;
995 }
996
997 /** Add newly granted lock into interval tree for the resource. */
998 void ldlm_extent_add_lock(struct ldlm_resource *res,
999                           struct ldlm_lock *lock)
1000 {
1001         struct interval_node *found, **root;
1002         struct ldlm_interval *node;
1003         struct ldlm_extent *extent;
1004         int idx, rc;
1005
1006         LASSERT(ldlm_is_granted(lock));
1007
1008         node = lock->l_tree_node;
1009         LASSERT(node != NULL);
1010         LASSERT(!interval_is_intree(&node->li_node));
1011
1012         idx = ldlm_mode_to_index(lock->l_granted_mode);
1013         LASSERT(lock->l_granted_mode == BIT(idx));
1014         LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
1015
1016         /* initialize the node extent */
1017         extent = &lock->l_policy_data.l_extent;
1018
1019         rc = interval_set(&node->li_node, extent->start, extent->end);
1020         LASSERT(!rc);
1021
1022         root = &res->lr_itree[idx].lit_root;
1023         found = interval_insert(&node->li_node, root);
1024         if (found) { /* The policy group found. */
1025                 struct ldlm_interval *tmp = ldlm_interval_detach(lock);
1026                 LASSERT(tmp != NULL);
1027                 ldlm_interval_free(tmp);
1028                 ldlm_interval_attach(to_ldlm_interval(found), lock);
1029         }
1030         res->lr_itree[idx].lit_size++;
1031
1032         /* even though we use the interval tree to manage extent locks, we
1033          * also add the lock to the granted list, for debugging purposes */
1034         ldlm_resource_add_lock(res, &res->lr_granted, lock);
1035
1036         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
1037                 struct ldlm_lock *lck;
1038
1039                 list_for_each_entry_reverse(lck, &res->lr_granted,
1040                                             l_res_link) {
1041                         if (lck == lock)
1042                                 continue;
1043                         if (lockmode_compat(lck->l_granted_mode,
1044                                             lock->l_granted_mode))
1045                                 continue;
1046                         if (ldlm_extent_overlap(&lck->l_req_extent,
1047                                                 &lock->l_req_extent)) {
1048                                 CDEBUG(D_ERROR, "granting conflicting lock %p "
1049                                                 "%p\n", lck, lock);
1050                                 ldlm_resource_dump(D_ERROR, res);
1051                                 LBUG();
1052                         }
1053                 }
1054         }
1055 }
1056
1057 /** Remove cancelled lock from resource interval tree. */
1058 void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
1059 {
1060         struct ldlm_resource *res = lock->l_resource;
1061         struct ldlm_interval *node = lock->l_tree_node;
1062         struct ldlm_interval_tree *tree;
1063         int idx;
1064
1065         if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
1066                 return;
1067
1068         idx = ldlm_mode_to_index(lock->l_granted_mode);
1069         LASSERT(lock->l_granted_mode == BIT(idx));
1070         tree = &res->lr_itree[idx];
1071
1072         LASSERT(tree->lit_root != NULL); /* the tree must not be empty */
1073
1074         tree->lit_size--;
1075         node = ldlm_interval_detach(lock);
1076         if (node) {
1077                 interval_erase(&node->li_node, &tree->lit_root);
1078                 ldlm_interval_free(node);
1079         }
1080 }
1081
1082 void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
1083                                       union ldlm_policy_data *lpolicy)
1084 {
1085         lpolicy->l_extent.start = wpolicy->l_extent.start;
1086         lpolicy->l_extent.end = wpolicy->l_extent.end;
1087         lpolicy->l_extent.gid = wpolicy->l_extent.gid;
1088 }
1089
1090 void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
1091                                       union ldlm_wire_policy_data *wpolicy)
1092 {
1093         memset(wpolicy, 0, sizeof(*wpolicy));
1094         wpolicy->l_extent.start = lpolicy->l_extent.start;
1095         wpolicy->l_extent.end = lpolicy->l_extent.end;
1096         wpolicy->l_extent.gid = lpolicy->l_extent.gid;
1097 }
1098