1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2010, 2013, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ldlm/ldlm_extent.c
33  *
34  * Author: Peter Braam <braam@clusterfs.com>
35  * Author: Phil Schwan <phil@clusterfs.com>
36  */
37
38 /**
39  * This file contains the implementation of the EXTENT lock type
40  *
41  * EXTENT lock type is for locking a contiguous range of values, represented
42  * by 64-bit starting and ending offsets (inclusive). There are several extent
43  * lock modes, some of which may be mutually incompatible. Extent locks are
44  * considered incompatible if their modes are incompatible and their extents
45  * intersect.  See the lock mode compatibility matrix in lustre_dlm.h.
46  */
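
/*
 * For illustration of the rule above: two PW locks on [0, 4095] and
 * [4096, 8191] do not intersect, so both can be granted even though the
 * PW mode conflicts with itself; a PW lock on [0, 4095] and a PR lock
 * on [2048, 6143] do intersect and PW/PR are incompatible modes, so
 * those two locks conflict.
 */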
47
48 #define DEBUG_SUBSYSTEM S_LDLM
49
50 #include <libcfs/libcfs.h>
51 #include <lustre_dlm.h>
52 #include <obd_support.h>
53 #include <obd.h>
54 #include <obd_class.h>
55 #include <lustre_lib.h>
56
57 #include "ldlm_internal.h"
58
59 #ifdef HAVE_SERVER_SUPPORT
60 # define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
61
62 /**
63  * Fix up the ldlm_extent after expanding it.
64  *
65  * After expansion has been done, we may still want to adjust the result
66  * based on the overall contention of the resource and similar factors, to
67  * avoid granting overly wide locks.
68  */
69 static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
70                                               struct ldlm_extent *new_ex,
71                                               int conflicting)
72 {
73         enum ldlm_mode req_mode = req->l_req_mode;
74         __u64 req_start = req->l_req_extent.start;
75         __u64 req_end = req->l_req_extent.end;
76         __u64 req_align, mask;
77
78         if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
79                 if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
80                         new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
81                                           new_ex->end);
82         }
83
84         if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
85                 EXIT;
86                 return;
87         }
88
89         /* We need to ensure that the lock extent is properly aligned to what
90          * the client requested.  We also need to make sure it is server page
91          * size aligned, otherwise a server page could be covered by two
92          * write locks. */
93         mask = PAGE_SIZE;
94         req_align = (req_end + 1) | req_start;
95         if (req_align != 0 && (req_align & (mask - 1)) == 0) {
96                 while ((req_align & mask) == 0)
97                         mask <<= 1;
98         }
99         mask -= 1;
100         /* We can only shrink the lock, not grow it.
101          * This should never cause the lock to be smaller than requested,
102          * since the requested lock was already aligned on these boundaries. */
103         new_ex->start = ((new_ex->start - 1) | mask) + 1;
104         new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
105         LASSERTF(new_ex->start <= req_start,
106                  "mask %#llx grant start %llu req start %llu\n",
107                  mask, new_ex->start, req_start);
108         LASSERTF(new_ex->end >= req_end,
109                  "mask %#llx grant end %llu req end %llu\n",
110                  mask, new_ex->end, req_end);
111 }
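
/*
 * Worked example of the fixup above, assuming PAGE_SIZE = 4096: a request
 * of [0, 0x3fff] gives req_align = (0x3fff + 1) | 0 = 0x4000, so the mask
 * grows from 0x1000 up to 0x4000 and ends up as 0x3fff.  The expanded
 * extent is then rounded inwards to 16KiB boundaries (start rounded up,
 * end rounded down to one byte before a boundary), so the grant can only
 * shrink and never becomes smaller than the already aligned request.
 */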
112
113 /**
114  * Return the maximum extent that:
115  * - contains the requested extent
116  * - does not overlap existing conflicting extents outside the requested one
117  *
118  * This allows clients to request a small required extent range, but if there
119  * is no contention on the lock the full lock can be granted to the client.
120  * This avoids the need for many smaller lock requests to be granted in the
121  * common (uncontended) case.
122  *
123  * Use the interval tree to expand the lock extent based on granted locks.
124  */
125 static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
126                                                 struct ldlm_extent *new_ex)
127 {
128         struct ldlm_resource *res = req->l_resource;
129         enum ldlm_mode req_mode = req->l_req_mode;
130         __u64 req_start = req->l_req_extent.start;
131         __u64 req_end = req->l_req_extent.end;
132         struct ldlm_interval_tree *tree;
133         struct interval_node_extent limiter = {
134                 .start  = new_ex->start,
135                 .end    = new_ex->end,
136         };
137         int conflicting = 0;
138         int idx;
139         ENTRY;
140
141         lockmode_verify(req_mode);
142
143         /* Walk the per-mode interval trees of granted extent locks. */
144         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
145                 struct interval_node_extent ext = {
146                         .start  = req_start,
147                         .end    = req_end,
148                 };
149
150                 tree = &res->lr_itree[idx];
151                 if (lockmode_compat(tree->lit_mode, req_mode))
152                         continue;
153
154                 conflicting += tree->lit_size;
155                 if (conflicting > 4)
156                         limiter.start = req_start;
157
158                 if (interval_is_overlapped(tree->lit_root, &ext))
159                         CDEBUG(D_INFO,
160                                "req_mode = %d, tree->lit_mode = %d, "
161                                "tree->lit_size = %d\n",
162                                req_mode, tree->lit_mode, tree->lit_size);
163                 interval_expand(tree->lit_root, &ext, &limiter);
164                 limiter.start = max(limiter.start, ext.start);
165                 limiter.end = min(limiter.end, ext.end);
166                 if (limiter.start == req_start && limiter.end == req_end)
167                         break;
168         }
169
170         new_ex->start = limiter.start;
171         new_ex->end = limiter.end;
172         LASSERT(new_ex->start <= req_start);
173         LASSERT(new_ex->end >= req_end);
174
175         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
176         EXIT;
177 }
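
/*
 * For example, if a client requests a PW lock on [0, 4095] and the
 * resource has no conflicting granted locks, the limiter stays at
 * [0, OBD_OBJECT_EOF] and the whole object can be granted, so later I/O
 * needs no further lock requests.  Once more than 4 conflicting locks
 * are seen the extent no longer grows below the requested start, and
 * with more than 32 conflicting locks a PW/CW grant is capped at
 * LDLM_MAX_GROWN_EXTENT bytes past the requested start.
 */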
178
179 /* Return the maximum extent that:
180  * - contains the requested extent
181  * - does not overlap existing conflicting extents outside the requested one
182  * considering only the waiting queue (the granted-queue variant is above).
183  */
184 static void
185 ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
186                                     struct ldlm_extent *new_ex)
187 {
188         struct ldlm_resource *res = req->l_resource;
189         enum ldlm_mode req_mode = req->l_req_mode;
190         __u64 req_start = req->l_req_extent.start;
191         __u64 req_end = req->l_req_extent.end;
192         struct ldlm_lock *lock;
193         int conflicting = 0;
194         ENTRY;
195
196         lockmode_verify(req_mode);
197
198         /* for waiting locks */
199         list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
200                 struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;
201
202                 /* We already hit the minimum requested size, search no more */
203                 if (new_ex->start == req_start && new_ex->end == req_end) {
204                         EXIT;
205                         return;
206                 }
207
208                 /* Don't conflict with ourselves */
209                 if (req == lock)
210                         continue;
211
212                 /* Locks are compatible, overlap doesn't matter */
213                 /* Until bug 20 is fixed, try to avoid granting overlapping
214                  * locks on one client (they take a long time to cancel) */
215                 if (lockmode_compat(lock->l_req_mode, req_mode) &&
216                     lock->l_export != req->l_export)
217                         continue;
218
219                 /* If this is a high-traffic lock, don't grow downwards at all
220                  * or grow upwards too much */
221                 ++conflicting;
222                 if (conflicting > 4)
223                         new_ex->start = req_start;
224
225                 /* If lock doesn't overlap new_ex, skip it. */
226                 if (!ldlm_extent_overlap(l_extent, new_ex))
227                         continue;
228
229                 /* The locks conflict in their requested extents and we can't
230                  * satisfy both, so ignore this lock.  Either we will ping-pong
231                  * this extent (we would regardless of what extent we granted)
232                  * or the lock is unused and shouldn't limit our extent growth. */
233                 if (ldlm_extent_overlap(&lock->l_req_extent,&req->l_req_extent))
234                         continue;
235
236                 /* We grow extents downwards only as far as they don't overlap
237                  * with already-granted locks, on the assumption that clients
238                  * will be writing beyond the initial requested end and would
239                  * then need to enqueue a new lock beyond previous request.
240                  * l_req_extent->end strictly < req_start, checked above. */
241                 if (l_extent->start < req_start && new_ex->start != req_start) {
242                         if (l_extent->end >= req_start)
243                                 new_ex->start = req_start;
244                         else
245                                 new_ex->start = min(l_extent->end+1, req_start);
246                 }
247
248                 /* If we need to cancel this lock anyway because our request
249                  * overlaps the granted lock, we grow up to its requested
250                  * extent start instead of limiting this extent, assuming that
251                  * clients are writing forwards and the lock had overgrown
252                  * its extent downwards before we enqueued our request. */
253                 if (l_extent->end > req_end) {
254                         if (l_extent->start <= req_end)
255                                 new_ex->end = max(lock->l_req_extent.start - 1,
256                                                   req_end);
257                         else
258                                 new_ex->end = max(l_extent->start - 1, req_end);
259                 }
260         }
261
262         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
263         EXIT;
264 }
265
266
267 /* In order to determine the largest possible extent we can grant, we need
268  * to scan all of the queues. */
269 static void ldlm_extent_policy(struct ldlm_resource *res,
270                                struct ldlm_lock *lock, __u64 *flags)
271 {
272         struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
273
274         if (lock->l_export == NULL)
275                 /*
276                  * this is local lock taken by server (e.g., as a part of
277                  * OST-side locking, or unlink handling). Expansion doesn't
278                  * make a lot of sense for local locks, because they are
279                  * dropped immediately on operation completion and would only
280                  * conflict with other threads.
281                  */
282                 return;
283
284         if (lock->l_policy_data.l_extent.start == 0 &&
285             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
286                 /* fast-path whole file locks */
287                 return;
288
289         ldlm_extent_internal_policy_granted(lock, &new_ex);
290         ldlm_extent_internal_policy_waiting(lock, &new_ex);
291
292         if (new_ex.start != lock->l_policy_data.l_extent.start ||
293             new_ex.end != lock->l_policy_data.l_extent.end) {
294                 *flags |= LDLM_FL_LOCK_CHANGED;
295                 lock->l_policy_data.l_extent.start = new_ex.start;
296                 lock->l_policy_data.l_extent.end = new_ex.end;
297         }
298 }
299
300 static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
301 {
302         struct ldlm_resource *res = lock->l_resource;
303         cfs_time_t now = cfs_time_current();
304
305         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
306                 return 1;
307
308         CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
309         if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
310                 res->lr_contention_time = now;
311         return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
312                 cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
313 }
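
/*
 * For example, with ns_contended_locks = 4 and ns_contention_time = 2
 * (illustrative values only), seeing 5 or more conflicting locks marks
 * the resource contended by recording the current time in
 * lr_contention_time, and any check within the following 2 seconds
 * still reports the resource as contended.
 */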
314
315 struct ldlm_extent_compat_args {
316         struct list_head *work_list;
317         struct ldlm_lock *lock;
318         enum ldlm_mode mode;
319         int *locks;
320         int *compat;
321 };
322
323 static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
324                                                 void *data)
325 {
326         struct ldlm_extent_compat_args *priv = data;
327         struct ldlm_interval *node = to_ldlm_interval(n);
328         struct ldlm_extent *extent;
329         struct list_head *work_list = priv->work_list;
330         struct ldlm_lock *lock, *enq = priv->lock;
331         enum ldlm_mode mode = priv->mode;
332         int count = 0;
333         ENTRY;
334
335         LASSERT(!list_empty(&node->li_group));
336
337         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
338                 /* interval tree is for granted lock */
339                 LASSERTF(mode == lock->l_granted_mode,
340                          "mode = %s, lock->l_granted_mode = %s\n",
341                          ldlm_lockname[mode],
342                          ldlm_lockname[lock->l_granted_mode]);
343                 count++;
344                 if (lock->l_blocking_ast &&
345                     lock->l_granted_mode != LCK_GROUP)
346                         ldlm_add_ast_work_item(lock, enq, work_list);
347         }
348
349         /* don't count conflicting glimpse locks */
350         extent = ldlm_interval_extent(node);
351         if (!(mode == LCK_PR &&
352             extent->start == 0 && extent->end == OBD_OBJECT_EOF))
353                 *priv->locks += count;
354
355         if (priv->compat)
356                 *priv->compat = 0;
357
358         RETURN(INTERVAL_ITER_CONT);
359 }
360
361 /**
362  * Determine if the lock is compatible with all locks on the queue.
363  *
364  * If \a work_list is provided, conflicting locks are linked there.
365  * If \a work_list is not provided, we exit this function on first conflict.
366  *
367  * \retval 0 if the lock is not compatible
368  * \retval 1 if the lock is compatible
369  * \retval 2 if \a req is a group lock and it is compatible and requires
370  *           no further checking
371  * \retval negative error, such as EWOULDBLOCK for group locks
372  */
373 static int
374 ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
375                          __u64 *flags, enum ldlm_error *err,
376                          struct list_head *work_list, int *contended_locks)
377 {
378         struct ldlm_resource *res = req->l_resource;
379         enum ldlm_mode req_mode = req->l_req_mode;
380         __u64 req_start = req->l_req_extent.start;
381         __u64 req_end = req->l_req_extent.end;
382         struct ldlm_lock *lock;
383         int check_contention;
384         int compat = 1;
385         int scan = 0;
386         ENTRY;
387
388         lockmode_verify(req_mode);
389
390         /* Using interval tree for granted lock */
391         if (queue == &res->lr_granted) {
392                 struct ldlm_interval_tree *tree;
393                 struct ldlm_extent_compat_args data = {.work_list = work_list,
394                                                .lock = req,
395                                                .locks = contended_locks,
396                                                .compat = &compat };
397                 struct interval_node_extent ex = { .start = req_start,
398                                                    .end = req_end };
399                 int idx, rc;
400
401                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
402                         tree = &res->lr_itree[idx];
403                         if (tree->lit_root == NULL) /* empty tree, skipped */
404                                 continue;
405
406                         data.mode = tree->lit_mode;
407                         if (lockmode_compat(req_mode, tree->lit_mode)) {
408                                 struct ldlm_interval *node;
409                                 struct ldlm_extent *extent;
410
411                                 if (req_mode != LCK_GROUP)
412                                         continue;
413
414                                 /* group lock, grant it immediately if
415                                  * compatible */
416                                 node = to_ldlm_interval(tree->lit_root);
417                                 extent = ldlm_interval_extent(node);
418                                 if (req->l_policy_data.l_extent.gid ==
419                                     extent->gid)
420                                         RETURN(2);
421                         }
422
423                         if (tree->lit_mode == LCK_GROUP) {
424                                 if (*flags & LDLM_FL_BLOCK_NOWAIT) {
425                                         compat = -EWOULDBLOCK;
426                                         goto destroylock;
427                                 }
428
429                                 *flags |= LDLM_FL_NO_TIMEOUT;
430                                 if (!work_list)
431                                         RETURN(0);
432
433                                 /* if the work list is not NULL, add all
434                                    locks in the tree to the work list */
435                                 compat = 0;
436                                 interval_iterate(tree->lit_root,
437                                                  ldlm_extent_compat_cb, &data);
438                                 continue;
439                         }
440
441                         if (!work_list) {
442                                 rc = interval_is_overlapped(tree->lit_root,&ex);
443                                 if (rc)
444                                         RETURN(0);
445                         } else {
446                                 interval_search(tree->lit_root, &ex,
447                                                 ldlm_extent_compat_cb, &data);
448                                 if (!list_empty(work_list) && compat)
449                                         compat = 0;
450                         }
451                 }
452         } else { /* for waiting queue */
453                 list_for_each_entry(lock, queue, l_res_link) {
454                         check_contention = 1;
455
456                         /* We stop walking the queue if we hit ourselves so
457                          * we don't take conflicting locks enqueued after us
458                          * into account, or we'd wait forever. */
459                         if (req == lock)
460                                 break;
461
462                         if (unlikely(scan)) {
463                                 /* We only get here if we are queuing a GROUP lock
464                                    and met an incompatible one. The main idea of this
465                                    code is to insert the GROUP lock after a compatible
466                                    GROUP lock in the waiting queue or, if there is
467                                    none, in front of the first non-GROUP lock */
468                                 if (lock->l_req_mode != LCK_GROUP) {
469                                         /* Ok, we hit non-GROUP lock, there should
470                                          * be no more GROUP locks later on, queue in
471                                          * front of first non-GROUP lock */
472
473                                         ldlm_resource_insert_lock_after(lock, req);
474                                         list_del_init(&lock->l_res_link);
475                                         ldlm_resource_insert_lock_after(req, lock);
476                                         compat = 0;
477                                         break;
478                                 }
479                                 if (req->l_policy_data.l_extent.gid ==
480                                     lock->l_policy_data.l_extent.gid) {
481                                         /* found it */
482                                         ldlm_resource_insert_lock_after(lock, req);
483                                         compat = 0;
484                                         break;
485                                 }
486                                 continue;
487                         }
488
489                         /* locks are compatible, overlap doesn't matter */
490                         if (lockmode_compat(lock->l_req_mode, req_mode)) {
491                                 if (req_mode == LCK_PR &&
492                                     ((lock->l_policy_data.l_extent.start <=
493                                       req->l_policy_data.l_extent.start) &&
494                                      (lock->l_policy_data.l_extent.end >=
495                                       req->l_policy_data.l_extent.end))) {
496                                         /* If we met a PR lock just like us or
497                                            wider, and nobody down the list
498                                            conflicted with it, that means we
499                                            can skip processing of the rest of
500                                            the list and safely place ourselves
501                                            at the end of the list, or grant
502                                            (depending on whether we met a
503                                            conflicting lock earlier in the list).
504                                            On first enqueue only, we continue
505                                            traversing if there is something
506                                            conflicting down the list because
507                                            we need to make sure that something
508                                            is marked as AST_SENT as well; in
509                                            case of an empty worklist we would
510                                            exit on the first conflict met. */
511                                         /* There IS a case where such a flag is
512                                            not set for a lock, yet it blocks
513                                            something. Luckily for us this is
514                                            only during destroy, so the lock is
515                                            exclusive. So here we are safe. */
516                                         if (!ldlm_is_ast_sent(lock))
517                                                 RETURN(compat);
518                                 }
519
520                                 /* non-group locks are compatible, overlap doesn't
521                                    matter */
522                                 if (likely(req_mode != LCK_GROUP))
523                                         continue;
524
525                                 /* If we are trying to get a GROUP lock and there is
526                                    another one of this kind, we need to compare gid */
527                                 if (req->l_policy_data.l_extent.gid ==
528                                     lock->l_policy_data.l_extent.gid) {
529                                         /* If an existing lock with a matching gid is
530                                            granted, we grant the new one too. */
531                                         if (lock->l_req_mode == lock->l_granted_mode)
532                                                 RETURN(2);
533
534                                         /* Otherwise we are scanning the queue of
535                                          * waiting locks and it means the current
536                                          * request would block along with the
537                                          * existing lock (that is already blocked).
538                                          * If we are in nonblocking mode, return
539                                          * immediately. */
540                                         if (*flags & LDLM_FL_BLOCK_NOWAIT) {
541                                                 compat = -EWOULDBLOCK;
542                                                 goto destroylock;
543                                         }
544                                         /* If this group lock is compatible with another
545                                          * group lock on the waiting list, they must be
546                                          * together in the list, so they can be granted
547                                          * at the same time.  Otherwise the later lock
548                                          * can get stuck behind another, incompatible,
549                                          * lock. */
550                                         ldlm_resource_insert_lock_after(lock, req);
551                                         /* Because 'lock' is not granted, we can stop
552                                          * processing this queue and return immediately.
553                                          * There is no need to check the rest of the
554                                          * list. */
555                                         RETURN(0);
556                                 }
557                         }
558
559                         if (unlikely(req_mode == LCK_GROUP &&
560                                      (lock->l_req_mode != lock->l_granted_mode))) {
561                                 scan = 1;
562                                 compat = 0;
563                                 if (lock->l_req_mode != LCK_GROUP) {
564                                         /* Ok, we hit non-GROUP lock, there should be no
565                                            more GROUP locks later on, queue in front of
566                                            first non-GROUP lock */
567
568                                         ldlm_resource_insert_lock_after(lock, req);
569                                         list_del_init(&lock->l_res_link);
570                                         ldlm_resource_insert_lock_after(req, lock);
571                                         break;
572                                 }
573                                 if (req->l_policy_data.l_extent.gid ==
574                                     lock->l_policy_data.l_extent.gid) {
575                                         /* found it */
576                                         ldlm_resource_insert_lock_after(lock, req);
577                                         break;
578                                 }
579                                 continue;
580                         }
581
582                         if (unlikely(lock->l_req_mode == LCK_GROUP)) {
583                                 /* If the compared lock is GROUP, then the requested
584                                  * one is PR/PW, so this is not compatible; the
585                                  * extent range does not matter */
586                                 if (*flags & LDLM_FL_BLOCK_NOWAIT) {
587                                         compat = -EWOULDBLOCK;
588                                         goto destroylock;
589                                 } else {
590                                         *flags |= LDLM_FL_NO_TIMEOUT;
591                                 }
592                         } else if (lock->l_policy_data.l_extent.end < req_start ||
593                                    lock->l_policy_data.l_extent.start > req_end) {
594                                 /* if a non-group lock doesn't overlap, skip it */
595                                 continue;
596                         } else if (lock->l_req_extent.end < req_start ||
597                                    lock->l_req_extent.start > req_end) {
598                                 /* false contention, the requests don't really overlap */
599                                 check_contention = 0;
600                         }
601
602                         if (!work_list)
603                                 RETURN(0);
604
605                         /* don't count conflicting glimpse locks */
606                         if (lock->l_req_mode == LCK_PR &&
607                             lock->l_policy_data.l_extent.start == 0 &&
608                             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
609                                 check_contention = 0;
610
611                         *contended_locks += check_contention;
612
613                         compat = 0;
614                         if (lock->l_blocking_ast &&
615                             lock->l_req_mode != LCK_GROUP)
616                                 ldlm_add_ast_work_item(lock, req, work_list);
617                 }
618         }
619
620         if (ldlm_check_contention(req, *contended_locks) &&
621             compat == 0 &&
622             (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
623             req->l_req_mode != LCK_GROUP &&
624             req_end - req_start <=
625             ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
626                 GOTO(destroylock, compat = -EUSERS);
627
628         RETURN(compat);
629 destroylock:
630         list_del_init(&req->l_res_link);
631         ldlm_lock_destroy_nolock(req);
632         *err = compat;
633         RETURN(compat);
634 }
635
636 /**
637  * This function refreshes the eviction timer for a cancelled lock.
638  * \param[in] lock              ldlm lock for refresh
639  * \param[in] arg               ldlm prolong arguments, timeout, export, extent
640  *                              and counter are used
641  */
642 void ldlm_lock_prolong_one(struct ldlm_lock *lock,
643                            struct ldlm_prolong_args *arg)
644 {
645         int timeout;
646
647         if (arg->lpa_export != lock->l_export ||
648             lock->l_flags & LDLM_FL_DESTROYED)
649                 /* ignore unrelated locks */
650                 return;
651
652         arg->lpa_locks_cnt++;
653
654         if (!(lock->l_flags & LDLM_FL_AST_SENT))
655                 /* ignore locks not being cancelled */
656                 return;
657
658         /* We are in the middle of the process - BL AST is sent, CANCEL
659          * is ahead. Take half of BL AT + IO AT process time.
660          */
661         timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);
662
663         LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);
664
665         arg->lpa_blocks_cnt++;
666
667         /* OK, this is possibly a lock the user holds while doing I/O,
668          * so let's refresh the eviction timer for it.
669          */
670         ldlm_refresh_waiting_lock(lock, timeout);
671 }
672 EXPORT_SYMBOL(ldlm_lock_prolong_one);
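
/*
 * For example (illustrative numbers), if arg->lpa_timeout is 30 seconds
 * and ldlm_bl_timeout() returns 20 seconds for this lock, a lock that
 * already has its blocking AST sent gets its waiting-list timeout
 * refreshed to 30 + 20 / 2 = 40 seconds, giving the client time to
 * finish the in-flight I/O and cancel before being evicted.
 */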
673
674 static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
675                                                    void *data)
676 {
677         struct ldlm_prolong_args *arg = data;
678         struct ldlm_interval *node = to_ldlm_interval(n);
679         struct ldlm_lock *lock;
680
681         ENTRY;
682
683         LASSERT(!list_empty(&node->li_group));
684
685         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
686                 ldlm_lock_prolong_one(lock, arg);
687         }
688
689         RETURN(INTERVAL_ITER_CONT);
690 }
691
692 /**
693  * Walk through the granted trees and prolong locks if they overlap the extent.
694  *
695  * \param[in] arg               prolong args
696  */
697 void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
698 {
699         struct ldlm_interval_tree *tree;
700         struct ldlm_resource *res;
701         struct interval_node_extent ex = { .start = arg->lpa_extent.start,
702                                            .end = arg->lpa_extent.end };
703         int idx;
704
705         ENTRY;
706
707         res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
708                                 &arg->lpa_resid, LDLM_EXTENT, 0);
709         if (IS_ERR(res)) {
710                 CDEBUG(D_DLMTRACE, "Failed to get resource for resid %llu/%llu\n",
711                        arg->lpa_resid.name[0], arg->lpa_resid.name[1]);
712                 RETURN_EXIT;
713         }
714
715         lock_res(res);
716         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
717                 tree = &res->lr_itree[idx];
718                 if (tree->lit_root == NULL) /* empty tree, skipped */
719                         continue;
720
721                 /* There is no way to check for the groupID, so all
722                  * the group locks are considered valid here,
723                  * especially because the client is supposed to
724                  * check that it has such a lock before sending an RPC.
725                  */
726                 if (!(tree->lit_mode & arg->lpa_mode))
727                         continue;
728
729                 interval_search(tree->lit_root, &ex,
730                                 ldlm_resource_prolong_cb, arg);
731         }
732
733         unlock_res(res);
734         ldlm_resource_putref(res);
735
736         EXIT;
737 }
738 EXPORT_SYMBOL(ldlm_resource_prolong);
739
740
741 /**
742  * Discard all AST work items from list.
743  *
744  * If for whatever reason we do not want to send ASTs to conflicting locks
745  * anymore, disassemble the list with this function.
746  */
747 static void discard_bl_list(struct list_head *bl_list)
748 {
749         struct list_head *tmp, *pos;
750         ENTRY;
751
752         list_for_each_safe(pos, tmp, bl_list) {
753                 struct ldlm_lock *lock =
754                         list_entry(pos, struct ldlm_lock, l_bl_ast);
755
756                 list_del_init(&lock->l_bl_ast);
757                 LASSERT(ldlm_is_ast_sent(lock));
758                 ldlm_clear_ast_sent(lock);
759                 LASSERT(lock->l_bl_ast_run == 0);
760                 LASSERT(lock->l_blocking_lock);
761                 LDLM_LOCK_RELEASE(lock->l_blocking_lock);
762                 lock->l_blocking_lock = NULL;
763                 LDLM_LOCK_RELEASE(lock);
764         }
765         EXIT;
766 }
767
768 /**
769  * Process a granting attempt for extent lock.
770  * Must be called with ns lock held.
771  *
772  * This function looks for any conflicts for \a lock in the granted or
773  * waiting queues. The lock is granted if no conflicts are found in
774  * either queue.
775  *
776  * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
777  *   - blocking ASTs have already been sent
778  *
779  * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
780  *   - blocking ASTs have not been sent yet, so list of conflicting locks
781  *     would be collected and ASTs sent.
782  */
783 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
784                              int first_enq, enum ldlm_error *err,
785                              struct list_head *work_list)
786 {
787         struct ldlm_resource *res = lock->l_resource;
788         struct list_head rpc_list;
789         int rc, rc2;
790         int contended_locks = 0;
791         ENTRY;
792
793         LASSERT(lock->l_granted_mode != lock->l_req_mode);
794         LASSERT(list_empty(&res->lr_converting));
795         LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
796                 !ldlm_is_ast_discard_data(lock));
797         INIT_LIST_HEAD(&rpc_list);
798         check_res_locked(res);
799         *err = ELDLM_OK;
800
801         if (!first_enq) {
802                 /* Careful observers will note that we don't handle -EWOULDBLOCK
803                  * here, but it's ok for a non-obvious reason -- compat_queue
804                  * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
805                  * flags should always be zero here, and if that ever stops
806                  * being true, we want to find out. */
807                 LASSERT(*flags == 0);
808                 rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
809                                               err, NULL, &contended_locks);
810                 if (rc == 1) {
811                         rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
812                                                       flags, err, NULL,
813                                                       &contended_locks);
814                 }
815                 if (rc == 0)
816                         RETURN(LDLM_ITER_STOP);
817
818                 ldlm_resource_unlink_lock(lock);
819
820                 if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
821                         ldlm_extent_policy(res, lock, flags);
822                 ldlm_grant_lock(lock, work_list);
823                 RETURN(LDLM_ITER_CONTINUE);
824         }
825
826  restart:
827         contended_locks = 0;
828         rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
829                                       &rpc_list, &contended_locks);
830         if (rc < 0)
831                 GOTO(out, rc); /* lock was destroyed */
832         if (rc == 2)
833                 goto grant;
834
835         rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
836                                        &rpc_list, &contended_locks);
837         if (rc2 < 0)
838                 GOTO(out, rc = rc2); /* lock was destroyed */
839
840         if (rc + rc2 == 2) {
841         grant:
842                 ldlm_extent_policy(res, lock, flags);
843                 ldlm_resource_unlink_lock(lock);
844                 ldlm_grant_lock(lock, NULL);
845         } else {
846                 /* If either of the compat_queue()s returned failure, then we
847                  * have ASTs to send and must go onto the waiting list.
848                  *
849                  * bug 2322: we used to unlink and re-add here, which was a
850                  * terrible folly -- if we goto restart, we could get
851                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
852                 if (list_empty(&lock->l_res_link))
853                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
854                 unlock_res(res);
855                 rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
856                                        LDLM_WORK_BL_AST);
857
858                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
859                     !ns_is_client(ldlm_res_to_ns(res)))
860                         class_fail_export(lock->l_export);
861
862                 lock_res(res);
863                 if (rc == -ERESTART) {
864                         /* 15715: The lock was granted and destroyed after
865                          * resource lock was dropped. Interval node was freed
866                          * in ldlm_lock_destroy. Anyway, this always happens
867                          * when a client is being evicted. So it would be
868                          * ok to return an error. -jay */
869                         if (ldlm_is_destroyed(lock)) {
870                                 *err = -EAGAIN;
871                                 GOTO(out, rc = -EAGAIN);
872                         }
873
874                         /* lock was granted while resource was unlocked. */
875                         if (lock->l_granted_mode == lock->l_req_mode) {
876                                 /* bug 11300: if the lock has been granted,
877                                  * break earlier because otherwise, we will go
878                                  * to restart and ldlm_resource_unlink will be
879                                  * called and it causes the interval node to be
880                                  * freed. Then we will fail at
881                                  * ldlm_extent_add_lock() */
882                                 *flags &= ~LDLM_FL_BLOCKED_MASK;
883                                 GOTO(out, rc = 0);
884                         }
885
886                         GOTO(restart, rc);
887                 }
888
889                 /* this way we force client to wait for the lock
890                  * endlessly once the lock is enqueued -bzzz */
891                 *flags |= LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT;
892
893         }
894         RETURN(0);
895 out:
896         if (!list_empty(&rpc_list)) {
897                 LASSERT(!ldlm_is_ast_discard_data(lock));
898                 discard_bl_list(&rpc_list);
899         }
900         RETURN(rc);
901 }
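
/*
 * For example, on a first enqueue (first_enq == 1) with a conflict in
 * either queue, the lock is placed on the waiting list, the blocking
 * ASTs collected in rpc_list are sent to the conflicting lock holders,
 * and LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT is set in *flags so
 * the client waits for the grant.  If both compat_queue() calls return
 * 1 (rc + rc2 == 2), the extent is expanded by ldlm_extent_policy()
 * and the lock is granted immediately.
 */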
902 #endif /* HAVE_SERVER_SUPPORT */
903
904 struct ldlm_kms_shift_args {
905         __u64   old_kms;
906         __u64   kms;
907         bool    complete;
908 };
909
910 /* Callback for interval_iterate functions, used by ldlm_extent_shift_kms */
911 static enum interval_iter ldlm_kms_shift_cb(struct interval_node *n,
912                                             void *args)
913 {
914         struct ldlm_kms_shift_args *arg = args;
915         struct ldlm_interval *node = to_ldlm_interval(n);
916         struct ldlm_lock *tmplock;
917         struct ldlm_lock *lock = NULL;
918
919         ENTRY;
920
921         /* Since all locks in an interval have the same extent, we can just
922          * use the first lock without kms_ignore set. */
923         list_for_each_entry(tmplock, &node->li_group, l_sl_policy) {
924                 if (ldlm_is_kms_ignore(tmplock))
925                         continue;
926
927                 lock = tmplock;
928
929                 break;
930         }
931
932         /* No locks in this interval without kms_ignore set */
933         if (!lock)
934                 RETURN(INTERVAL_ITER_CONT);
935
936         /* If we find a lock with a greater or equal kms, we are not the
937          * highest lock (or we share that distinction with another lock), and
938          * don't need to update KMS.  Return old_kms and stop looking. */
939         if (lock->l_policy_data.l_extent.end >= arg->old_kms) {
940                 arg->kms = arg->old_kms;
941                 arg->complete = true;
942                 RETURN(INTERVAL_ITER_STOP);
943         }
944
945         if (lock->l_policy_data.l_extent.end + 1 > arg->kms)
946                 arg->kms = lock->l_policy_data.l_extent.end + 1;
947
948         /* Since interval_iterate_reverse starts with the highest lock and
949          * works down, for PW locks, we only need to check if we should update
950          * the kms, then stop walking the tree.  PR locks are not exclusive, so
951          * the highest start does not imply the highest end and we must
952          * continue. (Only one group lock is allowed per resource, so this is
953          * irrelevant for group locks.) */
954         if (lock->l_granted_mode == LCK_PW)
955                 RETURN(INTERVAL_ITER_STOP);
956         else
957                 RETURN(INTERVAL_ITER_CONT);
958 }
959
960 /* When a lock is cancelled by a client, the KMS may undergo change if this
961  * is the "highest lock".  This function returns the new KMS value, updating
962  * it only if we were the highest lock.
963  *
964  * Caller must hold lr_lock already.
965  *
966  * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
967 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
968 {
969         struct ldlm_resource *res = lock->l_resource;
970         struct ldlm_interval_tree *tree;
971         struct ldlm_kms_shift_args args;
972         int idx = 0;
973
974         ENTRY;
975
976         args.old_kms = old_kms;
977         args.kms = 0;
978         args.complete = false;
979
980         /* don't let another thread in ldlm_extent_shift_kms race in
981          * just after we finish and take our lock into account in its
982          * calculation of the kms */
983         ldlm_set_kms_ignore(lock);
984
985         /* We iterate over the lock trees, looking for the largest kms smaller
986          * than the current one. */
987         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
988                 tree = &res->lr_itree[idx];
989
990                 /* If our already known kms is >= the highest 'end' in
991                  * this tree, we don't need to check this tree, because
992                  * the kms from a tree can be lower than in_max_high (due to
993                  * kms_ignore), but it can never be higher. */
994                 if (!tree->lit_root || args.kms >= tree->lit_root->in_max_high)
995                         continue;
996
997                 interval_iterate_reverse(tree->lit_root, ldlm_kms_shift_cb,
998                                          &args);
999
1000                 /* this tells us we're not the highest lock, so we don't need
1001                  * to check the remaining trees */
1002                 if (args.complete)
1003                         break;
1004         }
1005
1006         LASSERTF(args.kms <= args.old_kms, "kms %llu old_kms %llu\n", args.kms,
1007                  args.old_kms);
1008
1009         RETURN(args.kms);
1010 }
1011 EXPORT_SYMBOL(ldlm_extent_shift_kms);
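
/*
 * Example: with granted PW locks on [0, 4095] and [4096, 16383], the
 * KMS is 16384 (a lock on [x, y] protects a KMS of up to y + 1 bytes).
 * If the [4096, 16383] lock is cancelled, it was the highest lock, so
 * the KMS is recomputed from the remaining extents and drops to 4096;
 * cancelling [0, 4095] instead would leave the KMS at 16384.
 */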
1012
1013 struct kmem_cache *ldlm_interval_slab;
1014 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
1015 {
1016         struct ldlm_interval *node;
1017         ENTRY;
1018
1019         LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
1020         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
1021         if (node == NULL)
1022                 RETURN(NULL);
1023
1024         INIT_LIST_HEAD(&node->li_group);
1025         ldlm_interval_attach(node, lock);
1026         RETURN(node);
1027 }
1028
1029 void ldlm_interval_free(struct ldlm_interval *node)
1030 {
1031         if (node) {
1032                 LASSERT(list_empty(&node->li_group));
1033                 LASSERT(!interval_is_intree(&node->li_node));
1034                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
1035         }
1036 }
1037
1038 /* interval tree, for LDLM_EXTENT. */
1039 void ldlm_interval_attach(struct ldlm_interval *n,
1040                           struct ldlm_lock *l)
1041 {
1042         LASSERT(l->l_tree_node == NULL);
1043         LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
1044
1045         list_add_tail(&l->l_sl_policy, &n->li_group);
1046         l->l_tree_node = n;
1047 }
1048
1049 struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
1050 {
1051         struct ldlm_interval *n = l->l_tree_node;
1052
1053         if (n == NULL)
1054                 return NULL;
1055
1056         LASSERT(!list_empty(&n->li_group));
1057         l->l_tree_node = NULL;
1058         list_del_init(&l->l_sl_policy);
1059
1060         return list_empty(&n->li_group) ? n : NULL;
1061 }
1062
1063 static inline int ldlm_mode_to_index(enum ldlm_mode mode)
1064 {
1065         int index;
1066
1067         LASSERT(mode != 0);
1068         LASSERT(IS_PO2(mode));
1069         for (index = -1; mode != 0; index++, mode >>= 1)
1070                 /* do nothing */;
1071         LASSERT(index < LCK_MODE_NUM);
1072         return index;
1073 }
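
/*
 * For example, since lock modes are single-bit values (see the
 * LASSERT(IS_PO2(mode)) above), a mode of 1 << 3 maps to index 3, and
 * ldlm_extent_add_lock() below relies on the reverse relation
 * lock->l_granted_mode == 1 << idx.
 */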
1074
1075 /** Add newly granted lock into interval tree for the resource. */
1076 void ldlm_extent_add_lock(struct ldlm_resource *res,
1077                           struct ldlm_lock *lock)
1078 {
1079         struct interval_node *found, **root;
1080         struct ldlm_interval *node;
1081         struct ldlm_extent *extent;
1082         int idx, rc;
1083
1084         LASSERT(lock->l_granted_mode == lock->l_req_mode);
1085
1086         node = lock->l_tree_node;
1087         LASSERT(node != NULL);
1088         LASSERT(!interval_is_intree(&node->li_node));
1089
1090         idx = ldlm_mode_to_index(lock->l_granted_mode);
1091         LASSERT(lock->l_granted_mode == 1 << idx);
1092         LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
1093
1094         /* node extent initialize */
1095         extent = &lock->l_policy_data.l_extent;
1096
1097         rc = interval_set(&node->li_node, extent->start, extent->end);
1098         LASSERT(!rc);
1099
1100         root = &res->lr_itree[idx].lit_root;
1101         found = interval_insert(&node->li_node, root);
1102         if (found) { /* The policy group found. */
1103                 struct ldlm_interval *tmp = ldlm_interval_detach(lock);
1104                 LASSERT(tmp != NULL);
1105                 ldlm_interval_free(tmp);
1106                 ldlm_interval_attach(to_ldlm_interval(found), lock);
1107         }
1108         res->lr_itree[idx].lit_size++;
1109
1110         /* even though we use the interval tree to manage the extent locks,
1111          * we also add them to the granted list, for debugging purposes */
1112         ldlm_resource_add_lock(res, &res->lr_granted, lock);
1113
1114         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
1115                 struct ldlm_lock *lck;
1116
1117                 list_for_each_entry_reverse(lck, &res->lr_granted,
1118                                             l_res_link) {
1119                         if (lck == lock)
1120                                 continue;
1121                         if (lockmode_compat(lck->l_granted_mode,
1122                                             lock->l_granted_mode))
1123                                 continue;
1124                         if (ldlm_extent_overlap(&lck->l_req_extent,
1125                                                 &lock->l_req_extent)) {
1126                                 CDEBUG(D_ERROR, "granting conflicting lock %p "
1127                                                 "%p\n", lck, lock);
1128                                 ldlm_resource_dump(D_ERROR, res);
1129                                 LBUG();
1130                         }
1131                 }
1132         }
1133 }
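
/*
 * Note that granted locks of the same mode with exactly the same extent
 * share a single interval node: when interval_insert() finds an existing
 * node for [start, end], the new lock's own node is detached and freed,
 * and the lock is attached to the found node's policy group instead.
 */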
1134
1135 /** Remove cancelled lock from resource interval tree. */
1136 void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
1137 {
1138         struct ldlm_resource *res = lock->l_resource;
1139         struct ldlm_interval *node = lock->l_tree_node;
1140         struct ldlm_interval_tree *tree;
1141         int idx;
1142
1143         if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
1144                 return;
1145
1146         idx = ldlm_mode_to_index(lock->l_granted_mode);
1147         LASSERT(lock->l_granted_mode == 1 << idx);
1148         tree = &res->lr_itree[idx];
1149
1150         LASSERT(tree->lit_root != NULL); /* ensure the tree is not null */
1151
1152         tree->lit_size--;
1153         node = ldlm_interval_detach(lock);
1154         if (node) {
1155                 interval_erase(&node->li_node, &tree->lit_root);
1156                 ldlm_interval_free(node);
1157         }
1158 }
1159
1160 void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
1161                                       union ldlm_policy_data *lpolicy)
1162 {
1163         lpolicy->l_extent.start = wpolicy->l_extent.start;
1164         lpolicy->l_extent.end = wpolicy->l_extent.end;
1165         lpolicy->l_extent.gid = wpolicy->l_extent.gid;
1166 }
1167
1168 void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
1169                                       union ldlm_wire_policy_data *wpolicy)
1170 {
1171         memset(wpolicy, 0, sizeof(*wpolicy));
1172         wpolicy->l_extent.start = lpolicy->l_extent.start;
1173         wpolicy->l_extent.end = lpolicy->l_extent.end;
1174         wpolicy->l_extent.gid = lpolicy->l_extent.gid;
1175 }
1176