1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2010, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ldlm/ldlm_extent.c
37  *
38  * Author: Peter Braam <braam@clusterfs.com>
39  * Author: Phil Schwan <phil@clusterfs.com>
40  */
41
42 /**
43  * This file contains implementation of EXTENT lock type
44  *
45  * EXTENT lock type is for locking a contiguous range of values, represented
46  * by 64-bit starting and ending offsets (inclusive). There are several extent
47  * lock modes, some of which may be mutually incompatible. Extent locks are
48  * considered incompatible if their modes are incompatible and their extents
49  * intersect.  See the lock mode compatibility matrix in lustre_dlm.h.
50  */
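
/*
 * Illustrative sketch (not part of the original code): the conflict test
 * described above combines the mode compatibility matrix with an inclusive
 * range intersection, roughly:
 *
 *	// extents [a_start, a_end] and [b_start, b_end] intersect iff
 *	//	a_start <= b_end && b_start <= a_end
 *	conflict = !lockmode_compat(mode_a, mode_b) &&
 *		   ldlm_extent_overlap(&ext_a, &ext_b);
 *
 * so, for example, two PW locks on [0, 4095] and [4096, 8191] never conflict
 * (disjoint extents), while PW and PR locks on overlapping ranges do.
 */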
51
52 #define DEBUG_SUBSYSTEM S_LDLM
53
54 #include <libcfs/libcfs.h>
55 #include <lustre_dlm.h>
56 #include <obd_support.h>
57 #include <obd.h>
58 #include <obd_class.h>
59 #include <lustre_lib.h>
60
61 #include "ldlm_internal.h"
62
63 #ifdef HAVE_SERVER_SUPPORT
64 # define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1)
65
66 /**
67  * Fix up the ldlm_extent after expanding it.
68  *
69  * After expansion has been done, we might still want to do certain adjusting
70  * based on overall contention of the resource and the like to avoid granting
71  * overly wide locks.
72  */
73 static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req,
74                                               struct ldlm_extent *new_ex,
75                                               int conflicting)
76 {
77         enum ldlm_mode req_mode = req->l_req_mode;
78         __u64 req_start = req->l_req_extent.start;
79         __u64 req_end = req->l_req_extent.end;
80         __u64 req_align, mask;
81
82         if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) {
83                 if (req_end < req_start + LDLM_MAX_GROWN_EXTENT)
84                         new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT,
85                                           new_ex->end);
86         }
87
88         if (new_ex->start == 0 && new_ex->end == OBD_OBJECT_EOF) {
89                 EXIT;
90                 return;
91         }
92
93         /* We need to ensure that the lock extent is properly aligned to what
94          * the client requested. We also need to make sure it is server
95          * page size aligned, otherwise a server page can be covered by two
96          * write locks. */
97         mask = PAGE_CACHE_SIZE;
98         req_align = (req_end + 1) | req_start;
99         if (req_align != 0 && (req_align & (mask - 1)) == 0) {
100                 while ((req_align & mask) == 0)
101                         mask <<= 1;
102         }
103         mask -= 1;
104         /* We can only shrink the lock, not grow it.
105          * This should never cause lock to be smaller than requested,
106          * since requested lock was already aligned on these boundaries. */
107         new_ex->start = ((new_ex->start - 1) | mask) + 1;
108         new_ex->end = ((new_ex->end + 1) & ~mask) - 1;
109         LASSERTF(new_ex->start <= req_start,
110                  "mask "LPX64" grant start "LPU64" req start "LPU64"\n",
111                  mask, new_ex->start, req_start);
112         LASSERTF(new_ex->end >= req_end,
113                  "mask "LPX64" grant end "LPU64" req end "LPU64"\n",
114                  mask, new_ex->end, req_end);
115 }
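
/*
 * Worked example (illustrative only, assuming a 4KB server page size so the
 * initial mask is 4096): if the client requested [8192, 16383] and expansion
 * produced [5000, 20000], then req_align = 16384 | 8192 = 24576, the mask
 * grows to 8192 and becomes 8191 after the decrement, and the granted extent
 * is trimmed to:
 *
 *	new_ex->start = ((5000 - 1)  | 8191) + 1 =  8192;
 *	new_ex->end   = ((20000 + 1) & ~8191) - 1 = 16383;
 *
 * which still covers the requested [8192, 16383] range, as the LASSERTFs
 * above verify.
 */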
116
117 /**
118  * Return the maximum extent that:
119  * - contains the requested extent
120  * - does not overlap existing conflicting extents outside the requested one
121  *
122  * This allows clients to request a small required extent range, but if there
123  * is no contention on the lock the full lock can be granted to the client.
124  * This avoids the need for many smaller lock requests to be granted in the
125  * common (uncontended) case.
126  *
127  * Use interval tree to expand the lock extent for granted lock.
128  */
129 static void ldlm_extent_internal_policy_granted(struct ldlm_lock *req,
130                                                 struct ldlm_extent *new_ex)
131 {
132         struct ldlm_resource *res = req->l_resource;
133         enum ldlm_mode req_mode = req->l_req_mode;
134         __u64 req_start = req->l_req_extent.start;
135         __u64 req_end = req->l_req_extent.end;
136         struct ldlm_interval_tree *tree;
137         struct interval_node_extent limiter = { new_ex->start, new_ex->end };
138         int conflicting = 0;
139         int idx;
140         ENTRY;
141
142         lockmode_verify(req_mode);
143
144         /* Using interval tree to handle the LDLM extent granted locks. */
145         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
146                 struct interval_node_extent ext = { req_start, req_end };
147
148                 tree = &res->lr_itree[idx];
149                 if (lockmode_compat(tree->lit_mode, req_mode))
150                         continue;
151
152                 conflicting += tree->lit_size;
153                 if (conflicting > 4)
154                         limiter.start = req_start;
155
156                 if (interval_is_overlapped(tree->lit_root, &ext))
157                         CDEBUG(D_INFO, 
158                                "req_mode = %d, tree->lit_mode = %d, "
159                                "tree->lit_size = %d\n",
160                                req_mode, tree->lit_mode, tree->lit_size);
161                 interval_expand(tree->lit_root, &ext, &limiter);
162                 limiter.start = max(limiter.start, ext.start);
163                 limiter.end = min(limiter.end, ext.end);
164                 if (limiter.start == req_start && limiter.end == req_end)
165                         break;
166         }
167
168         new_ex->start = limiter.start;
169         new_ex->end = limiter.end;
170         LASSERT(new_ex->start <= req_start);
171         LASSERT(new_ex->end >= req_end);
172
173         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
174         EXIT;
175 }
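
/*
 * Illustrative sketch (not part of the original code): conceptually the loop
 * above starts from the widest candidate extent and narrows it against every
 * granted-lock tree whose mode conflicts with the request, but never below
 * the requested range itself:
 *
 *	limiter = { new_ex->start, new_ex->end };	// initially [0, EOF]
 *	for each tree with !lockmode_compat(tree->lit_mode, req_mode) {
 *		ext = { req_start, req_end };
 *		interval_expand(tree->lit_root, &ext, &limiter);
 *		limiter.start = max(limiter.start, ext.start);
 *		limiter.end   = min(limiter.end, ext.end);
 *	}
 *
 * so the final extent always satisfies start <= req_start and end >= req_end,
 * as the LASSERTs above check.
 */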
176
177 /* The purpose of this function is to return:
178  * - the maximum extent
179  * - that contains the requested extent
180  * - and does not overlap existing conflicting extents outside the requested one
181  */
182 static void
183 ldlm_extent_internal_policy_waiting(struct ldlm_lock *req,
184                                     struct ldlm_extent *new_ex)
185 {
186         struct ldlm_resource *res = req->l_resource;
187         enum ldlm_mode req_mode = req->l_req_mode;
188         __u64 req_start = req->l_req_extent.start;
189         __u64 req_end = req->l_req_extent.end;
190         struct ldlm_lock *lock;
191         int conflicting = 0;
192         ENTRY;
193
194         lockmode_verify(req_mode);
195
196         /* for waiting locks */
197         list_for_each_entry(lock, &res->lr_waiting, l_res_link) {
198                 struct ldlm_extent *l_extent = &lock->l_policy_data.l_extent;
199
200                 /* We already hit the minimum requested size, search no more */
201                 if (new_ex->start == req_start && new_ex->end == req_end) {
202                         EXIT;
203                         return;
204                 }
205
206                 /* Don't conflict with ourselves */
207                 if (req == lock)
208                         continue;
209
210                 /* Locks are compatible, overlap doesn't matter */
211                 /* Until bug 20 is fixed, try to avoid granting overlapping
212                  * locks on one client (they take a long time to cancel) */
213                 if (lockmode_compat(lock->l_req_mode, req_mode) &&
214                     lock->l_export != req->l_export)
215                         continue;
216
217                 /* If this is a high-traffic lock, don't grow downwards at all
218                  * or grow upwards too much */
219                 ++conflicting;
220                 if (conflicting > 4)
221                         new_ex->start = req_start;
222
223                 /* If lock doesn't overlap new_ex, skip it. */
224                 if (!ldlm_extent_overlap(l_extent, new_ex))
225                         continue;
226
227                 /* The locks conflict in their requested extents and we can't
228                  * satisfy both, so ignore this one.  Either we will ping-pong
229                  * this extent (we would regardless of what extent we granted)
230                  * or the lock is unused and shouldn't limit our extent growth. */
231                 if (ldlm_extent_overlap(&lock->l_req_extent,&req->l_req_extent))
232                         continue;
233
234                 /* We grow extents downwards only as far as they don't overlap
235                  * with already-granted locks, on the assumption that clients
236                  * will be writing beyond the initial requested end and would
237                  * then need to enqueue a new lock beyond previous request.
238                  * l_req_extent->end strictly < req_start, checked above. */
239                 if (l_extent->start < req_start && new_ex->start != req_start) {
240                         if (l_extent->end >= req_start)
241                                 new_ex->start = req_start;
242                         else
243                                 new_ex->start = min(l_extent->end+1, req_start);
244                 }
245
246                 /* If we need to cancel this lock anyway because our request
247                  * overlaps the granted lock, we grow up to its requested
248                  * extent start instead of limiting this extent, assuming that
249                  * clients are writing forwards and the lock had overgrown
250                  * its extent downwards before we enqueued our request. */
251                 if (l_extent->end > req_end) {
252                         if (l_extent->start <= req_end)
253                                 new_ex->end = max(lock->l_req_extent.start - 1,
254                                                   req_end);
255                         else
256                                 new_ex->end = max(l_extent->start - 1, req_end);
257                 }
258         }
259
260         ldlm_extent_internal_policy_fixup(req, new_ex, conflicting);
261         EXIT;
262 }
263
264
265 /* In order to determine the largest possible extent we can grant, we need
266  * to scan all of the queues. */
267 static void ldlm_extent_policy(struct ldlm_resource *res,
268                                struct ldlm_lock *lock, __u64 *flags)
269 {
270         struct ldlm_extent new_ex = { .start = 0, .end = OBD_OBJECT_EOF };
271
272         if (lock->l_export == NULL)
273                 /*
274                  * this is local lock taken by server (e.g., as a part of
275                  * OST-side locking, or unlink handling). Expansion doesn't
276                  * make a lot of sense for local locks, because they are
277                  * dropped immediately on operation completion and would only
278                  * conflict with other threads.
279                  */
280                 return;
281
282         if (lock->l_policy_data.l_extent.start == 0 &&
283             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
284                 /* fast-path whole file locks */
285                 return;
286
287         ldlm_extent_internal_policy_granted(lock, &new_ex);
288         ldlm_extent_internal_policy_waiting(lock, &new_ex);
289
290         if (new_ex.start != lock->l_policy_data.l_extent.start ||
291             new_ex.end != lock->l_policy_data.l_extent.end) {
292                 *flags |= LDLM_FL_LOCK_CHANGED;
293                 lock->l_policy_data.l_extent.start = new_ex.start;
294                 lock->l_policy_data.l_extent.end = new_ex.end;
295         }
296 }
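
/*
 * Illustrative usage sketch (an assumption, simplified from the callers in
 * this file): the policy is invoked just before granting, and the
 * LDLM_FL_LOCK_CHANGED flag tells the enqueue path that the granted extent
 * now differs from what the client asked for:
 *
 *	__u64 flags = 0;
 *
 *	ldlm_extent_policy(res, lock, &flags);
 *	ldlm_grant_lock(lock, work_list);
 *	if (flags & LDLM_FL_LOCK_CHANGED)
 *		// the reply must carry lock->l_policy_data.l_extent,
 *		// which may be wider than the requested extent
 */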
297
298 static int ldlm_check_contention(struct ldlm_lock *lock, int contended_locks)
299 {
300         struct ldlm_resource *res = lock->l_resource;
301         cfs_time_t now = cfs_time_current();
302
303         if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_SET_CONTENTION))
304                 return 1;
305
306         CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
307         if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
308                 res->lr_contention_time = now;
309         return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
310                 cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
311 }
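
/*
 * Worked example (illustrative, assuming the default tunables of
 * ns_contended_locks = 32 and ns_contention_time = 2 seconds): once a scan
 * counts more than 32 conflicting locks, lr_contention_time is reset to
 * "now", and for the next 2 seconds every call reports the resource as
 * contended:
 *
 *	contended = cfs_time_before(now,
 *			cfs_time_add(res->lr_contention_time,
 *				     cfs_time_seconds(2)));
 */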
312
313 struct ldlm_extent_compat_args {
314         struct list_head *work_list;
315         struct ldlm_lock *lock;
316         enum ldlm_mode mode;
317         int *locks;
318         int *compat;
319 };
320
321 static enum interval_iter ldlm_extent_compat_cb(struct interval_node *n,
322                                                 void *data)
323 {
324         struct ldlm_extent_compat_args *priv = data;
325         struct ldlm_interval *node = to_ldlm_interval(n);
326         struct ldlm_extent *extent;
327         struct list_head *work_list = priv->work_list;
328         struct ldlm_lock *lock, *enq = priv->lock;
329         enum ldlm_mode mode = priv->mode;
330         int count = 0;
331         ENTRY;
332
333         LASSERT(!list_empty(&node->li_group));
334
335         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
336                 /* the interval tree holds only granted locks */
337                 LASSERTF(mode == lock->l_granted_mode,
338                          "mode = %s, lock->l_granted_mode = %s\n",
339                          ldlm_lockname[mode],
340                          ldlm_lockname[lock->l_granted_mode]);
341                 count++;
342                 if (lock->l_blocking_ast &&
343                     lock->l_granted_mode != LCK_GROUP)
344                         ldlm_add_ast_work_item(lock, enq, work_list);
345         }
346
347         /* don't count conflicting glimpse locks */
348         extent = ldlm_interval_extent(node);
349         if (!(mode == LCK_PR &&
350             extent->start == 0 && extent->end == OBD_OBJECT_EOF))
351                 *priv->locks += count;
352
353         if (priv->compat)
354                 *priv->compat = 0;
355
356         RETURN(INTERVAL_ITER_CONT);
357 }
358
359 /**
360  * Determine if the lock is compatible with all locks on the queue.
361  *
362  * If \a work_list is provided, conflicting locks are linked there.
363  * If \a work_list is not provided, we exit this function on first conflict.
364  *
365  * \retval 0 if the lock is not compatible
366  * \retval 1 if the lock is compatible
367  * \retval 2 if \a req is a group lock and it is compatible and requires
368  *           no further checking
369  * \retval negative error, such as EWOULDBLOCK for group locks
370  */
371 static int
372 ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
373                          __u64 *flags, enum ldlm_error *err,
374                          struct list_head *work_list, int *contended_locks)
375 {
376         struct ldlm_resource *res = req->l_resource;
377         enum ldlm_mode req_mode = req->l_req_mode;
378         __u64 req_start = req->l_req_extent.start;
379         __u64 req_end = req->l_req_extent.end;
380         struct ldlm_lock *lock;
381         int check_contention;
382         int compat = 1;
383         int scan = 0;
384         ENTRY;
385
386         lockmode_verify(req_mode);
387
388         /* Using interval tree for granted lock */
389         if (queue == &res->lr_granted) {
390                 struct ldlm_interval_tree *tree;
391                 struct ldlm_extent_compat_args data = {.work_list = work_list,
392                                                .lock = req,
393                                                .locks = contended_locks,
394                                                .compat = &compat };
395                 struct interval_node_extent ex = { .start = req_start,
396                                                    .end = req_end };
397                 int idx, rc;
398
399                 for (idx = 0; idx < LCK_MODE_NUM; idx++) {
400                         tree = &res->lr_itree[idx];
401                         if (tree->lit_root == NULL) /* empty tree, skipped */
402                                 continue;
403
404                         data.mode = tree->lit_mode;
405                         if (lockmode_compat(req_mode, tree->lit_mode)) {
406                                 struct ldlm_interval *node;
407                                 struct ldlm_extent *extent;
408
409                                 if (req_mode != LCK_GROUP)
410                                         continue;
411
412                                 /* group lock, grant it immediately if
413                                  * compatible */
414                                 node = to_ldlm_interval(tree->lit_root);
415                                 extent = ldlm_interval_extent(node);
416                                 if (req->l_policy_data.l_extent.gid ==
417                                     extent->gid)
418                                         RETURN(2);
419                         }
420
421                         if (tree->lit_mode == LCK_GROUP) {
422                                 if (*flags & LDLM_FL_BLOCK_NOWAIT) {
423                                         compat = -EWOULDBLOCK;
424                                         goto destroylock;
425                                 }
426
427                                 *flags |= LDLM_FL_NO_TIMEOUT;
428                                 if (!work_list)
429                                         RETURN(0);
430
431                                 /* if work list is not NULL, add all
432                                    locks in the tree to the work list */
433                                 compat = 0;
434                                 interval_iterate(tree->lit_root,
435                                                  ldlm_extent_compat_cb, &data);
436                                 continue;
437                         }
438
439                         if (!work_list) {
440                                 rc = interval_is_overlapped(tree->lit_root,&ex);
441                                 if (rc)
442                                         RETURN(0);
443                         } else {
444                                 interval_search(tree->lit_root, &ex,
445                                                 ldlm_extent_compat_cb, &data);
446                                 if (!list_empty(work_list) && compat)
447                                         compat = 0;
448                         }
449                 }
450         } else { /* for waiting queue */
451                 list_for_each_entry(lock, queue, l_res_link) {
452                         check_contention = 1;
453
454                         /* We stop walking the queue if we hit ourselves so
455                          * we don't take conflicting locks enqueued after us
456                          * into account, or we'd wait forever. */
457                         if (req == lock)
458                                 break;
459
460                         if (unlikely(scan)) {
461                                 /* We only get here if we are queuing a GROUP lock
462                                    and met some incompatible one. The main idea of this
463                                    code is to insert the GROUP lock after a compatible
464                                    GROUP lock in the waiting queue or, if there is none,
465                                    then in front of the first non-GROUP lock */
466                                 if (lock->l_req_mode != LCK_GROUP) {
467                                         /* Ok, we hit non-GROUP lock, there should
468                                          * be no more GROUP locks later on, queue in
469                                          * front of first non-GROUP lock */
470
471                                         ldlm_resource_insert_lock_after(lock, req);
472                                         list_del_init(&lock->l_res_link);
473                                         ldlm_resource_insert_lock_after(req, lock);
474                                         compat = 0;
475                                         break;
476                                 }
477                                 if (req->l_policy_data.l_extent.gid ==
478                                     lock->l_policy_data.l_extent.gid) {
479                                         /* found it */
480                                         ldlm_resource_insert_lock_after(lock, req);
481                                         compat = 0;
482                                         break;
483                                 }
484                                 continue;
485                         }
486
487                         /* locks are compatible, overlap doesn't matter */
488                         if (lockmode_compat(lock->l_req_mode, req_mode)) {
489                                 if (req_mode == LCK_PR &&
490                                     ((lock->l_policy_data.l_extent.start <=
491                                       req->l_policy_data.l_extent.start) &&
492                                      (lock->l_policy_data.l_extent.end >=
493                                       req->l_policy_data.l_extent.end))) {
494                                         /* If we met a PR lock just like us or
495                                            wider, and nobody down the list
496                                            conflicted with it, that means we
497                                            can skip processing the rest of
498                                            the list and safely place ourselves
499                                            at the end of the list, or grant
500                                            (depending on whether we met a
501                                            conflicting lock earlier in the
502                                            list).  On first enqueue only, we
503                                            continue traversing if there is
504                                            something conflicting down the list
505                                            because we need to make sure that
506                                            something is marked as AST_SENT as
507                                            well; with an empty worklist we
508                                            would exit on the first conflict. */
509                                         /* There IS a case where such a flag is
510                                            not set for a lock, yet it blocks
511                                            something. Luckily for us this is
512                                            only during destroy, so the lock is
513                                            exclusive. So here we are safe */
514                                         if (!ldlm_is_ast_sent(lock))
515                                                 RETURN(compat);
516                                 }
517
518                                 /* non-group locks are compatible, overlap doesn't
519                                    matter */
520                                 if (likely(req_mode != LCK_GROUP))
521                                         continue;
522
523                                 /* If we are trying to get a GROUP lock and there is
524                                    another one of this kind, we need to compare gid */
525                                 if (req->l_policy_data.l_extent.gid ==
526                                     lock->l_policy_data.l_extent.gid) {
527                                         /* If existing lock with matched gid is granted,
528                                            we grant new one too. */
529                                         if (lock->l_req_mode == lock->l_granted_mode)
530                                                 RETURN(2);
531
532                                         /* Otherwise we are scanning the queue of
533                                          * waiting locks and it means the current
534                                          * request would block along with the existing
535                                          * lock (that is already blocked).
536                                          * If we are in nonblocking mode - return
537                                          * immediately */
538                                         if (*flags & LDLM_FL_BLOCK_NOWAIT) {
539                                                 compat = -EWOULDBLOCK;
540                                                 goto destroylock;
541                                         }
542                                         /* If this group lock is compatible with another
543                                          * group lock on the waiting list, they must be
544                                          * together in the list, so they can be granted
545                                          * at the same time.  Otherwise the later lock
546                                          * can get stuck behind another, incompatible,
547                                          * lock. */
548                                         ldlm_resource_insert_lock_after(lock, req);
549                                         /* Because 'lock' is not granted, we can stop
550                                          * processing this queue and return immediately.
551                                          * There is no need to check the rest of the
552                                          * list. */
553                                         RETURN(0);
554                                 }
555                         }
556
557                         if (unlikely(req_mode == LCK_GROUP &&
558                                      (lock->l_req_mode != lock->l_granted_mode))) {
559                                 scan = 1;
560                                 compat = 0;
561                                 if (lock->l_req_mode != LCK_GROUP) {
562                                         /* Ok, we hit non-GROUP lock, there should be no
563                                            more GROUP locks later on, queue in front of
564                                            first non-GROUP lock */
565
566                                         ldlm_resource_insert_lock_after(lock, req);
567                                         list_del_init(&lock->l_res_link);
568                                         ldlm_resource_insert_lock_after(req, lock);
569                                         break;
570                                 }
571                                 if (req->l_policy_data.l_extent.gid ==
572                                     lock->l_policy_data.l_extent.gid) {
573                                         /* found it */
574                                         ldlm_resource_insert_lock_after(lock, req);
575                                         break;
576                                 }
577                                 continue;
578                         }
579
580                         if (unlikely(lock->l_req_mode == LCK_GROUP)) {
581                                 /* If the compared lock is GROUP, then the requested
582                                  * lock is PR/PW, so this is not compatible; extent
583                                  * range does not matter */
584                                 if (*flags & LDLM_FL_BLOCK_NOWAIT) {
585                                         compat = -EWOULDBLOCK;
586                                         goto destroylock;
587                                 } else {
588                                         *flags |= LDLM_FL_NO_TIMEOUT;
589                                 }
590                         } else if (lock->l_policy_data.l_extent.end < req_start ||
591                                    lock->l_policy_data.l_extent.start > req_end) {
592                                 /* if a non-group lock doesn't overlap, skip it */
593                                 continue;
594                         } else if (lock->l_req_extent.end < req_start ||
595                                    lock->l_req_extent.start > req_end) {
596                                 /* false contention, the requests don't really overlap */
597                                 check_contention = 0;
598                         }
599
600                         if (!work_list)
601                                 RETURN(0);
602
603                         /* don't count conflicting glimpse locks */
604                         if (lock->l_req_mode == LCK_PR &&
605                             lock->l_policy_data.l_extent.start == 0 &&
606                             lock->l_policy_data.l_extent.end == OBD_OBJECT_EOF)
607                                 check_contention = 0;
608
609                         *contended_locks += check_contention;
610
611                         compat = 0;
612                         if (lock->l_blocking_ast &&
613                             lock->l_req_mode != LCK_GROUP)
614                                 ldlm_add_ast_work_item(lock, req, work_list);
615                 }
616         }
617
618         if (ldlm_check_contention(req, *contended_locks) &&
619             compat == 0 &&
620             (*flags & LDLM_FL_DENY_ON_CONTENTION) &&
621             req->l_req_mode != LCK_GROUP &&
622             req_end - req_start <=
623             ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
624                 GOTO(destroylock, compat = -EUSERS);
625
626         RETURN(compat);
627 destroylock:
628         list_del_init(&req->l_res_link);
629         ldlm_lock_destroy_nolock(req);
630         *err = compat;
631         RETURN(compat);
632 }
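
/*
 * Illustrative sketch (an assumption, condensed from ldlm_process_extent_lock
 * below): how a caller typically interprets the return value:
 *
 *	rc = ldlm_extent_compat_queue(&res->lr_granted, req, flags, err,
 *				      work_list, &contended_locks);
 *	if (rc < 0)        // e.g. -EWOULDBLOCK or -EUSERS: req was destroyed
 *		return;
 *	if (rc == 2)       // compatible GROUP lock: grant immediately
 *		grant;
 *	else if (rc == 1)  // compatible: go on and check the waiting queue too
 *		...;
 *	else               // rc == 0: conflicts found, ASTs queued on work_list
 *		wait;
 */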
633
634 /**
635  * This function refreshes the eviction timer for a cancelled lock.
636  * \param[in] lock              ldlm lock to refresh
637  * \param[in] arg               ldlm prolong arguments; timeout, export, extent
638  *                              and counter are used
639  */
640 void ldlm_lock_prolong_one(struct ldlm_lock *lock,
641                            struct ldlm_prolong_args *arg)
642 {
643         int timeout;
644
645         if (arg->lpa_export != lock->l_export ||
646             lock->l_flags & LDLM_FL_DESTROYED)
647                 /* ignore unrelated locks */
648                 return;
649
650         arg->lpa_locks_cnt++;
651
652         if (!(lock->l_flags & LDLM_FL_AST_SENT))
653                 /* ignore locks not being cancelled */
654                 return;
655
656         /* We are in the middle of the process - BL AST is sent, CANCEL
657          * is ahead. Take half of BL AT + IO AT process time.
658          */
659         timeout = arg->lpa_timeout + (ldlm_bl_timeout(lock) >> 1);
660
661         LDLM_DEBUG(lock, "refreshed to %ds.\n", timeout);
662
663         arg->lpa_blocks_cnt++;
664
665         /* OK, this is possibly a lock the user holds while doing I/O,
666          * so let's refresh the eviction timer for it.
667          */
668         ldlm_refresh_waiting_lock(lock, timeout);
669 }
670 EXPORT_SYMBOL(ldlm_lock_prolong_one);
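
/*
 * Worked example (illustrative, hypothetical numbers): with arg->lpa_timeout
 * of 30 seconds and ldlm_bl_timeout(lock) of 20 seconds, a lock that already
 * has AST_SENT set is refreshed to 30 + (20 >> 1) = 40 seconds, i.e. the I/O
 * timeout plus half of the blocking-AST budget, before the client risks
 * eviction for not cancelling it.
 */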
671
672 static enum interval_iter ldlm_resource_prolong_cb(struct interval_node *n,
673                                                    void *data)
674 {
675         struct ldlm_prolong_args *arg = data;
676         struct ldlm_interval *node = to_ldlm_interval(n);
677         struct ldlm_lock *lock;
678
679         ENTRY;
680
681         LASSERT(!list_empty(&node->li_group));
682
683         list_for_each_entry(lock, &node->li_group, l_sl_policy) {
684                 ldlm_lock_prolong_one(lock, arg);
685         }
686
687         RETURN(INTERVAL_ITER_CONT);
688 }
689
690 /**
691  * Walk through the granted tree and prolong locks if they overlap the extent.
692  *
693  * \param[in] arg               prolong args
694  */
695 void ldlm_resource_prolong(struct ldlm_prolong_args *arg)
696 {
697         struct ldlm_interval_tree *tree;
698         struct ldlm_resource *res;
699         struct interval_node_extent ex = { .start = arg->lpa_extent.start,
700                                            .end = arg->lpa_extent.end };
701         int idx;
702
703         ENTRY;
704
705         res = ldlm_resource_get(arg->lpa_export->exp_obd->obd_namespace, NULL,
706                                 &arg->lpa_resid, LDLM_EXTENT, 0);
707         if (IS_ERR(res)) {
708                 CDEBUG(D_DLMTRACE, "Failed to get resource for resid "LPU64"/"
709                        LPU64"\n", arg->lpa_resid.name[0],
710                        arg->lpa_resid.name[1]);
711                 RETURN_EXIT;
712         }
713
714         lock_res(res);
715         for (idx = 0; idx < LCK_MODE_NUM; idx++) {
716                 tree = &res->lr_itree[idx];
717                 if (tree->lit_root == NULL) /* empty tree, skipped */
718                         continue;
719
720                 /* There is no way to check the groupID here,
721                  * so all group locks are considered valid,
722                  * especially because the client is supposed to
723                  * check that it has such a lock before sending an RPC.
724                  */
725                 if (!(tree->lit_mode & arg->lpa_mode))
726                         continue;
727
728                 interval_search(tree->lit_root, &ex,
729                                 ldlm_resource_prolong_cb, arg);
730         }
731
732         unlock_res(res);
733         ldlm_resource_putref(res);
734
735         EXIT;
736 }
737 EXPORT_SYMBOL(ldlm_resource_prolong);
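
/*
 * Illustrative usage sketch (an assumption, not copied from a real caller):
 * a server-side I/O handler could refresh all of one client's matching
 * extent locks and then check lpa_blocks_cnt to see whether any lock under
 * cancellation was actually prolonged:
 *
 *	struct ldlm_prolong_args arg = { 0 };
 *
 *	arg.lpa_export       = exp;             // client export (assumed)
 *	arg.lpa_resid        = resid;           // resource id of the object
 *	arg.lpa_extent.start = io_start;
 *	arg.lpa_extent.end   = io_end;
 *	arg.lpa_mode         = LCK_PW;
 *	arg.lpa_timeout      = prolong_timeout; // hypothetical value
 *	ldlm_resource_prolong(&arg);
 */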
738
739
740 /**
741  * Discard all AST work items from list.
742  *
743  * If for whatever reason we do not want to send ASTs to conflicting locks
744  * anymore, disassemble the list with this function.
745  */
746 static void discard_bl_list(struct list_head *bl_list)
747 {
748         struct list_head *tmp, *pos;
749         ENTRY;
750
751         list_for_each_safe(pos, tmp, bl_list) {
752                 struct ldlm_lock *lock =
753                         list_entry(pos, struct ldlm_lock, l_bl_ast);
754
755                 list_del_init(&lock->l_bl_ast);
756                 LASSERT(ldlm_is_ast_sent(lock));
757                 ldlm_clear_ast_sent(lock);
758                 LASSERT(lock->l_bl_ast_run == 0);
759                 LASSERT(lock->l_blocking_lock);
760                 LDLM_LOCK_RELEASE(lock->l_blocking_lock);
761                 lock->l_blocking_lock = NULL;
762                 LDLM_LOCK_RELEASE(lock);
763         }
764         EXIT;
765 }
766
767 /**
768  * Process a granting attempt for extent lock.
769  * Must be called with ns lock held.
770  *
771  * This function looks for any conflicts for \a lock in the granted or
772  * waiting queues. The lock is granted if no conflicts are found in
773  * either queue.
774  *
775  * If \a first_enq is 0 (ie, called from ldlm_reprocess_queue):
776  *   - blocking ASTs have already been sent
777  *
778  * If \a first_enq is 1 (ie, called from ldlm_lock_enqueue):
779  *   - blocking ASTs have not been sent yet, so list of conflicting locks
780  *     would be collected and ASTs sent.
781  */
782 int ldlm_process_extent_lock(struct ldlm_lock *lock, __u64 *flags,
783                              int first_enq, enum ldlm_error *err,
784                              struct list_head *work_list)
785 {
786         struct ldlm_resource *res = lock->l_resource;
787         struct list_head rpc_list;
788         int rc, rc2;
789         int contended_locks = 0;
790         ENTRY;
791
792         LASSERT(lock->l_granted_mode != lock->l_req_mode);
793         LASSERT(list_empty(&res->lr_converting));
794         LASSERT(!(*flags & LDLM_FL_DENY_ON_CONTENTION) ||
795                 !ldlm_is_ast_discard_data(lock));
796         INIT_LIST_HEAD(&rpc_list);
797         check_res_locked(res);
798         *err = ELDLM_OK;
799
800         if (!first_enq) {
801                 /* Careful observers will note that we don't handle -EWOULDBLOCK
802                  * here, but it's ok for a non-obvious reason -- compat_queue
803                  * can only return -EWOULDBLOCK if (flags & BLOCK_NOWAIT).
804                  * flags should always be zero here, and if that ever stops
805                  * being true, we want to find out. */
806                 LASSERT(*flags == 0);
807                 rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
808                                               err, NULL, &contended_locks);
809                 if (rc == 1) {
810                         rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
811                                                       flags, err, NULL,
812                                                       &contended_locks);
813                 }
814                 if (rc == 0)
815                         RETURN(LDLM_ITER_STOP);
816
817                 ldlm_resource_unlink_lock(lock);
818
819                 if (!OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_EVICT_RACE))
820                         ldlm_extent_policy(res, lock, flags);
821                 ldlm_grant_lock(lock, work_list);
822                 RETURN(LDLM_ITER_CONTINUE);
823         }
824
825  restart:
826         contended_locks = 0;
827         rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
828                                       &rpc_list, &contended_locks);
829         if (rc < 0)
830                 GOTO(out, rc); /* lock was destroyed */
831         if (rc == 2)
832                 goto grant;
833
834         rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, err,
835                                        &rpc_list, &contended_locks);
836         if (rc2 < 0)
837                 GOTO(out, rc = rc2); /* lock was destroyed */
838
839         if (rc + rc2 == 2) {
840         grant:
841                 ldlm_extent_policy(res, lock, flags);
842                 ldlm_resource_unlink_lock(lock);
843                 ldlm_grant_lock(lock, NULL);
844         } else {
845                 /* If either of the compat_queue()s returned failure, then we
846                  * have ASTs to send and must go onto the waiting list.
847                  *
848                  * bug 2322: we used to unlink and re-add here, which was a
849                  * terrible folly -- if we goto restart, we could get
850                  * re-ordered!  Causes deadlock, because ASTs aren't sent! */
851                 if (list_empty(&lock->l_res_link))
852                         ldlm_resource_add_lock(res, &res->lr_waiting, lock);
853                 unlock_res(res);
854                 rc = ldlm_run_ast_work(ldlm_res_to_ns(res), &rpc_list,
855                                        LDLM_WORK_BL_AST);
856
857                 if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
858                     !ns_is_client(ldlm_res_to_ns(res)))
859                         class_fail_export(lock->l_export);
860
861                 lock_res(res);
862                 if (rc == -ERESTART) {
863                         /* 15715: The lock was granted and destroyed after
864                          * resource lock was dropped. Interval node was freed
865                          * in ldlm_lock_destroy. Anyway, this always happens
866                          * when a client is being evicted. So it would be
867                          * ok to return an error. -jay */
868                         if (ldlm_is_destroyed(lock)) {
869                                 *err = -EAGAIN;
870                                 GOTO(out, rc = -EAGAIN);
871                         }
872
873                         /* lock was granted while resource was unlocked. */
874                         if (lock->l_granted_mode == lock->l_req_mode) {
875                                 /* bug 11300: if the lock has been granted,
876                                  * break earlier because otherwise, we will go
877                                  * to restart and ldlm_resource_unlink will be
878                                  * called and it causes the interval node to be
879                                  * freed. Then we will fail at
880                                  * ldlm_extent_add_lock() */
881                                 *flags &= ~LDLM_FL_BLOCKED_MASK;
882                                 GOTO(out, rc = 0);
883                         }
884
885                         GOTO(restart, rc);
886                 }
887
888                 /* this way we force client to wait for the lock
889                  * endlessly once the lock is enqueued -bzzz */
890                 *flags |= LDLM_FL_BLOCK_GRANTED | LDLM_FL_NO_TIMEOUT;
891
892         }
893         RETURN(0);
894 out:
895         if (!list_empty(&rpc_list)) {
896                 LASSERT(!ldlm_is_ast_discard_data(lock));
897                 discard_bl_list(&rpc_list);
898         }
899         RETURN(rc);
900 }
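
/*
 * Illustrative sketch (an assumption, heavily condensed): the first_enq = 1
 * path above reduces to "check both queues, grant if fully compatible,
 * otherwise send blocking ASTs and wait (or restart)":
 *
 *	rc  = compat(lr_granted);   // < 0: destroyed, 2: group-lock grant
 *	rc2 = compat(lr_waiting);
 *	if (rc + rc2 == 2)
 *		expand extent, grant;
 *	else
 *		put lock on lr_waiting, drop the resource lock,
 *		run the collected BL ASTs, re-take the resource lock
 *		and either restart or leave the lock blocked;
 */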
901 #endif /* HAVE_SERVER_SUPPORT */
902
903 /* When a lock is cancelled by a client, the KMS may undergo change if this
904  * is the "highest lock".  This function returns the new KMS value.
905  * Caller must hold lr_lock already.
906  *
907  * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
908 __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
909 {
910         struct ldlm_resource *res = lock->l_resource;
911         struct list_head *tmp;
912         struct ldlm_lock *lck;
913         __u64 kms = 0;
914         ENTRY;
915
916         /* don't let another thread in ldlm_extent_shift_kms race in
917          * just after we finish and take our lock into account in its
918          * calculation of the kms */
919         ldlm_set_kms_ignore(lock);
920
921         list_for_each(tmp, &res->lr_granted) {
922                 lck = list_entry(tmp, struct ldlm_lock, l_res_link);
923
924                 if (ldlm_is_kms_ignore(lck))
925                         continue;
926
927                 if (lck->l_policy_data.l_extent.end >= old_kms)
928                         RETURN(old_kms);
929
930                 /* This extent _has_ to be smaller than old_kms (checked above)
931                  * so kms can only ever be smaller or the same as old_kms. */
932                 if (lck->l_policy_data.l_extent.end + 1 > kms)
933                         kms = lck->l_policy_data.l_extent.end + 1;
934         }
935         LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);
936
937         RETURN(kms);
938 }
939 EXPORT_SYMBOL(ldlm_extent_shift_kms);
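
/*
 * Worked example (illustrative): suppose the granted list holds locks on
 * [0, 4095] and [4096, 8191] and old_kms = 8192.  Cancelling the second lock
 * leaves only [0, 4095], so the KMS drops to 4095 + 1 = 4096.  Cancelling the
 * first lock instead keeps [4096, 8191], so the KMS stays at 8191 + 1 = 8192,
 * matching the note above that a lock on [x, y] protects a KMS of up to
 * y + 1 bytes.
 */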
940
941 struct kmem_cache *ldlm_interval_slab;
942 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
943 {
944         struct ldlm_interval *node;
945         ENTRY;
946
947         LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
948         OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, GFP_NOFS);
949         if (node == NULL)
950                 RETURN(NULL);
951
952         INIT_LIST_HEAD(&node->li_group);
953         ldlm_interval_attach(node, lock);
954         RETURN(node);
955 }
956
957 void ldlm_interval_free(struct ldlm_interval *node)
958 {
959         if (node) {
960                 LASSERT(list_empty(&node->li_group));
961                 LASSERT(!interval_is_intree(&node->li_node));
962                 OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
963         }
964 }
965
966 /* interval tree, for LDLM_EXTENT. */
967 void ldlm_interval_attach(struct ldlm_interval *n,
968                           struct ldlm_lock *l)
969 {
970         LASSERT(l->l_tree_node == NULL);
971         LASSERT(l->l_resource->lr_type == LDLM_EXTENT);
972
973         list_add_tail(&l->l_sl_policy, &n->li_group);
974         l->l_tree_node = n;
975 }
976
977 struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
978 {
979         struct ldlm_interval *n = l->l_tree_node;
980
981         if (n == NULL)
982                 return NULL;
983
984         LASSERT(!list_empty(&n->li_group));
985         l->l_tree_node = NULL;
986         list_del_init(&l->l_sl_policy);
987
988         return list_empty(&n->li_group) ? n : NULL;
989 }
990
991 static inline int ldlm_mode_to_index(enum ldlm_mode mode)
992 {
993         int index;
994
995         LASSERT(mode != 0);
996         LASSERT(IS_PO2(mode));
997         for (index = -1; mode != 0; index++, mode >>= 1)
998                 /* do nothing */;
999         LASSERT(index < LCK_MODE_NUM);
1000         return index;
1001 }
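
/*
 * Illustrative examples (assuming the usual power-of-two lock mode values,
 * e.g. LCK_EX = 1, LCK_PW = 2, LCK_PR = 4): the loop above computes log2 of
 * the mode, so ldlm_mode_to_index(LCK_EX) == 0, ldlm_mode_to_index(LCK_PW)
 * == 1 and ldlm_mode_to_index(LCK_PR) == 2, which is exactly the slot used
 * to pick the per-mode interval tree in res->lr_itree[].
 */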
1002
1003 /** Add newly granted lock into interval tree for the resource. */
1004 void ldlm_extent_add_lock(struct ldlm_resource *res,
1005                           struct ldlm_lock *lock)
1006 {
1007         struct interval_node *found, **root;
1008         struct ldlm_interval *node;
1009         struct ldlm_extent *extent;
1010         int idx;
1011
1012         LASSERT(lock->l_granted_mode == lock->l_req_mode);
1013
1014         node = lock->l_tree_node;
1015         LASSERT(node != NULL);
1016         LASSERT(!interval_is_intree(&node->li_node));
1017
1018         idx = ldlm_mode_to_index(lock->l_granted_mode);
1019         LASSERT(lock->l_granted_mode == 1 << idx);
1020         LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);
1021
1022         /* initialize the node extent */
1023         extent = &lock->l_policy_data.l_extent;
1024         interval_set(&node->li_node, extent->start, extent->end);
1025
1026         root = &res->lr_itree[idx].lit_root;
1027         found = interval_insert(&node->li_node, root);
1028         if (found) { /* The policy group found. */
1029                 struct ldlm_interval *tmp = ldlm_interval_detach(lock);
1030                 LASSERT(tmp != NULL);
1031                 ldlm_interval_free(tmp);
1032                 ldlm_interval_attach(to_ldlm_interval(found), lock);
1033         }
1034         res->lr_itree[idx].lit_size++;
1035
1036         /* even though we use the interval tree to manage the extent locks,
1037          * we also add the locks to the granted list, for debugging purposes */
1038         ldlm_resource_add_lock(res, &res->lr_granted, lock);
1039 }
1040
1041 /** Remove cancelled lock from resource interval tree. */
1042 void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
1043 {
1044         struct ldlm_resource *res = lock->l_resource;
1045         struct ldlm_interval *node = lock->l_tree_node;
1046         struct ldlm_interval_tree *tree;
1047         int idx;
1048
1049         if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
1050                 return;
1051
1052         idx = ldlm_mode_to_index(lock->l_granted_mode);
1053         LASSERT(lock->l_granted_mode == 1 << idx);
1054         tree = &res->lr_itree[idx];
1055
1056         LASSERT(tree->lit_root != NULL); /* assure the tree is not null */
1057
1058         tree->lit_size--;
1059         node = ldlm_interval_detach(lock);
1060         if (node) {
1061                 interval_erase(&node->li_node, &tree->lit_root);
1062                 ldlm_interval_free(node);
1063         }
1064 }
1065
1066 void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
1067                                       union ldlm_policy_data *lpolicy)
1068 {
1069         memset(lpolicy, 0, sizeof(*lpolicy));
1070         lpolicy->l_extent.start = wpolicy->l_extent.start;
1071         lpolicy->l_extent.end = wpolicy->l_extent.end;
1072         lpolicy->l_extent.gid = wpolicy->l_extent.gid;
1073 }
1074
1075 void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
1076                                       union ldlm_wire_policy_data *wpolicy)
1077 {
1078         memset(wpolicy, 0, sizeof(*wpolicy));
1079         wpolicy->l_extent.start = lpolicy->l_extent.start;
1080         wpolicy->l_extent.end = lpolicy->l_extent.end;
1081         wpolicy->l_extent.gid = lpolicy->l_extent.gid;
1082 }
1083