fs/lustre-release.git: lustre/lov/lov_lock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov lov @{ */

static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent);

/*****************************************************************************
 *
 * Lov lock operations.
 *
 */

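/**
 * Returns the environment and IO that should be used for cl_lock operations
 * on a sub-lock of \a parent described by \a lls. Normally this is the
 * per-stripe sub-IO environment obtained through lov_sub_get(); when the
 * lock's object differs from the object of the current IO, the parent's
 * environment is borrowed instead (see the FIXME below). Returns ERR_PTR()
 * on failure.
 */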
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io          *lio    = lov_env_io(env);
        struct cl_io           *io     = lio->lis_cl.cis_io;
        struct lov_io_sub      *sub;

        subenv = &lov_env_session(env)->ls_subenv;

        /*
         * FIXME: We tend to use the subio's env & io to call the sublock
         * lock operations because osc lock sometimes stores some control
         * variables in the thread's IO information (now only lockless
         * information). However, if the lock's host (object) is different
         * from the object for the current IO, we have no way to get the
         * subenv and subio because they are not initialized at all. As a
         * temporary fix, in this case, we still borrow the parent's env to
         * call sublock operations.
         */
        if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io  = io;
                subenv->lse_sub = NULL;
        } else {
                LASSERT(io != NULL);
                sub = lov_sub_get(env, lio, lls->sub_stripe);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io  = sub->sub_io;
                        subenv->lse_sub = sub;
                } else {
                        subenv = (void*)sub;
                }
        }
        return subenv;
}

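/**
 * Releases a sub-lock environment obtained by lov_sublock_env_get().
 */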
static void lov_sublock_env_put(struct lov_sublock_env *subenv)
{
        if (subenv && subenv->lse_sub)
                lov_sub_put(subenv->lse_sub);
}

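/**
 * Attaches a created sub-lock \a sublock to the top-lock \a lck at stripe
 * index \a idx: records the sub-lock in the top-lock's sub-lock array, links
 * \a link into the sub-lock's list of parents, takes a reference on the
 * parent on behalf of the sub-lock, and adds a lock user to the sub-lock.
 */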
static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
                              struct cl_lock *sublock, int idx,
                              struct lov_lock_link *link)
{
        struct lovsub_lock *lsl;
        struct cl_lock     *parent = lck->lls_cl.cls_lock;
        int                 rc;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sublock));
        ENTRY;

        lsl = cl2sub_lock(sublock);
        /*
         * check that the sub-lock doesn't have a lock link to this top-lock.
         */
        LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
        LASSERT(idx < lck->lls_nr);

        lck->lls_sub[idx].sub_lock = lsl;
        lck->lls_nr_filled++;
        LASSERT(lck->lls_nr_filled <= lck->lls_nr);
        list_add_tail(&link->lll_list, &lsl->lss_parents);
        link->lll_idx = idx;
        link->lll_super = lck;
        cl_lock_get(parent);
        lu_ref_add(&parent->cll_reference, "lov-child", sublock);
        lck->lls_sub[idx].sub_flags |= LSF_HELD;
        cl_lock_user_add(env, sublock);

        rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
        LASSERT(rc == 0); /* there is no way this can fail, currently */
        EXIT;
}

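/**
 * Creates a sub-lock for stripe \a idx of the top-lock \a lck, together with
 * a lov_lock_link (returned through \a out) that lov_sublock_adopt() later
 * uses to attach the sub-lock to the top-lock. Returns the new sub-lock, or
 * ERR_PTR() on failure.
 */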
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
                                         const struct cl_io *io,
                                         struct lov_lock *lck,
                                         int idx, struct lov_lock_link **out)
{
        struct cl_lock       *sublock;
        struct cl_lock       *parent;
        struct lov_lock_link *link;

        LASSERT(idx < lck->lls_nr);
        ENTRY;

        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
        if (link != NULL) {
                struct lov_sublock_env *subenv;
                struct lov_lock_sub  *lls;
                struct cl_lock_descr *descr;

                parent = lck->lls_cl.cls_lock;
                lls    = &lck->lls_sub[idx];
                descr  = &lls->sub_descr;

                subenv = lov_sublock_env_get(env, parent, lls);
                if (!IS_ERR(subenv)) {
                        /* CAVEAT: Don't try to add a field in lov_lock_sub
                         * to remember the subio. This is because a lock can
                         * be cached, but this is not true for IO. This
                         * further means a sublock might be referenced in
                         * a different IO context. -jay */

                        sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
                                               descr, "lov-parent", parent);
                        lov_sublock_env_put(subenv);
                } else {
                        /* an error occurred */
                        sublock = (void*)subenv;
                }

                if (!IS_ERR(sublock))
                        *out = link;
                else
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        } else
                sublock = ERR_PTR(-ENOMEM);
        RETURN(sublock);
}

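/**
 * Undoes lov_sublock_lock(): releases the sub-lock environment, clears the
 * sub-lock's record of the active parent and dissolves the closure, dropping
 * the sub-lock mutex.
 */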
static void lov_sublock_unlock(const struct lu_env *env,
                               struct lovsub_lock *lsl,
                               struct cl_lock_closure *closure,
                               struct lov_sublock_env *subenv)
{
        ENTRY;
        lov_sublock_env_put(subenv);
        lsl->lss_active = NULL;
        cl_lock_disclosure(env, closure);
        EXIT;
}

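/**
 * Acquires the mutex of the cl_lock underlying sub-lock \a lls through a
 * closure rooted at the top-lock, marks the top-lock as the sub-lock's
 * active parent and, if \a lsep is not NULL, returns the sub-lock
 * environment through it. Returns 0 on success, CLO_REPEAT when racing with
 * sub-lock deletion, or another non-zero code if the closure cannot be
 * built or the sub-lock environment cannot be obtained.
 */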
static int lov_sublock_lock(const struct lu_env *env,
                            struct lov_lock *lck,
                            struct lov_lock_sub *lls,
                            struct cl_lock_closure *closure,
                            struct lov_sublock_env **lsep)
{
        struct lovsub_lock *sublock;
        struct cl_lock     *child;
        int                 result = 0;
        ENTRY;

        LASSERT(list_empty(&closure->clc_list));

        sublock = lls->sub_lock;
        child = sublock->lss_cl.cls_lock;
        result = cl_lock_closure_build(env, child, closure);
        if (result == 0) {
                struct cl_lock *parent = closure->clc_origin;

                LASSERT(cl_lock_is_mutexed(child));
                sublock->lss_active = parent;

                if (unlikely(child->cll_state == CLS_FREEING)) {
                        struct lov_lock_link *link;
                        /*
                         * we could race with lock deletion which temporarily
                         * puts the lock into the freeing state, bug 19080.
                         */
                        LASSERT(!(lls->sub_flags & LSF_HELD));

                        link = lov_lock_link_find(env, lck, sublock);
                        LASSERT(link != NULL);
                        lov_lock_unlink(env, link, sublock);
                        lov_sublock_unlock(env, sublock, closure, NULL);
                        result = CLO_REPEAT;
                } else if (lsep) {
                        struct lov_sublock_env *subenv;
                        subenv = lov_sublock_env_get(env, parent, lls);
                        if (IS_ERR(subenv)) {
                                lov_sublock_unlock(env, sublock,
                                                   closure, NULL);
                                result = PTR_ERR(subenv);
                        } else {
                                *lsep = subenv;
                        }
                }
        }
        RETURN(result);
}

/**
 * Updates the result of a top-lock operation from the result of a sub-lock
 * sub-operation. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
 * over sub-locks and use lov_subresult() to calculate the return value of
 * the top-operation. To this end, possible return values of sub-operations
 * are ordered as
 *
 *     - 0                  success
 *     - CLO_WAIT           wait for event
 *     - CLO_REPEAT         repeat top-operation
 *     - -ve                fundamental error
 *
 * The top-level return code can only move down through this list: for
 * example, CLO_REPEAT from one sub-lock overrides CLO_WAIT from another,
 * and any negative error code overrides both. CLO_REPEAT overwrites
 * CLO_WAIT, because the lock mutex was released and the sleeping condition
 * has to be rechecked by the upper layer.
 */
static int lov_subresult(int result, int rc)
{
        int result_rank;
        int rc_rank;

        LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
        LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
        CLASSERT(CLO_WAIT < CLO_REPEAT);

        ENTRY;

        /* calculate ranks in the ordering above */
        result_rank = result < 0 ? 1 + CLO_REPEAT : result;
        rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;

        if (result_rank < rc_rank)
                result = rc;
        RETURN(result);
}

/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 */
static int lov_lock_sub_init(const struct lu_env *env,
                             struct lov_lock *lck, const struct cl_io *io)
{
        int result = 0;
        int i;
        int j;
        int nr;
        int stripe;
        int start_stripe;
        obd_off start;
        obd_off end;
        obd_off file_start;
        obd_off file_end;

        struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0     = lov_r0(loo);
        struct cl_lock          *parent = lck->lls_cl.cls_lock;

        ENTRY;

        lck->lls_orig = parent->cll_descr;
        file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
        file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;

        start_stripe = lov_stripe_number(r0->lo_lsm, file_start);
        for (i = 0, nr = 0; i < r0->lo_nr; i++) {
                /*
                 * XXX for wide striping a smarter algorithm is desirable,
                 * breaking out of the loop early.
                 */
                stripe = (start_stripe + i) % r0->lo_nr;
                if (lov_stripe_intersects(r0->lo_lsm, stripe,
                                          file_start, file_end, &start, &end))
                        nr++;
        }
        LASSERT(nr > 0);
        OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
        if (lck->lls_sub == NULL)
                RETURN(-ENOMEM);

        lck->lls_nr = nr;
        /*
         * First, fill in sub-lock descriptions in
         * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
         * (called below in this function, and by lov_lock_enqueue()) to
         * create sub-locks. At this moment, no other thread can access
         * the top-lock.
         */
        for (j = 0, nr = 0; j < i; ++j) {
                stripe = (start_stripe + j) % r0->lo_nr;
                if (lov_stripe_intersects(r0->lo_lsm, stripe,
                                          file_start, file_end, &start, &end)) {
                        struct cl_lock_descr *descr;

                        descr = &lck->lls_sub[nr].sub_descr;

                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj   = lovsub2cl(r0->lo_sub[stripe]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end   = cl_index(descr->cld_obj, end);
                        descr->cld_mode  = parent->cll_descr.cld_mode;
                        descr->cld_gid   = parent->cll_descr.cld_gid;
                        /* XXX has no effect */
                        lck->lls_sub[nr].sub_got = *descr;
                        lck->lls_sub[nr].sub_stripe = stripe;
                        nr++;
                }
        }
        LASSERT(nr == lck->lls_nr);
        /*
         * Then, create sub-locks. Once at least one sub-lock was created,
         * the top-lock can be reached by other threads.
         */
        for (i = 0; i < lck->lls_nr; ++i) {
                struct cl_lock       *sublock;
                struct lov_lock_link *link;

                if (lck->lls_sub[i].sub_lock == NULL) {
                        sublock = lov_sublock_alloc(env, io, lck, i, &link);
                        if (IS_ERR(sublock)) {
                                result = PTR_ERR(sublock);
                                break;
                        }
                        cl_lock_mutex_get(env, sublock);
                        cl_lock_mutex_get(env, parent);
                        /*
                         * recheck under mutex that the sub-lock wasn't
                         * created concurrently, and that the top-lock is
                         * still alive.
                         */
                        if (lck->lls_sub[i].sub_lock == NULL &&
                            parent->cll_state < CLS_FREEING) {
                                lov_sublock_adopt(env, lck, sublock, i, link);
                                cl_lock_mutex_put(env, parent);
                        } else {
                                OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                                cl_lock_mutex_put(env, parent);
                                cl_lock_unhold(env, sublock,
                                               "lov-parent", parent);
                        }
                        cl_lock_mutex_put(env, sublock);
                }
        }
        /*
         * Some sub-locks can be missing at this point. This is not a
         * problem, because enqueue will create them anyway. The main duty of
         * this function is to fill in sub-lock descriptions in a race-free
         * manner.
         */
        RETURN(result);
}

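/**
 * Releases the hold (LSF_HELD) that the top-lock \a lck has on its \a i-th
 * sub-lock, optionally (\a deluser) removing the lock user as well. If the
 * sub-lock is about to be cancelled, the parent mutex is dropped around
 * cl_lock_unhold() and \a rc is combined with CLO_REPEAT.
 */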
static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
                               int i, int deluser, int rc)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        ENTRY;

        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                struct cl_lock    *sublock;
                int dying;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));

                lck->lls_sub[i].sub_flags &= ~LSF_HELD;
                if (deluser)
                        cl_lock_user_del(env, sublock);
                /*
                 * If the last hold is released, and cancellation is pending
                 * for a sub-lock, release the parent mutex, to avoid keeping
                 * it while the sub-lock is being paged out.
                 */
                dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
                         sublock->cll_descr.cld_mode == CLM_GROUP ||
                         (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
                        sublock->cll_holds == 1;
                if (dying)
                        cl_lock_mutex_put(env, parent);
                cl_lock_unhold(env, sublock, "lov-parent", parent);
                if (dying) {
                        cl_lock_mutex_get(env, parent);
                        rc = lov_subresult(rc, CLO_REPEAT);
                }
                /*
                 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
                 * not backed by a reference on a
                 * sub-lock. lovsub_lock_delete() will clear
                 * lck->lls_sub[i].sub_lock under semaphores, just before
                 * the sub-lock is destroyed.
                 */
        }
        RETURN(rc);
}

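/**
 * Takes a hold and a lock user on the \a i-th sub-lock of \a lck on behalf
 * of the top-lock and marks the sub-lock as held (LSF_HELD). This is the
 * counterpart of lov_sublock_release().
 */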
static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
                             int i)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        ENTRY;

        if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
                struct cl_lock *sublock;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));
                LASSERT(sublock->cll_state != CLS_FREEING);

                lck->lls_sub[i].sub_flags |= LSF_HELD;

                cl_lock_get_trust(sublock);
                cl_lock_hold_add(env, sublock, "lov-parent", parent);
                cl_lock_user_add(env, sublock);
                cl_lock_put(env, sublock);
        }
        EXIT;
}

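/**
 * An implementation of cl_lock_operations::clo_fini() for the lov layer:
 * frees the array of sub-lock slots (which must all be empty by this time)
 * and the lov_lock slice itself.
 */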
static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lck;
        int i;

        ENTRY;
        lck = cl2lov_lock(slice);
        LASSERT(lck->lls_nr_filled == 0);
        if (lck->lls_sub != NULL) {
                for (i = 0; i < lck->lls_nr; ++i)
                        /*
                         * No sub-locks exist at this point, as each sub-lock
                         * holds a reference on its parent.
                         */
                        LASSERT(lck->lls_sub[i].sub_lock == NULL);
                OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
        }
        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
        EXIT;
}

/**
 * Tries to advance a state machine of a given sub-lock toward enqueuing of
 * the top-lock.
 *
 * \retval 0 if state-transition can proceed
 * \retval -ve otherwise.
 */
static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
                                struct cl_lock *sublock,
                                struct cl_io *io, __u32 enqflags, int last)
{
        int result;
        ENTRY;

        /* first, try to enqueue a sub-lock ... */
        result = cl_enqueue_try(env, sublock, io, enqflags);
        if (sublock->cll_state == CLS_ENQUEUED)
                /* if it is enqueued, try to `wait' on it---maybe it's already
                 * granted */
                result = cl_wait_try(env, sublock);
        /*
         * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
         * parallel, otherwise---enqueue has to wait until sub-lock is granted
         * before proceeding to the next one.
         */
        if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
            enqflags & CEF_ASYNC && !last)
                result = 0;
        RETURN(result);
}

/**
 * Helper function for lov_lock_enqueue() that creates a missing sub-lock.
 */
static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                            struct cl_io *io, struct lov_lock *lck, int idx)
{
        struct lov_lock_link *link;
        struct cl_lock       *sublock;
        int                   result;

        LASSERT(parent->cll_depth == 1);
        cl_lock_mutex_put(env, parent);
        sublock = lov_sublock_alloc(env, io, lck, idx, &link);
        if (!IS_ERR(sublock))
                cl_lock_mutex_get(env, sublock);
        cl_lock_mutex_get(env, parent);

        if (!IS_ERR(sublock)) {
                if (parent->cll_state == CLS_QUEUING &&
                    lck->lls_sub[idx].sub_lock == NULL)
                        lov_sublock_adopt(env, lck, sublock, idx, link);
                else {
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                        /* another thread allocated the sub-lock, or enqueue
                         * is no longer going on */
                        cl_lock_mutex_put(env, parent);
                        cl_lock_unhold(env, sublock, "lov-parent", parent);
                        cl_lock_mutex_get(env, parent);
                }
                cl_lock_mutex_put(env, sublock);
                result = CLO_REPEAT;
        } else
                result = PTR_ERR(sublock);
        return result;
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() for the lov layer.
 * This function is rather subtle, as it enqueues the top-lock (i.e., advances
 * the top-lock state machine from CLS_QUEUING to CLS_ENQUEUED states) by
 * juggling sub-lock state machines in the face of sub-lock sharing (by
 * multiple top-locks), and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, __u32 enqflags)
{
        struct cl_lock         *lock    = slice->cls_lock;
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, lock);
        int i;
        int result;
        enum cl_lock_state minstate;

        ENTRY;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct lov_lock_sub    *lls;
                struct cl_lock         *sublock;
                struct lov_sublock_env *subenv;

                if (lock->cll_state != CLS_QUEUING) {
                        /*
                         * Lock might have left QUEUING state if a previous
                         * iteration released its mutex. Stop enqueuing in
                         * this case and let the upper layer decide what to
                         * do.
                         */
                        LASSERT(i > 0 && result != 0);
                        break;
                }

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                /*
                 * Sub-lock might have been canceled, while top-lock was
                 * cached.
                 */
                if (sub == NULL) {
                        result = lov_sublock_fill(env, lock, io, lck, i);
                        /* lov_sublock_fill() released @lock mutex,
                         * restart. */
                        break;
                }
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        lov_sublock_hold(env, lck, i);
                        rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
                                                  subenv->lse_io, enqflags,
                                                  i == lck->lls_nr - 1);
                        minstate = min(minstate, sublock->cll_state);
                        /*
                         * Don't hold a sub-lock in CLS_CACHED state, see
                         * description for lov_lock::lls_sub.
                         */
                        if (sublock->cll_state > CLS_HELD)
                                rc = lov_sublock_release(env, lck, i, 1, rc);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
}

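/**
 * An implementation of cl_lock_operations::clo_unuse() for the lov layer:
 * moves every held sub-lock out of CLS_HELD state via cl_unuse_try() and
 * releases the top-lock's holds on them.
 */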
static int lov_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;
        int result;

        ENTRY;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                /* top-lock state cannot change concurrently, because a
                 * single thread (the one that released the last hold)
                 * carries unlocking through to completion. */
                LASSERT(slice->cls_lock->cll_state == CLS_UNLOCKING);
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL)
                        continue;

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                                LASSERT(sublock->cll_state == CLS_HELD);
                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                if (rc != CLO_WAIT)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 0, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        if (result == 0 && lck->lls_unuse_race) {
                lck->lls_unuse_race = 0;
                result = -ESTALE;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

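/**
 * An implementation of cl_lock_operations::clo_wait() for the lov layer:
 * waits until every sub-lock has reached at least CLS_HELD state, returning
 * CLO_WAIT as long as some sub-lock is still being granted.
 */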
static int lov_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        enum cl_lock_state      minstate;
        int                     result;
        int                     i;

        ENTRY;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state >= CLS_ENQUEUED);
                        if (sublock->cll_state < CLS_HELD)
                                rc = cl_wait_try(env, sublock);

                        minstate = min(minstate, sublock->cll_state);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
}

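/**
 * An implementation of cl_lock_operations::clo_use() for the lov layer:
 * re-activates a cached top-lock by calling cl_use_try() on each cached
 * sub-lock and re-acquiring holds on the sub-locks.
 */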
static int lov_lock_use(const struct lu_env *env,
                        const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int                     result;
        int                     i;

        LASSERT(slice->cls_lock->cll_state == CLS_CACHED);
        ENTRY;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                if (slice->cls_lock->cll_state != CLS_CACHED) {
                        /* see comment in lov_lock_enqueue(). */
                        LASSERT(i > 0 && result != 0);
                        break;
                }
                /*
                 * if a sub-lock was destroyed while top-lock was in
                 * CLS_CACHED state, top-lock would have been moved into
                 * CLS_NEW state, so all sub-locks have to be in place.
                 */
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state != CLS_FREEING);
                        lov_sublock_hold(env, lck, i);
                        if (sublock->cll_state == CLS_CACHED) {
                                rc = cl_use_try(subenv->lse_env, sublock);
                                if (rc != 0)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 1, rc);
                        } else
                                rc = 0;
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

#if 0
static int lock_lock_multi_match()
{
        struct cl_lock          *lock    = slice->cls_lock;
        struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
        struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0      = lov_r0(loo);
        struct lov_lock_sub     *sub;
        struct cl_object        *subobj;
        obd_off  fstart;
        obd_off  fend;
        obd_off  start;
        obd_off  end;
        int i;

        fstart = cl_offset(need->cld_obj, need->cld_start);
        fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
        subneed->cld_mode = need->cld_mode;
        cl_lock_mutex_get(env, lock);
        for (i = 0; i < lov->lls_nr; ++i) {
                sub = &lov->lls_sub[i];
                if (sub->sub_lock == NULL)
                        continue;
                subobj = sub->sub_descr.cld_obj;
                if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
                                           fstart, fend, &start, &end))
                        continue;
                subneed->cld_start = cl_index(subobj, start);
                subneed->cld_end   = cl_index(subobj, end);
                subneed->cld_obj   = subobj;
                if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
                        result = 0;
                        break;
                }
        }
        cl_lock_mutex_put(env, lock);
}
#endif

/**
 * Checks whether the extent region \a descr, restricted to the given
 * \a stripe, is covered by the sub-lock extent \a child.
 */
static int lov_lock_stripe_is_matching(const struct lu_env *env,
                                       struct lov_object *lov, int stripe,
                                       const struct cl_lock_descr *child,
                                       const struct cl_lock_descr *descr)
{
        struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
        obd_off start;
        obd_off end;
        int result;

        if (lov_r0(lov)->lo_nr == 1)
                return cl_lock_ext_match(child, descr);

        /*
         * For a multi-stripe object:
         * - make sure the descr only covers the child's stripe, and
         * - check if the extent is matching.
         */
        start = cl_offset(&lov->lo_cl, descr->cld_start);
        end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
        result = end - start <= lsm->lsm_stripe_size &&
                 stripe == lov_stripe_number(lsm, start) &&
                 stripe == lov_stripe_number(lsm, end);
        if (result) {
                struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
                obd_off sub_start;
                obd_off sub_end;

                subd->cld_obj  = NULL;   /* don't need sub object at all */
                subd->cld_mode = descr->cld_mode;
                subd->cld_gid  = descr->cld_gid;
                result = lov_stripe_intersects(lsm, stripe, start, end,
                                               &sub_start, &sub_end);
                LASSERT(result);
                subd->cld_start = cl_index(child->cld_obj, sub_start);
                subd->cld_end   = cl_index(child->cld_obj, sub_end);
                result = cl_lock_ext_match(child, subd);
        }
        return result;
}

/**
 * An implementation of cl_lock_operations::clo_fits_into() method.
 *
 * Checks whether a lock (given by \a slice) is suitable for \a
 * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
 * O_APPEND write.
 *
 * \see ccc_lock_fits_into().
 */
static int lov_lock_fits_into(const struct lu_env *env,
                              const struct cl_lock_slice *slice,
                              const struct cl_lock_descr *need,
                              const struct cl_io *io)
{
        struct lov_lock   *lov = cl2lov_lock(slice);
        struct lov_object *obj = cl2lov(slice->cls_obj);
        int result;

        LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
        LASSERT(lov->lls_nr > 0);

        ENTRY;

        if (need->cld_mode == CLM_GROUP)
                /*
                 * always allow matching a group lock.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        else if (lov->lls_nr == 1) {
                struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
                result = lov_lock_stripe_is_matching(env,
                                                     cl2lov(slice->cls_obj),
                                                     lov->lls_sub[0].sub_stripe,
                                                     got, need);
        } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
                   !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
                /*
                 * Multi-stripe locks are only suitable for `quick' IO and for
                 * glimpse.
                 */
                result = 0;
        else
                /*
                 * Most general case: multi-stripe existing lock, and
                 * (potentially) multi-stripe @need lock. Check that @need is
                 * covered by @lov's sub-locks.
                 *
                 * For now, ignore lock expansions made by the server, and
                 * match against the original lock extent.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
               PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
               lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
               result);
        RETURN(result);
}

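/**
 * Severs the link between top-lock \a link->lll_super and sub-lock \a sub:
 * removes the sub-lock from the parent's array, drops the parent reference
 * taken in lov_sublock_adopt() and frees \a link. Both locks must be mutexed
 * by the caller.
 */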
void lov_lock_unlink(const struct lu_env *env,
                     struct lov_lock_link *link, struct lovsub_lock *sub)
{
        struct lov_lock *lck    = link->lll_super;
        struct cl_lock  *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
        ENTRY;

        list_del_init(&link->lll_list);
        LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
        /* yank this sub-lock from parent's array */
        lck->lls_sub[link->lll_idx].sub_lock = NULL;
        LASSERT(lck->lls_nr_filled > 0);
        lck->lls_nr_filled--;
        lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
        cl_lock_put(env, parent);
        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        EXIT;
}

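/**
 * Returns the lov_lock_link through which sub-lock \a sub is attached to
 * top-lock \a lck, or NULL if the two locks are not linked.
 */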
struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
                                         struct lov_lock *lck,
                                         struct lovsub_lock *sub)
{
        struct lov_lock_link *scan;

        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
        ENTRY;

        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                if (scan->lll_super == lck)
                        RETURN(scan);
        }
        RETURN(NULL);
}

/**
 * An implementation of cl_lock_operations::clo_delete() method. This is
 * invoked for "top-to-bottom" delete, when lock destruction starts from the
 * top-lock, e.g., as a result of inode destruction.
 *
 * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted here:
 * this is done separately elsewhere:
 *
 *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
 *       each sub-object, purging its locks;
 *
 *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
 *       left in the cache.
 */
static void lov_lock_delete(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;

        LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
        ENTRY;

        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *lls;
                struct lovsub_lock  *lsl;
                struct cl_lock      *sublock;
                int rc;

                lls = &lck->lls_sub[i];
                lsl = lls->sub_lock;
                if (lsl == NULL)
                        continue;

                sublock = lsl->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, NULL);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD)
                                lov_sublock_release(env, lck, i, 1, 0);
                        if (sublock->cll_state < CLS_FREEING) {
                                struct lov_lock_link *link;

                                link = lov_lock_link_find(env, lck, lsl);
                                LASSERT(link != NULL);
                                lov_lock_unlink(env, link, lsl);
                                LASSERT(lck->lls_sub[i].sub_lock == NULL);
                        }
                        lov_sublock_unlock(env, lsl, closure, NULL);
                } else if (rc == CLO_REPEAT) {
                        --i; /* repeat with this lock */
                } else {
                        CL_LOCK_DEBUG(D_ERROR, env, sublock,
                                      "Cannot get sub-lock for delete: %i\n",
                                      rc);
                }
        }
        cl_lock_closure_fini(closure);
        EXIT;
}

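/**
 * An implementation of cl_lock_operations::clo_print() method: prints the
 * number of sub-locks and then each sub-lock slot with its flags.
 */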
static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int              i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
                if (sub->sub_lock != NULL)
                        cl_lock_print(env, cookie, p,
                                      sub->sub_lock->lss_cl.cls_lock);
                else
                        (*p)(env, cookie, "---\n");
        }
        return 0;
}

static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_wait      = lov_lock_wait,
        .clo_use       = lov_lock_use,
        .clo_unuse     = lov_lock_unuse,
        .clo_fits_into = lov_lock_fits_into,
        .clo_delete    = lov_lock_delete,
        .clo_print     = lov_lock_print
};

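/**
 * Allocates the lov_lock slice for \a lock on a RAID0-striped object \a obj,
 * adds it to the lock and fills in sub-lock descriptions for every stripe
 * covered by the lock extent (see lov_lock_sub_init()).
 */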
int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result;

        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
        if (lck != NULL) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
                result = lov_lock_sub_init(env, lck, io);
        } else
                result = -ENOMEM;
        RETURN(result);
}

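/**
 * Initializes and returns the per-thread lock closure rooted at \a parent,
 * used by top-lock operations to take the mutexes of sub-locks without
 * deadlocking.
 */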
static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent)
{
        struct cl_lock_closure *closure;

        closure = &lov_env_info(env)->lti_closure;
        LASSERT(list_empty(&closure->clc_list));
        cl_lock_closure_init(env, closure, parent, 1);
        return closure;
}


/** @} lov */