[fs/lustre-release.git] / lustre / lov / lov_lock.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for LOV layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_LOV
42
43 #include "lov_cl_internal.h"
44
45 /** \addtogroup lov
46  *  @{
47  */
48
49 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
50                                                struct cl_lock *parent);
51
52 static int lov_lock_unuse(const struct lu_env *env,
53                           const struct cl_lock_slice *slice);
54 /*****************************************************************************
55  *
56  * Lov lock operations.
57  *
58  */
59
60 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
61                                                    struct cl_lock *parent,
62                                                    struct lov_lock_sub *lls)
63 {
64         struct lov_sublock_env *subenv;
65         struct lov_io          *lio    = lov_env_io(env);
66         struct cl_io           *io     = lio->lis_cl.cis_io;
67         struct lov_io_sub      *sub;
68
69         subenv = &lov_env_session(env)->ls_subenv;
70
71         /*
72          * FIXME: We tend to use the subio's env & io to call the sublock
73          * lock operations because osc lock sometimes stores some control
74          * variables in the thread's IO information (now only lockless information).
75          * However, if the lock's host (object) is different from the object
76          * for current IO, we have no way to get the subenv and subio because
77          * they are not initialized at all. As a temp fix, in this case,
78          * we still borrow the parent's env to call sublock operations.
79          */
80         if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
81                 subenv->lse_env = env;
82                 subenv->lse_io  = io;
83                 subenv->lse_sub = NULL;
84         } else {
85                 sub = lov_sub_get(env, lio, lls->sub_stripe);
86                 if (!IS_ERR(sub)) {
87                         subenv->lse_env = sub->sub_env;
88                         subenv->lse_io  = sub->sub_io;
89                         subenv->lse_sub = sub;
90                 } else {
91                         subenv = (void*)sub;
92                 }
93         }
94         return subenv;
95 }
96
97 static void lov_sublock_env_put(struct lov_sublock_env *subenv)
98 {
99         if (subenv && subenv->lse_sub)
100                 lov_sub_put(subenv->lse_sub);
101 }
102
103 static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
104                               struct cl_lock *sublock, int idx,
105                               struct lov_lock_link *link)
106 {
107         struct lovsub_lock *lsl;
108         struct cl_lock     *parent = lck->lls_cl.cls_lock;
109         int                 rc;
110
111         LASSERT(cl_lock_is_mutexed(parent));
112         LASSERT(cl_lock_is_mutexed(sublock));
113         ENTRY;
114
115         lsl = cl2sub_lock(sublock);
116         /*
117          * check that the sub-lock doesn't already have a lock link to this top-lock.
118          */
119         LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
120         LASSERT(idx < lck->lls_nr);
121
122         lck->lls_sub[idx].sub_lock = lsl;
123         lck->lls_nr_filled++;
124         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
125         list_add_tail(&link->lll_list, &lsl->lss_parents);
126         link->lll_idx = idx;
127         link->lll_super = lck;
128         cl_lock_get(parent);
129         lu_ref_add(&parent->cll_reference, "lov-child", sublock);
130         lck->lls_sub[idx].sub_flags |= LSF_HELD;
131         cl_lock_user_add(env, sublock);
132
133         rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
134         LASSERT(rc == 0); /* there is no way this can fail, currently */
135         EXIT;
136 }
137
138 static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
139                                          const struct cl_io *io,
140                                          struct lov_lock *lck,
141                                          int idx, struct lov_lock_link **out)
142 {
143         struct cl_lock       *sublock;
144         struct cl_lock       *parent;
145         struct lov_lock_link *link;
146
147         LASSERT(idx < lck->lls_nr);
148         ENTRY;
149
150         OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
151         if (link != NULL) {
152                 struct lov_sublock_env *subenv;
153                 struct lov_lock_sub  *lls;
154                 struct cl_lock_descr *descr;
155
156                 parent = lck->lls_cl.cls_lock;
157                 lls    = &lck->lls_sub[idx];
158                 descr  = &lls->sub_descr;
159
160                 subenv = lov_sublock_env_get(env, parent, lls);
161                 if (!IS_ERR(subenv)) {
162                         /* CAVEAT: Don't try to add a field in lov_lock_sub
163                          * to remember the subio. This is because a lock can be
164                          * cached, while an IO cannot. This further means a
165                          * sublock might be referenced in different IO
166                          * contexts. -jay */
167
168                         sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
169                                                descr, "lov-parent", parent);
170                         lov_sublock_env_put(subenv);
171                 } else {
172                         /* an error occurred */
173                         sublock = (void*)subenv;
174                 }
175
176                 if (!IS_ERR(sublock))
177                         *out = link;
178                 else
179                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
180         } else
181                 sublock = ERR_PTR(-ENOMEM);
182         RETURN(sublock);
183 }
184
185 static void lov_sublock_unlock(const struct lu_env *env,
186                                struct lovsub_lock *lsl,
187                                struct cl_lock_closure *closure,
188                                struct lov_sublock_env *subenv)
189 {
190         ENTRY;
191         lov_sublock_env_put(subenv);
192         lsl->lss_active = NULL;
193         cl_lock_disclosure(env, closure);
194         EXIT;
195 }
196
197 static int lov_sublock_lock(const struct lu_env *env,
198                             struct lov_lock *lck,
199                             struct lov_lock_sub *lls,
200                             struct cl_lock_closure *closure,
201                             struct lov_sublock_env **lsep)
202 {
203         struct lovsub_lock *sublock;
204         struct cl_lock     *child;
205         int                 result = 0;
206         ENTRY;
207
208         LASSERT(list_empty(&closure->clc_list));
209
210         sublock = lls->sub_lock;
211         child = sublock->lss_cl.cls_lock;
212         result = cl_lock_closure_build(env, child, closure);
213         if (result == 0) {
214                 struct cl_lock *parent = closure->clc_origin;
215
216                 LASSERT(cl_lock_is_mutexed(child));
217                 sublock->lss_active = parent;
218
219                 if (unlikely(child->cll_state == CLS_FREEING)) {
220                         struct lov_lock_link *link;
221                         /*
222                          * We could race with lock deletion, which temporarily
223                          * puts the lock into the freeing state; see bug 19080.
224                          */
225                         LASSERT(!(lls->sub_flags & LSF_HELD));
226
227                         link = lov_lock_link_find(env, lck, sublock);
228                         LASSERT(link != NULL);
229                         lov_lock_unlink(env, link, sublock);
230                         lov_sublock_unlock(env, sublock, closure, NULL);
231                         lck->lls_cancel_race = 1;
232                         result = CLO_REPEAT;
233                 } else if (lsep) {
234                         struct lov_sublock_env *subenv;
235                         subenv = lov_sublock_env_get(env, parent, lls);
236                         if (IS_ERR(subenv)) {
237                                 lov_sublock_unlock(env, sublock,
238                                                    closure, NULL);
239                                 result = PTR_ERR(subenv);
240                         } else {
241                                 *lsep = subenv;
242                         }
243                 }
244         }
245         RETURN(result);
246 }
247
248 /**
249  * Updates the result of a top-lock operation from a result of sub-lock
250  * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
251  * over sub-locks and lov_subresult() is used to calculate return value of a
252  * top-operation. To this end, possible return values of sub-operations are
253  * ordered as
254  *
255  *     - 0                  success
256  *     - CLO_WAIT           wait for event
257  *     - CLO_REPEAT         repeat top-operation
258  *     - -ve                fundamental error (a negative errno)
259  *
260  * Top-level return code can only go down through this list. CLO_REPEAT
261  * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
262  * has to be rechecked by the upper layer.
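     *
     * For illustration, a sketch of how results combine under this ranking
     * (these equalities follow from the rank computation below; they are not
     * additional behaviour):
     *
     *     lov_subresult(0, CLO_WAIT)          == CLO_WAIT
     *     lov_subresult(CLO_WAIT, CLO_REPEAT) == CLO_REPEAT
     *     lov_subresult(CLO_REPEAT, -ESTALE)  == -ESTALE
     *     lov_subresult(-ESTALE, CLO_WAIT)    == -ESTALE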
263  */
264 static int lov_subresult(int result, int rc)
265 {
266         int result_rank;
267         int rc_rank;
268
269         LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
270         LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
271         CLASSERT(CLO_WAIT < CLO_REPEAT);
272
273         ENTRY;
274
275         /* calculate ranks in the ordering above */
276         result_rank = result < 0 ? 1 + CLO_REPEAT : result;
277         rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
278
279         if (result_rank < rc_rank)
280                 result = rc;
281         RETURN(result);
282 }
283
284 /**
285  * Creates sub-locks for a given lov_lock for the first time.
286  *
287  * Goes through all sub-objects of top-object, and creates sub-locks on every
288  * sub-object intersecting with top-lock extent. This is complicated by the
289  * fact that top-lock (that is being created) can be accessed concurrently
290  * through already created sub-locks (possibly shared with other top-locks).
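     *
     * A sketch of the geometry involved (illustrative numbers only, assuming
     * a plain RAID-0 layout with two 1MB stripes): a top-lock on file bytes
     * [0, 3MB) intersects stripe 0 in its object range [0, 2MB) (file chunks
     * 0 and 2) and stripe 1 in its object range [0, 1MB) (file chunk 1), so
     * two sub-locks are created and lck->lls_nr == 2.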
291  */
292 static int lov_lock_sub_init(const struct lu_env *env,
293                              struct lov_lock *lck, const struct cl_io *io)
294 {
295         int result = 0;
296         int i;
297         int nr;
298         obd_off start;
299         obd_off end;
300         obd_off file_start;
301         obd_off file_end;
302
303         struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
304         struct lov_layout_raid0 *r0     = lov_r0(loo);
305         struct cl_lock          *parent = lck->lls_cl.cls_lock;
306
307         ENTRY;
308
309         lck->lls_orig = parent->cll_descr;
310         file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
311         file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
312
313         for (i = 0, nr = 0; i < r0->lo_nr; i++) {
314                 /*
315                  * XXX for wide striping a smarter algorithm is desirable,
316                  * breaking out of the loop early.
317                  */
318                 if (lov_stripe_intersects(r0->lo_lsm, i,
319                                           file_start, file_end, &start, &end))
320                         nr++;
321         }
322         LASSERT(nr > 0);
323         OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
324         if (lck->lls_sub == NULL)
325                 RETURN(-ENOMEM);
326
327         lck->lls_nr = nr;
328         /*
329          * First, fill in sub-lock descriptions in
330          * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
331          * (called below in this function, and by lov_lock_enqueue()) to
332          * create sub-locks. At this moment, no other thread can access
333          * top-lock.
334          */
335         for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
336                 if (lov_stripe_intersects(r0->lo_lsm, i,
337                                           file_start, file_end, &start, &end)) {
338                         struct cl_lock_descr *descr;
339
340                         descr = &lck->lls_sub[nr].sub_descr;
341
342                         LASSERT(descr->cld_obj == NULL);
343                         descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
344                         descr->cld_start = cl_index(descr->cld_obj, start);
345                         descr->cld_end   = cl_index(descr->cld_obj, end);
346                         descr->cld_mode  = parent->cll_descr.cld_mode;
347                         descr->cld_gid   = parent->cll_descr.cld_gid;
348                         descr->cld_enq_flags   = parent->cll_descr.cld_enq_flags;
349                         /* XXX has no effect */
350                         lck->lls_sub[nr].sub_got = *descr;
351                         lck->lls_sub[nr].sub_stripe = i;
352                         nr++;
353                 }
354         }
355         LASSERT(nr == lck->lls_nr);
356         /*
357          * Then, create sub-locks. Once at least one sub-lock was created,
358          * top-lock can be reached by other threads.
359          */
360         for (i = 0; i < lck->lls_nr; ++i) {
361                 struct cl_lock       *sublock;
362                 struct lov_lock_link *link;
363
364                 if (lck->lls_sub[i].sub_lock == NULL) {
365                         sublock = lov_sublock_alloc(env, io, lck, i, &link);
366                         if (IS_ERR(sublock)) {
367                                 result = PTR_ERR(sublock);
368                                 break;
369                         }
370                         cl_lock_get_trust(sublock);
371                         cl_lock_mutex_get(env, sublock);
372                         cl_lock_mutex_get(env, parent);
373                         /*
374                          * recheck under mutex that sub-lock wasn't created
375                          * concurrently, and that top-lock is still alive.
376                          */
377                         if (lck->lls_sub[i].sub_lock == NULL &&
378                             parent->cll_state < CLS_FREEING) {
379                                 lov_sublock_adopt(env, lck, sublock, i, link);
380                                 cl_lock_mutex_put(env, parent);
381                         } else {
382                                 OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
383                                 cl_lock_mutex_put(env, parent);
384                                 cl_lock_unhold(env, sublock,
385                                                "lov-parent", parent);
386                         }
387                         cl_lock_mutex_put(env, sublock);
388                         cl_lock_put(env, sublock);
389                 }
390         }
391         /*
392          * Some sub-locks can be missing at this point. This is not a problem,
393          * because enqueue will create them anyway. The main duty of this function
394          * is to fill in the sub-lock descriptions in a race-free manner.
395          */
396         RETURN(result);
397 }
398
399 static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
400                                int i, int deluser, int rc)
401 {
402         struct cl_lock *parent = lck->lls_cl.cls_lock;
403
404         LASSERT(cl_lock_is_mutexed(parent));
405         ENTRY;
406
407         if (lck->lls_sub[i].sub_flags & LSF_HELD) {
408                 struct cl_lock    *sublock;
409                 int dying;
410
411                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
412                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
413                 LASSERT(cl_lock_is_mutexed(sublock));
414
415                 lck->lls_sub[i].sub_flags &= ~LSF_HELD;
416                 if (deluser)
417                         cl_lock_user_del(env, sublock);
418                 /*
419                  * If the last hold is released, and cancellation is pending
420                  * for a sub-lock, release parent mutex, to avoid keeping it
421                  * while sub-lock is being paged out.
422                  */
423                 dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
424                          sublock->cll_descr.cld_mode == CLM_GROUP ||
425                          (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
426                         sublock->cll_holds == 1;
427                 if (dying)
428                         cl_lock_mutex_put(env, parent);
429                 cl_lock_unhold(env, sublock, "lov-parent", parent);
430                 if (dying) {
431                         cl_lock_mutex_get(env, parent);
432                         rc = lov_subresult(rc, CLO_REPEAT);
433                 }
434                 /*
435                  * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
436                  * not backed by a reference on a
437                  * sub-lock. lovsub_lock_delete() will clear
438                  * lck->lls_sub[i].sub_lock under semaphores, just before
439                  * sub-lock is destroyed.
440                  */
441         }
442         RETURN(rc);
443 }
444
445 static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
446                              int i)
447 {
448         struct cl_lock *parent = lck->lls_cl.cls_lock;
449
450         LASSERT(cl_lock_is_mutexed(parent));
451         ENTRY;
452
453         if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
454                 struct cl_lock *sublock;
455
456                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
457                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
458                 LASSERT(cl_lock_is_mutexed(sublock));
459                 LASSERT(sublock->cll_state != CLS_FREEING);
460
461                 lck->lls_sub[i].sub_flags |= LSF_HELD;
462
463                 cl_lock_get_trust(sublock);
464                 cl_lock_hold_add(env, sublock, "lov-parent", parent);
465                 cl_lock_user_add(env, sublock);
466                 cl_lock_put(env, sublock);
467         }
468         EXIT;
469 }
470
471 static void lov_lock_fini(const struct lu_env *env,
472                           struct cl_lock_slice *slice)
473 {
474         struct lov_lock *lck;
475         int i;
476
477         ENTRY;
478         lck = cl2lov_lock(slice);
479         LASSERT(lck->lls_nr_filled == 0);
480         if (lck->lls_sub != NULL) {
481                 for (i = 0; i < lck->lls_nr; ++i)
482                         /*
483                          * No sub-locks exist at this point, as each sub-lock
484                          * holds a reference on its parent.
485                          */
486                         LASSERT(lck->lls_sub[i].sub_lock == NULL);
487                 OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
488         }
489         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
490         EXIT;
491 }
492
493 /**
494  * Tries to advance a state machine of a given sub-lock toward enqueuing of
495  * the top-lock.
496  *
497  * \retval 0 if state-transition can proceed
498  * \retval -ve otherwise.
499  */
500 static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
501                                 struct cl_lock *sublock,
502                                 struct cl_io *io, __u32 enqflags, int last)
503 {
504         int result;
505         ENTRY;
506
507         /* first, try to enqueue a sub-lock ... */
508         result = cl_enqueue_try(env, sublock, io, enqflags);
509         if (sublock->cll_state == CLS_ENQUEUED)
510                 /* if it is enqueued, try to `wait' on it---maybe it's already
511                  * granted */
512                 result = cl_wait_try(env, sublock);
513         /*
514          * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
515          * parallel, otherwise---enqueue has to wait until sub-lock is granted
516          * before proceeding to the next one.
517          */
518         if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
519             enqflags & CEF_ASYNC && !last)
520                 result = 0;
521         RETURN(result);
522 }
523
524 /**
525  * Helper function for lov_lock_enqueue() that creates missing sub-lock.
526  */
527 static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
528                             struct cl_io *io, struct lov_lock *lck, int idx)
529 {
530         struct lov_lock_link *link;
531         struct cl_lock       *sublock;
532         int                   result;
533
534         LASSERT(parent->cll_depth == 1);
535         cl_lock_mutex_put(env, parent);
536         sublock = lov_sublock_alloc(env, io, lck, idx, &link);
537         if (!IS_ERR(sublock))
538                 cl_lock_mutex_get(env, sublock);
539         cl_lock_mutex_get(env, parent);
540
541         if (!IS_ERR(sublock)) {
542                 cl_lock_get_trust(sublock);
543                 if (parent->cll_state == CLS_QUEUING &&
544                     lck->lls_sub[idx].sub_lock == NULL) {
545                         lov_sublock_adopt(env, lck, sublock, idx, link);
546                 } else {
547                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
548                         /* another thread allocated the sub-lock, or the
549                          * enqueue is no longer going on */
550                         cl_lock_mutex_put(env, parent);
551                         cl_lock_unhold(env, sublock, "lov-parent", parent);
552                         cl_lock_mutex_get(env, parent);
553                 }
554                 cl_lock_mutex_put(env, sublock);
555                 cl_lock_put(env, sublock);
556                 result = CLO_REPEAT;
557         } else
558                 result = PTR_ERR(sublock);
559         return result;
560 }
561
562 /**
563  * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
564  * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
565  * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
566  * state machines in the face of sub-locks sharing (by multiple top-locks),
567  * and concurrent sub-lock cancellations.
568  */
569 static int lov_lock_enqueue(const struct lu_env *env,
570                             const struct cl_lock_slice *slice,
571                             struct cl_io *io, __u32 enqflags)
572 {
573         struct cl_lock         *lock    = slice->cls_lock;
574         struct lov_lock        *lck     = cl2lov_lock(slice);
575         struct cl_lock_closure *closure = lov_closure_get(env, lock);
576         int i;
577         int result;
578         enum cl_lock_state minstate;
579
580         ENTRY;
581
582         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
583                 int rc;
584                 struct lovsub_lock     *sub;
585                 struct lov_lock_sub    *lls;
586                 struct cl_lock         *sublock;
587                 struct lov_sublock_env *subenv;
588
589                 if (lock->cll_state != CLS_QUEUING) {
590                         /*
591                          * Lock might have left QUEUING state if previous
592                          * iteration released its mutex. Stop enqueuing in this
593                          * case and let the upper layer decide what to do.
594                          */
595                         LASSERT(i > 0 && result != 0);
596                         break;
597                 }
598
599                 lls = &lck->lls_sub[i];
600                 sub = lls->sub_lock;
601                 /*
602                  * Sub-lock might have been canceled, while top-lock was
603                  * cached.
604                  */
605                 if (sub == NULL) {
606                         result = lov_sublock_fill(env, lock, io, lck, i);
607                         /* lov_sublock_fill() released @lock mutex,
608                          * restart. */
609                         break;
610                 }
611                 sublock = sub->lss_cl.cls_lock;
612                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
613                 if (rc == 0) {
614                         lov_sublock_hold(env, lck, i);
615                         rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
616                                                   subenv->lse_io, enqflags,
617                                                   i == lck->lls_nr - 1);
618                         minstate = min(minstate, sublock->cll_state);
619                         /*
620                          * Don't hold a sub-lock in CLS_CACHED state, see
621                          * description for lov_lock::lls_sub.
622                          */
623                         if (sublock->cll_state > CLS_HELD)
624                                 rc = lov_sublock_release(env, lck, i, 1, rc);
625                         lov_sublock_unlock(env, sub, closure, subenv);
626                 }
627                 result = lov_subresult(result, rc);
628                 if (result != 0)
629                         break;
630         }
631         cl_lock_closure_fini(closure);
632         RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
633 }
634
635 static int lov_lock_unuse(const struct lu_env *env,
636                           const struct cl_lock_slice *slice)
637 {
638         struct lov_lock        *lck     = cl2lov_lock(slice);
639         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
640         int i;
641         int result;
642
643         ENTRY;
644
645         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
646                 int rc;
647                 struct lovsub_lock     *sub;
648                 struct cl_lock         *sublock;
649                 struct lov_lock_sub    *lls;
650                 struct lov_sublock_env *subenv;
651
652                 /* top-lock state cannot change concurrently, because the single
653                  * thread (the one that released the last hold) carries the
654                  * unlocking through to completion. */
655                 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
656                 lls = &lck->lls_sub[i];
657                 sub = lls->sub_lock;
658                 if (sub == NULL)
659                         continue;
660
661                 sublock = sub->lss_cl.cls_lock;
662                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
663                 if (rc == 0) {
664                         if (lls->sub_flags & LSF_HELD) {
665                                 LASSERT(sublock->cll_state == CLS_HELD);
666                                 rc = cl_unuse_try(subenv->lse_env, sublock);
667                                 if (rc != CLO_WAIT)
668                                         rc = lov_sublock_release(env, lck,
669                                                                  i, 0, rc);
670                         }
671                         lov_sublock_unlock(env, sub, closure, subenv);
672                 }
673                 result = lov_subresult(result, rc);
674                 if (result < 0)
675                         break;
676         }
677
678         if (result == 0 && lck->lls_cancel_race) {
679                 lck->lls_cancel_race = 0;
680                 result = -ESTALE;
681         }
682         cl_lock_closure_fini(closure);
683         RETURN(result);
684 }
685
686
687 static void lov_lock_cancel(const struct lu_env *env,
688                            const struct cl_lock_slice *slice)
689 {
690         struct lov_lock        *lck     = cl2lov_lock(slice);
691         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
692         int i;
693         int result;
694
695         ENTRY;
696
697         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
698                 int rc;
699                 struct lovsub_lock     *sub;
700                 struct cl_lock         *sublock;
701                 struct lov_lock_sub    *lls;
702                 struct lov_sublock_env *subenv;
703
704                 /* top-lock state cannot change concurrently, because the single
705                  * thread (the one that released the last hold) carries the
706                  * unlocking through to completion. */
707                 lls = &lck->lls_sub[i];
708                 sub = lls->sub_lock;
709                 if (sub == NULL)
710                         continue;
711
712                 sublock = sub->lss_cl.cls_lock;
713                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
714                 if (rc == 0) {
715                         if (lls->sub_flags & LSF_HELD) {
716                                 if (sublock->cll_state == CLS_HELD) {
717                                         rc = cl_unuse_try(subenv->lse_env,
718                                                           sublock);
719                                         lov_sublock_release(env, lck, i, 0, 0);
720                                 } else {
721                                         lov_sublock_release(env, lck, i, 1, 0);
722                                 }
723                         }
724                         lov_sublock_unlock(env, sub, closure, subenv);
725                 }
726                 result = lov_subresult(result, rc);
727                 if (result < 0)
728                         break;
729         }
730
731         cl_lock_closure_fini(closure);
732
733         return;
734 }
735
736 static int lov_lock_wait(const struct lu_env *env,
737                          const struct cl_lock_slice *slice)
738 {
739         struct lov_lock        *lck     = cl2lov_lock(slice);
740         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
741         enum cl_lock_state      minstate;
742         int                     result;
743         int                     i;
744
745         ENTRY;
746
747         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
748                 int rc;
749                 struct lovsub_lock     *sub;
750                 struct cl_lock         *sublock;
751                 struct lov_lock_sub    *lls;
752                 struct lov_sublock_env *subenv;
753
754                 lls = &lck->lls_sub[i];
755                 sub = lls->sub_lock;
756                 LASSERT(sub != NULL);
757                 sublock = sub->lss_cl.cls_lock;
758                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
759                 if (rc == 0) {
760                         LASSERT(sublock->cll_state >= CLS_ENQUEUED);
761                         if (sublock->cll_state < CLS_HELD)
762                                 rc = cl_wait_try(env, sublock);
763
764                         minstate = min(minstate, sublock->cll_state);
765                         lov_sublock_unlock(env, sub, closure, subenv);
766                 }
767                 result = lov_subresult(result, rc);
768                 if (result != 0)
769                         break;
770         }
771         cl_lock_closure_fini(closure);
772         RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
773 }
774
775 static int lov_lock_use(const struct lu_env *env,
776                         const struct cl_lock_slice *slice)
777 {
778         struct lov_lock        *lck     = cl2lov_lock(slice);
779         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
780         int                     result;
781         int                     i;
782
783         LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
784         ENTRY;
785
786         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
787                 int rc;
788                 struct lovsub_lock     *sub;
789                 struct cl_lock         *sublock;
790                 struct lov_lock_sub    *lls;
791                 struct lov_sublock_env *subenv;
792
793                 LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
794
795                 lls = &lck->lls_sub[i];
796                 sub = lls->sub_lock;
797                 if (sub == NULL) {
798                         /*
799                          * Sub-lock might have been canceled, while top-lock was
800                          * cached.
801                          */
802                         result = -ESTALE;
803                         break;
804                 }
805
806                 sublock = sub->lss_cl.cls_lock;
807                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
808                 if (rc == 0) {
809                         LASSERT(sublock->cll_state != CLS_FREEING);
810                         lov_sublock_hold(env, lck, i);
811                         if (sublock->cll_state == CLS_CACHED) {
812                                 rc = cl_use_try(subenv->lse_env, sublock, 0);
813                                 if (rc != 0)
814                                         rc = lov_sublock_release(env, lck,
815                                                                  i, 1, rc);
816                         }
817                         lov_sublock_unlock(env, sub, closure, subenv);
818                 }
819                 result = lov_subresult(result, rc);
820                 if (result != 0)
821                         break;
822         }
823
824         if (lck->lls_cancel_race) {
825                 /*
826                  * If unlocking happened at the same time, the sub-lock's
827                  * state should be FREEING, and lov_sublock_lock should
828                  * return CLO_REPEAT. In this case we return -ESTALE, and
829                  * the upper layer should reset the lock state to NEW.
830                  */
831                 lck->lls_cancel_race = 0;
832                 LASSERT(result != 0);
833                 result = -ESTALE;
834         }
835         cl_lock_closure_fini(closure);
836         RETURN(result);
837 }
838
839 #if 0
840 static int lock_lock_multi_match()
841 {
842         struct cl_lock          *lock    = slice->cls_lock;
843         struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
844         struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
845         struct lov_layout_raid0 *r0      = lov_r0(loo);
846         struct lov_lock_sub     *sub;
847         struct cl_object        *subobj;
848         obd_off  fstart;
849         obd_off  fend;
850         obd_off  start;
851         obd_off  end;
852         int i;
853
854         fstart = cl_offset(need->cld_obj, need->cld_start);
855         fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
856         subneed->cld_mode = need->cld_mode;
857         cl_lock_mutex_get(env, lock);
858         for (i = 0; i < lov->lls_nr; ++i) {
859                 sub = &lov->lls_sub[i];
860                 if (sub->sub_lock == NULL)
861                         continue;
862                 subobj = sub->sub_descr.cld_obj;
863                 if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
864                                            fstart, fend, &start, &end))
865                         continue;
866                 subneed->cld_start = cl_index(subobj, start);
867                 subneed->cld_end   = cl_index(subobj, end);
868                 subneed->cld_obj   = subobj;
869                 if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
870                         result = 0;
871                         break;
872                 }
873         }
874         cl_lock_mutex_put(env, lock);
875 }
876 #endif
877
878 /**
879  * Check if the extent region \a descr is covered by \a child against the
880  * specific \a stripe.
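     *
     * For example (a sketch, assuming a layout with two 1MB stripes): a
     * \a descr covering file bytes [1MB, 1.5MB) spans less than one stripe
     * and both of its ends map to stripe 1, so it is translated into that
     * stripe's object offsets and compared against \a child with
     * cl_lock_ext_match().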
881  */
882 static int lov_lock_stripe_is_matching(const struct lu_env *env,
883                                        struct lov_object *lov, int stripe,
884                                        const struct cl_lock_descr *child,
885                                        const struct cl_lock_descr *descr)
886 {
887         struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
888         obd_off start;
889         obd_off end;
890         int result;
891
892         if (lov_r0(lov)->lo_nr == 1)
893                 return cl_lock_ext_match(child, descr);
894
895         /*
896          * For a multi-stripe object:
897          * - make sure the descr only covers child's stripe, and
898          * - check if extent is matching.
899          */
900         start = cl_offset(&lov->lo_cl, descr->cld_start);
901         end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
902         result = end - start <= lsm->lsm_stripe_size &&
903                  stripe == lov_stripe_number(lsm, start) &&
904                  stripe == lov_stripe_number(lsm, end);
905         if (result) {
906                 struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
907                 obd_off sub_start;
908                 obd_off sub_end;
909
910                 subd->cld_obj  = NULL;   /* don't need sub object at all */
911                 subd->cld_mode = descr->cld_mode;
912                 subd->cld_gid  = descr->cld_gid;
913                 result = lov_stripe_intersects(lsm, stripe, start, end,
914                                                &sub_start, &sub_end);
915                 LASSERT(result);
916                 subd->cld_start = cl_index(child->cld_obj, sub_start);
917                 subd->cld_end   = cl_index(child->cld_obj, sub_end);
918                 result = cl_lock_ext_match(child, subd);
919         }
920         return result;
921 }
922
923 /**
924  * An implementation of cl_lock_operations::clo_fits_into() method.
925  *
926  * Checks whether a lock (given by \a slice) is suitable for \a
927  * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
928  * O_APPEND write.
929  *
930  * \see ccc_lock_fits_into().
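     *
     * As an illustration (a summary of the branches below, not additional
     * behaviour): a group-lock request is always matched against the original
     * top-lock extent; a one-stripe lock is checked against the single stripe
     * it covers; a multi-stripe lock fits only truncate, misc or append IO and
     * phantom (glimpse) requests, again matched against the original extent;
     * any other multi-stripe request is refused.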
931  */
932 static int lov_lock_fits_into(const struct lu_env *env,
933                               const struct cl_lock_slice *slice,
934                               const struct cl_lock_descr *need,
935                               const struct cl_io *io)
936 {
937         struct lov_lock   *lov = cl2lov_lock(slice);
938         struct lov_object *obj = cl2lov(slice->cls_obj);
939         int result;
940
941         LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
942         LASSERT(lov->lls_nr > 0);
943
944         ENTRY;
945
946         if (need->cld_mode == CLM_GROUP)
947                 /*
948                  * always allow matching a group lock.
949                  */
950                 result = cl_lock_ext_match(&lov->lls_orig, need);
951         else if (lov->lls_nr == 1) {
952                 struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
953                 result = lov_lock_stripe_is_matching(env,
954                                                      cl2lov(slice->cls_obj),
955                                                      lov->lls_sub[0].sub_stripe,
956                                                      got, need);
957         } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
958                    !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
959                 /*
960                  * Multi-stripe locks are only suitable for `quick' IO and for
961                  * glimpse.
962                  */
963                 result = 0;
964         else
965                 /*
966                  * Most general case: multi-stripe existing lock, and
967                  * (potentially) multi-stripe @need lock. Check that @need is
968                  * covered by @lov's sub-locks.
969                  *
970                  * For now, ignore lock expansions made by the server, and
971                  * match against original lock extent.
972                  */
973                 result = cl_lock_ext_match(&lov->lls_orig, need);
974         CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
975                PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
976                lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
977                result);
978         RETURN(result);
979 }
980
981 void lov_lock_unlink(const struct lu_env *env,
982                      struct lov_lock_link *link, struct lovsub_lock *sub)
983 {
984         struct lov_lock *lck    = link->lll_super;
985         struct cl_lock  *parent = lck->lls_cl.cls_lock;
986
987         LASSERT(cl_lock_is_mutexed(parent));
988         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
989         ENTRY;
990
991         list_del_init(&link->lll_list);
992         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
993         /* yank this sub-lock from parent's array */
994         lck->lls_sub[link->lll_idx].sub_lock = NULL;
995         LASSERT(lck->lls_nr_filled > 0);
996         lck->lls_nr_filled--;
997         lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
998         cl_lock_put(env, parent);
999         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
1000         EXIT;
1001 }
1002
1003 struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
1004                                          struct lov_lock *lck,
1005                                          struct lovsub_lock *sub)
1006 {
1007         struct lov_lock_link *scan;
1008
1009         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
1010         ENTRY;
1011
1012         list_for_each_entry(scan, &sub->lss_parents, lll_list) {
1013                 if (scan->lll_super == lck)
1014                         RETURN(scan);
1015         }
1016         RETURN(NULL);
1017 }
1018
1019 /**
1020  * An implementation of cl_lock_operations::clo_delete() method. This is
1021  * invoked for "top-to-bottom" delete, when lock destruction starts from the
1022  * top-lock, e.g., as a result of inode destruction.
1023  *
1024  * Unlinks the top-lock from all its sub-locks. Sub-locks are not deleted here;
1025  * that is done separately elsewhere:
1026  *
1027  *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
1028  *       each sub-object, purging its locks;
1029  *
1030  *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
1031  *       left in the cache.
1032  */
1033 static void lov_lock_delete(const struct lu_env *env,
1034                             const struct cl_lock_slice *slice)
1035 {
1036         struct lov_lock        *lck     = cl2lov_lock(slice);
1037         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
1038         int i;
1039
1040         LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
1041         ENTRY;
1042
1043         for (i = 0; i < lck->lls_nr; ++i) {
1044                 struct lov_lock_sub *lls;
1045                 struct lovsub_lock  *lsl;
1046                 struct cl_lock      *sublock;
1047                 int rc;
1048
1049                 lls = &lck->lls_sub[i];
1050                 lsl = lls->sub_lock;
1051                 if (lsl == NULL)
1052                         continue;
1053
1054                 sublock = lsl->lss_cl.cls_lock;
1055                 rc = lov_sublock_lock(env, lck, lls, closure, NULL);
1056                 if (rc == 0) {
1057                         if (lls->sub_flags & LSF_HELD)
1058                                 lov_sublock_release(env, lck, i, 1, 0);
1059                         if (sublock->cll_state < CLS_FREEING) {
1060                                 struct lov_lock_link *link;
1061
1062                                 link = lov_lock_link_find(env, lck, lsl);
1063                                 LASSERT(link != NULL);
1064                                 lov_lock_unlink(env, link, lsl);
1065                                 LASSERT(lck->lls_sub[i].sub_lock == NULL);
1066                         }
1067                         lov_sublock_unlock(env, lsl, closure, NULL);
1068                 } else if (rc == CLO_REPEAT) {
1069                         --i; /* repeat with this lock */
1070                 } else {
1071                         CL_LOCK_DEBUG(D_ERROR, env, sublock,
1072                                       "Cannot get sub-lock for delete: %i\n",
1073                                       rc);
1074                 }
1075         }
1076         cl_lock_closure_fini(closure);
1077         EXIT;
1078 }
1079
1080 static int lov_lock_print(const struct lu_env *env, void *cookie,
1081                           lu_printer_t p, const struct cl_lock_slice *slice)
1082 {
1083         struct lov_lock *lck = cl2lov_lock(slice);
1084         int              i;
1085
1086         (*p)(env, cookie, "%d\n", lck->lls_nr);
1087         for (i = 0; i < lck->lls_nr; ++i) {
1088                 struct lov_lock_sub *sub;
1089
1090                 sub = &lck->lls_sub[i];
1091                 (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
1092                 if (sub->sub_lock != NULL)
1093                         cl_lock_print(env, cookie, p,
1094                                       sub->sub_lock->lss_cl.cls_lock);
1095                 else
1096                         (*p)(env, cookie, "---\n");
1097         }
1098         return 0;
1099 }
1100
1101 static const struct cl_lock_operations lov_lock_ops = {
1102         .clo_fini      = lov_lock_fini,
1103         .clo_enqueue   = lov_lock_enqueue,
1104         .clo_wait      = lov_lock_wait,
1105         .clo_use       = lov_lock_use,
1106         .clo_unuse     = lov_lock_unuse,
1107         .clo_cancel    = lov_lock_cancel,
1108         .clo_fits_into = lov_lock_fits_into,
1109         .clo_delete    = lov_lock_delete,
1110         .clo_print     = lov_lock_print
1111 };
1112
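     /**
      * Top-level lock initialization entry for RAID-0 striped objects (summary
      * added for clarity): allocates the lov_lock slice, adds it to the
      * top-lock with lov_lock_ops and pre-computes the sub-lock descriptions
      * via lov_lock_sub_init().
      */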
1113 int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
1114                         struct cl_lock *lock, const struct cl_io *io)
1115 {
1116         struct lov_lock *lck;
1117         int result;
1118
1119         ENTRY;
1120         OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
1121         if (lck != NULL) {
1122                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
1123                 result = lov_lock_sub_init(env, lck, io);
1124         } else
1125                 result = -ENOMEM;
1126         RETURN(result);
1127 }
1128
1129 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
1130                                                struct cl_lock *parent)
1131 {
1132         struct cl_lock_closure *closure;
1133
1134         closure = &lov_env_info(env)->lti_closure;
1135         LASSERT(list_empty(&closure->clc_list));
1136         cl_lock_closure_init(env, closure, parent, 1);
1137         return closure;
1138 }
1139
1140
1141 /** @} lov */