1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * Implementation of cl_lock for LOV layer.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_LOV
42
43 #include "lov_cl_internal.h"
44
45 /** \addtogroup lov
46  *  @{
47  */
48
49 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
50                                                struct cl_lock *parent);
51
52 /*****************************************************************************
53  *
54  * Lov lock operations.
55  *
56  */
57
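/**
 * Selects the environment and IO to use for operations on a sub-lock.
 *
 * When the top-lock's object matches the object of the current IO, the
 * per-stripe sub-IO environment obtained through lov_sub_get() is used;
 * otherwise the parent's environment and IO are borrowed (see the FIXME
 * below).  Returns an ERR_PTR() on failure.
 */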
58 static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
59                                                    struct cl_lock *parent,
60                                                    struct lov_lock_sub *lls)
61 {
62         struct lov_sublock_env *subenv;
63         struct lov_io          *lio    = lov_env_io(env);
64         struct cl_io           *io     = lio->lis_cl.cis_io;
65         struct lov_io_sub      *sub;
66
67         subenv = &lov_env_session(env)->ls_subenv;
68
69         /*
70          * FIXME: We tend to use the sub-IO's env & io to call the sublock
71          * lock operations because the osc lock sometimes stores control
72          * variables in the thread's IO information (now only lockless info).
73          * However, if the lock's host (object) differs from the object of
74          * the current IO, we have no way to get the subenv and subio because
75          * they are not initialized at all. As a temporary fix, in this case
76          * we still borrow the parent's env to call sublock operations.
77          */
78         if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
79                 subenv->lse_env = env;
80                 subenv->lse_io  = io;
81                 subenv->lse_sub = NULL;
82         } else {
83                 LASSERT(io != NULL);
84                 sub = lov_sub_get(env, lio, lls->sub_stripe);
85                 if (!IS_ERR(sub)) {
86                         subenv->lse_env = sub->sub_env;
87                         subenv->lse_io  = sub->sub_io;
88                         subenv->lse_sub = sub;
89                 } else {
90                         subenv = (void*)sub;
91                 }
92         }
93         return subenv;
94 }
95
96 static void lov_sublock_env_put(struct lov_sublock_env *subenv)
97 {
98         if (subenv && subenv->lse_sub)
99                 lov_sub_put(subenv->lse_sub);
100 }
101
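/**
 * Attaches an already created sub-lock \a sublock to the top-lock \a lck at
 * index \a idx: records it in lls_sub[], links \a link onto the sub-lock's
 * list of parents, takes a reference on the parent, marks the slot LSF_HELD
 * and adds a lock user to the sub-lock.  Both locks must be mutexed.
 */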
102 static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
103                               struct cl_lock *sublock, int idx,
104                               struct lov_lock_link *link)
105 {
106         struct lovsub_lock *lsl;
107         struct cl_lock     *parent = lck->lls_cl.cls_lock;
108         int                 rc;
109
110         LASSERT(cl_lock_is_mutexed(parent));
111         LASSERT(cl_lock_is_mutexed(sublock));
112         ENTRY;
113
114         lsl = cl2sub_lock(sublock);
115         /*
116          * Check that the sub-lock doesn't already have a lock link to this top-lock.
117          */
118         LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
119         LASSERT(idx < lck->lls_nr);
120
121         lck->lls_sub[idx].sub_lock = lsl;
122         lck->lls_nr_filled++;
123         LASSERT(lck->lls_nr_filled <= lck->lls_nr);
124         list_add_tail(&link->lll_list, &lsl->lss_parents);
125         link->lll_idx = idx;
126         link->lll_super = lck;
127         cl_lock_get(parent);
128         lu_ref_add(&parent->cll_reference, "lov-child", sublock);
129         lck->lls_sub[idx].sub_flags |= LSF_HELD;
130         cl_lock_user_add(env, sublock);
131
132         rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
133         LASSERT(rc == 0); /* there is no way this can fail, currently */
134         EXIT;
135 }
136
137 static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
138                                          const struct cl_io *io,
139                                          struct lov_lock *lck,
140                                          int idx, struct lov_lock_link **out)
141 {
142         struct cl_lock       *sublock;
143         struct cl_lock       *parent;
144         struct lov_lock_link *link;
145
146         LASSERT(idx < lck->lls_nr);
147         ENTRY;
148
149         OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
150         if (link != NULL) {
151                 struct lov_sublock_env *subenv;
152                 struct lov_lock_sub  *lls;
153                 struct cl_lock_descr *descr;
154
155                 parent = lck->lls_cl.cls_lock;
156                 lls    = &lck->lls_sub[idx];
157                 descr  = &lls->sub_descr;
158
159                 subenv = lov_sublock_env_get(env, parent, lls);
160                 if (!IS_ERR(subenv)) {
161                         /* CAVEAT: Don't try to add a field in lov_lock_sub
162                          * to remember the subio. This is because a lock can
163                          * be cached, but an IO cannot. This further means a
164                          * sublock might be referenced in different IO
165                          * contexts. -jay */
166
167                         sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
168                                                descr, "lov-parent", parent);
169                         lov_sublock_env_put(subenv);
170                 } else {
171                         /* an error occurred */
172                         sublock = (void*)subenv;
173                 }
174
175                 if (!IS_ERR(sublock))
176                         *out = link;
177                 else
178                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
179         } else
180                 sublock = ERR_PTR(-ENOMEM);
181         RETURN(sublock);
182 }
183
184 static void lov_sublock_unlock(const struct lu_env *env,
185                                struct lovsub_lock *lsl,
186                                struct cl_lock_closure *closure,
187                                struct lov_sublock_env *subenv)
188 {
189         ENTRY;
190         lov_sublock_env_put(subenv);
191         lsl->lss_active = NULL;
192         cl_lock_disclosure(env, closure);
193         EXIT;
194 }
195
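/**
 * Mutexes the sub-lock \a lls->sub_lock by building a lock closure rooted at
 * the top-lock.  On success, optionally returns the sub-lock environment
 * through \a lsep.  Returns CLO_REPEAT if the sub-lock is concurrently being
 * freed and had to be unlinked from the top-lock.
 */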
196 static int lov_sublock_lock(const struct lu_env *env,
197                             struct lov_lock *lck,
198                             struct lov_lock_sub *lls,
199                             struct cl_lock_closure *closure,
200                             struct lov_sublock_env **lsep)
201 {
202         struct lovsub_lock *sublock;
203         struct cl_lock     *child;
204         int                 result = 0;
205         ENTRY;
206
207         LASSERT(list_empty(&closure->clc_list));
208
209         sublock = lls->sub_lock;
210         child = sublock->lss_cl.cls_lock;
211         result = cl_lock_closure_build(env, child, closure);
212         if (result == 0) {
213                 struct cl_lock *parent = closure->clc_origin;
214
215                 LASSERT(cl_lock_is_mutexed(child));
216                 sublock->lss_active = parent;
217
218                 if (unlikely(child->cll_state == CLS_FREEING)) {
219                         struct lov_lock_link *link;
220                         /*
221                          * We could race with lock deletion, which temporarily
222                          * puts the lock into the freeing state; see bug 19080.
223                          */
224                         LASSERT(!(lls->sub_flags & LSF_HELD));
225
226                         link = lov_lock_link_find(env, lck, sublock);
227                         LASSERT(link != NULL);
228                         lov_lock_unlink(env, link, sublock);
229                         lov_sublock_unlock(env, sublock, closure, NULL);
230                         result = CLO_REPEAT;
231                 } else if (lsep) {
232                         struct lov_sublock_env *subenv;
233                         subenv = lov_sublock_env_get(env, parent, lls);
234                         if (IS_ERR(subenv)) {
235                                 lov_sublock_unlock(env, sublock,
236                                                    closure, NULL);
237                                 result = PTR_ERR(subenv);
238                         } else {
239                                 *lsep = subenv;
240                         }
241                 }
242         }
243         RETURN(result);
244 }
245
246 /**
247  * Updates the result of a top-lock operation from a result of sub-lock
248  * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
249  * over sub-locks and lov_subresult() is used to calculate return value of a
250  * top-operation. To this end, possible return values of sub-operations are
251  * ordered as
252  *
253  *     - 0                  success
254  *     - CLO_WAIT           wait for event
255  *     - CLO_REPEAT         repeat top-operation
256  *     - -ve                fundamental error
257  *
258  * Top-level return code can only go down through this list. CLO_REPEAT
259  * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
260  * has to be rechecked by the upper layer.
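 *
 * For example, combining CLO_WAIT with CLO_REPEAT yields CLO_REPEAT, and
 * combining CLO_REPEAT with a negative errno yields that errno.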
261  */
262 static int lov_subresult(int result, int rc)
263 {
264         int result_rank;
265         int rc_rank;
266
267         LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
268         LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
269         CLASSERT(CLO_WAIT < CLO_REPEAT);
270
271         ENTRY;
272
273         /* calculate ranks in the ordering above */
274         result_rank = result < 0 ? 1 + CLO_REPEAT : result;
275         rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
276
277         if (result_rank < rc_rank)
278                 result = rc;
279         RETURN(result);
280 }
281
282 /**
283  * Creates sub-locks for a given lov_lock for the first time.
284  *
285  * Goes through all sub-objects of top-object, and creates sub-locks on every
286  * sub-object intersecting with top-lock extent. This is complicated by the
287  * fact that top-lock (that is being created) can be accessed concurrently
288  * through already created sub-locks (possibly shared with other top-locks).
289  */
290 static int lov_lock_sub_init(const struct lu_env *env,
291                              struct lov_lock *lck, const struct cl_io *io)
292 {
293         int result = 0;
294         int i;
295         int nr;
296         obd_off start;
297         obd_off end;
298         obd_off file_start;
299         obd_off file_end;
300
301         struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
302         struct lov_layout_raid0 *r0     = lov_r0(loo);
303         struct cl_lock          *parent = lck->lls_cl.cls_lock;
304
305         ENTRY;
306
307         lck->lls_orig = parent->cll_descr;
308         file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
309         file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
310
311         for (i = 0, nr = 0; i < r0->lo_nr; i++) {
312                 /*
313                  * XXX for wide striping a smarter algorithm is desirable,
314                  * breaking out of the loop early.
315                  */
316                 if (lov_stripe_intersects(r0->lo_lsm, i,
317                                           file_start, file_end, &start, &end))
318                         nr++;
319         }
320         LASSERT(nr > 0);
321         OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
322         if (lck->lls_sub == NULL)
323                 RETURN(-ENOMEM);
324
325         lck->lls_nr = nr;
326         /*
327          * First, fill in sub-lock descriptions in
328          * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
329          * (called below in this function, and by lov_lock_enqueue()) to
330          * create sub-locks. At this moment, no other thread can access
331          * top-lock.
332          */
333         for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
334                 if (lov_stripe_intersects(r0->lo_lsm, i,
335                                           file_start, file_end, &start, &end)) {
336                         struct cl_lock_descr *descr;
337
338                         descr = &lck->lls_sub[nr].sub_descr;
339
340                         LASSERT(descr->cld_obj == NULL);
341                         descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
342                         descr->cld_start = cl_index(descr->cld_obj, start);
343                         descr->cld_end   = cl_index(descr->cld_obj, end);
344                         descr->cld_mode  = parent->cll_descr.cld_mode;
345                         descr->cld_gid   = parent->cll_descr.cld_gid;
346                         /* XXX has no effect */
347                         lck->lls_sub[nr].sub_got = *descr;
348                         lck->lls_sub[nr].sub_stripe = i;
349                         nr++;
350                 }
351         }
352         LASSERT(nr == lck->lls_nr);
353         /*
354          * Then, create sub-locks. Once at least one sub-lock was created,
355          * top-lock can be reached by other threads.
356          */
357         for (i = 0; i < lck->lls_nr; ++i) {
358                 struct cl_lock       *sublock;
359                 struct lov_lock_link *link;
360
361                 if (lck->lls_sub[i].sub_lock == NULL) {
362                         sublock = lov_sublock_alloc(env, io, lck, i, &link);
363                         if (IS_ERR(sublock)) {
364                                 result = PTR_ERR(sublock);
365                                 break;
366                         }
367                         cl_lock_mutex_get(env, sublock);
368                         cl_lock_mutex_get(env, parent);
369                         /*
370                          * recheck under mutex that sub-lock wasn't created
371                          * concurrently, and that top-lock is still alive.
372                          */
373                         if (lck->lls_sub[i].sub_lock == NULL &&
374                             parent->cll_state < CLS_FREEING) {
375                                 lov_sublock_adopt(env, lck, sublock, i, link);
376                                 cl_lock_mutex_put(env, parent);
377                         } else {
378                                 OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
379                                 cl_lock_mutex_put(env, parent);
380                                 cl_lock_unhold(env, sublock,
381                                                "lov-parent", parent);
382                         }
383                         cl_lock_mutex_put(env, sublock);
384                 }
385         }
386         /*
387          * Some sub-locks can be missing at this point. This is not a problem,
388          * because enqueue will create them anyway. The main duty of this
389          * function is to fill in sub-lock descriptions in a race-free manner.
390          */
391         RETURN(result);
392 }
393
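/**
 * Releases the top-lock's hold on sub-lock number \a i: clears LSF_HELD,
 * removes the lock user if \a deluser is set, and drops the hold taken in
 * lov_sublock_adopt() or lov_sublock_hold().  If the sub-lock may be
 * cancelled once this last hold is dropped, the parent mutex is released
 * around cl_lock_unhold() and CLO_REPEAT is folded into \a rc.
 */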
394 static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
395                                int i, int deluser, int rc)
396 {
397         struct cl_lock *parent = lck->lls_cl.cls_lock;
398
399         LASSERT(cl_lock_is_mutexed(parent));
400         ENTRY;
401
402         if (lck->lls_sub[i].sub_flags & LSF_HELD) {
403                 struct cl_lock    *sublock;
404                 int dying;
405
406                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
407                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
408                 LASSERT(cl_lock_is_mutexed(sublock));
409
410                 lck->lls_sub[i].sub_flags &= ~LSF_HELD;
411                 if (deluser)
412                         cl_lock_user_del(env, sublock);
413                 /*
414                  * If the last hold is released, and cancellation is pending
415                  * for a sub-lock, release parent mutex, to avoid keeping it
416                  * while sub-lock is being paged out.
417                  */
418                 dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
419                          sublock->cll_descr.cld_mode == CLM_GROUP ||
420                          (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
421                         sublock->cll_holds == 1;
422                 if (dying)
423                         cl_lock_mutex_put(env, parent);
424                 cl_lock_unhold(env, sublock, "lov-parent", parent);
425                 if (dying) {
426                         cl_lock_mutex_get(env, parent);
427                         rc = lov_subresult(rc, CLO_REPEAT);
428                 }
429                 /*
430                  * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
431                  * not backed by a reference on a
432                  * sub-lock. lovsub_lock_delete() will clear
433                  * lck->lls_sub[i].sub_lock under semaphores, just before
434                  * sub-lock is destroyed.
435                  */
436         }
437         RETURN(rc);
438 }
439
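/**
 * Takes a hold and a lock user on sub-lock number \a i on behalf of the
 * top-lock and marks the slot LSF_HELD.  A no-op if the sub-lock is already
 * held.
 */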
440 static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
441                              int i)
442 {
443         struct cl_lock *parent = lck->lls_cl.cls_lock;
444
445         LASSERT(cl_lock_is_mutexed(parent));
446         ENTRY;
447
448         if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
449                 struct cl_lock *sublock;
450
451                 LASSERT(lck->lls_sub[i].sub_lock != NULL);
452                 sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
453                 LASSERT(cl_lock_is_mutexed(sublock));
454                 LASSERT(sublock->cll_state != CLS_FREEING);
455
456                 lck->lls_sub[i].sub_flags |= LSF_HELD;
457
458                 cl_lock_get_trust(sublock);
459                 cl_lock_hold_add(env, sublock, "lov-parent", parent);
460                 cl_lock_user_add(env, sublock);
461                 cl_lock_put(env, sublock);
462         }
463         EXIT;
464 }
465
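/**
 * Implementation of cl_lock_operations::clo_fini(): frees the lls_sub[]
 * array and the lov_lock itself.  All sub-locks must have been unlinked
 * already, as each sub-lock holds a reference on its parent.
 */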
466 static void lov_lock_fini(const struct lu_env *env,
467                           struct cl_lock_slice *slice)
468 {
469         struct lov_lock *lck;
470         int i;
471
472         ENTRY;
473         lck = cl2lov_lock(slice);
474         LASSERT(lck->lls_nr_filled == 0);
475         if (lck->lls_sub != NULL) {
476                 for (i = 0; i < lck->lls_nr; ++i)
477                         /*
478                          * No sub-locks exists at this point, as sub-lock has
479                          * No sub-locks exist at this point, as a sub-lock
480                          * holds a reference on its parent.
481                         LASSERT(lck->lls_sub[i].sub_lock == NULL);
482                 OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
483         }
484         OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
485         EXIT;
486 }
487
488 /**
489  * Tries to advance a state machine of a given sub-lock toward enqueuing of
490  * the top-lock.
491  *
492  * \retval 0 if state-transition can proceed
493  * \retval -ve otherwise.
494  */
495 static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
496                                 struct cl_lock *sublock,
497                                 struct cl_io *io, __u32 enqflags, int last)
498 {
499         int result;
500         ENTRY;
501
502         /* first, try to enqueue a sub-lock ... */
503         result = cl_enqueue_try(env, sublock, io, enqflags);
504         if (sublock->cll_state == CLS_ENQUEUED)
505                 /* if it is enqueued, try to `wait' on it---maybe it's already
506                  * granted */
507                 result = cl_wait_try(env, sublock);
508         /*
509          * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
510          * parallel, otherwise---enqueue has to wait until sub-lock is granted
511          * before proceeding to the next one.
512          */
513         if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
514             enqflags & CEF_ASYNC && !last)
515                 result = 0;
516         RETURN(result);
517 }
518
519 /**
520  * Helper function for lov_lock_enqueue() that creates missing sub-lock.
521  */
522 static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
523                             struct cl_io *io, struct lov_lock *lck, int idx)
524 {
525         struct lov_lock_link *link;
526         struct cl_lock       *sublock;
527         int                   result;
528
529         LASSERT(parent->cll_depth == 1);
530         cl_lock_mutex_put(env, parent);
531         sublock = lov_sublock_alloc(env, io, lck, idx, &link);
532         if (!IS_ERR(sublock))
533                 cl_lock_mutex_get(env, sublock);
534         cl_lock_mutex_get(env, parent);
535
536         if (!IS_ERR(sublock)) {
537                 if (parent->cll_state == CLS_QUEUING &&
538                     lck->lls_sub[idx].sub_lock == NULL)
539                         lov_sublock_adopt(env, lck, sublock, idx, link);
540                 else {
541                         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
542                         /* another thread allocated the sub-lock, or enqueue
543                          * is no longer in progress */
544                         cl_lock_mutex_put(env, parent);
545                         cl_lock_unhold(env, sublock, "lov-parent", parent);
546                         cl_lock_mutex_get(env, parent);
547                 }
548                 cl_lock_mutex_put(env, sublock);
549                 result = CLO_REPEAT;
550         } else
551                 result = PTR_ERR(sublock);
552         return result;
553 }
554
555 /**
556  * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
557  * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
558  * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
559  * state machines in the face of sub-locks sharing (by multiple top-locks),
560  * and concurrent sub-lock cancellations.
561  */
562 static int lov_lock_enqueue(const struct lu_env *env,
563                             const struct cl_lock_slice *slice,
564                             struct cl_io *io, __u32 enqflags)
565 {
566         struct cl_lock         *lock    = slice->cls_lock;
567         struct lov_lock        *lck     = cl2lov_lock(slice);
568         struct cl_lock_closure *closure = lov_closure_get(env, lock);
569         int i;
570         int result;
571         enum cl_lock_state minstate;
572
573         ENTRY;
574
575         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
576                 int rc;
577                 struct lovsub_lock     *sub;
578                 struct lov_lock_sub    *lls;
579                 struct cl_lock         *sublock;
580                 struct lov_sublock_env *subenv;
581
582                 if (lock->cll_state != CLS_QUEUING) {
583                         /*
584                          * Lock might have left QUEUING state if a previous
585                          * iteration released its mutex. Stop enqueuing in
586                          * this case and let the upper layer decide what to do.
587                          */
588                         LASSERT(i > 0 && result != 0);
589                         break;
590                 }
591
592                 lls = &lck->lls_sub[i];
593                 sub = lls->sub_lock;
594                 /*
595                  * Sub-lock might have been canceled while the top-lock was
596                  * cached.
597                  */
598                 if (sub == NULL) {
599                         result = lov_sublock_fill(env, lock, io, lck, i);
600                         /* lov_sublock_fill() released @lock mutex,
601                          * restart. */
602                         break;
603                 }
604                 sublock = sub->lss_cl.cls_lock;
605                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
606                 if (rc == 0) {
607                         lov_sublock_hold(env, lck, i);
608                         rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
609                                                   subenv->lse_io, enqflags,
610                                                   i == lck->lls_nr - 1);
611                         minstate = min(minstate, sublock->cll_state);
612                         /*
613                          * Don't hold a sub-lock in CLS_CACHED state, see
614                          * description for lov_lock::lls_sub.
615                          */
616                         if (sublock->cll_state > CLS_HELD)
617                                 rc = lov_sublock_release(env, lck, i, 1, rc);
618                         lov_sublock_unlock(env, sub, closure, subenv);
619                 }
620                 result = lov_subresult(result, rc);
621                 if (result != 0)
622                         break;
623         }
624         cl_lock_closure_fini(closure);
625         RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
626 }
627
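/**
 * Implementation of cl_lock_operations::clo_unuse() for the lov layer:
 * iterates over held sub-locks, calling cl_unuse_try() on each and releasing
 * the top-lock's hold on it.
 */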
628 static int lov_lock_unuse(const struct lu_env *env,
629                           const struct cl_lock_slice *slice)
630 {
631         struct lov_lock        *lck     = cl2lov_lock(slice);
632         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
633         int i;
634         int result;
635
636         ENTRY;
637
638         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
639                 int rc;
640                 struct lovsub_lock     *sub;
641                 struct cl_lock         *sublock;
642                 struct lov_lock_sub    *lls;
643                 struct lov_sublock_env *subenv;
644
645                 /* top-lock state cannot change concurrently, because a
646                  * single thread (the one that released the last hold)
647                  * carries unlocking to completion. */
648                 LASSERT(slice->cls_lock->cll_state == CLS_UNLOCKING);
649                 lls = &lck->lls_sub[i];
650                 sub = lls->sub_lock;
651                 if (sub == NULL)
652                         continue;
653
654                 sublock = sub->lss_cl.cls_lock;
655                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
656                 if (rc == 0) {
657                         if (lck->lls_sub[i].sub_flags & LSF_HELD) {
658                                 LASSERT(sublock->cll_state == CLS_HELD);
659                                 rc = cl_unuse_try(subenv->lse_env, sublock);
660                                 if (rc != CLO_WAIT)
661                                         rc = lov_sublock_release(env, lck,
662                                                                  i, 0, rc);
663                         }
664                         lov_sublock_unlock(env, sub, closure, subenv);
665                 }
666                 result = lov_subresult(result, rc);
667                 if (result < 0)
668                         break;
669         }
670         if (result == 0 && lck->lls_unuse_race) {
671                 lck->lls_unuse_race = 0;
672                 result = -ESTALE;
673         }
674         cl_lock_closure_fini(closure);
675         RETURN(result);
676 }
677
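/**
 * Implementation of cl_lock_operations::clo_wait() for the lov layer: waits
 * until every sub-lock reaches at least the CLS_HELD state, returning
 * CLO_WAIT while any of them is still being enqueued.
 */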
678 static int lov_lock_wait(const struct lu_env *env,
679                          const struct cl_lock_slice *slice)
680 {
681         struct lov_lock        *lck     = cl2lov_lock(slice);
682         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
683         enum cl_lock_state      minstate;
684         int                     result;
685         int                     i;
686
687         ENTRY;
688
689         for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
690                 int rc;
691                 struct lovsub_lock     *sub;
692                 struct cl_lock         *sublock;
693                 struct lov_lock_sub    *lls;
694                 struct lov_sublock_env *subenv;
695
696                 lls = &lck->lls_sub[i];
697                 sub = lls->sub_lock;
698                 LASSERT(sub != NULL);
699                 sublock = sub->lss_cl.cls_lock;
700                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
701                 if (rc == 0) {
702                         LASSERT(sublock->cll_state >= CLS_ENQUEUED);
703                         if (sublock->cll_state < CLS_HELD)
704                                 rc = cl_wait_try(env, sublock);
705
706                         minstate = min(minstate, sublock->cll_state);
707                         lov_sublock_unlock(env, sub, closure, subenv);
708                 }
709                 result = lov_subresult(result, rc);
710                 if (result < 0)
711                         break;
712         }
713         cl_lock_closure_fini(closure);
714         RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
715 }
716
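/**
 * Implementation of cl_lock_operations::clo_use() for the lov layer: brings
 * a cached top-lock back into use by calling cl_use_try() on every cached
 * sub-lock.
 */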
717 static int lov_lock_use(const struct lu_env *env,
718                         const struct cl_lock_slice *slice)
719 {
720         struct lov_lock        *lck     = cl2lov_lock(slice);
721         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
722         int                     result;
723         int                     i;
724
725         LASSERT(slice->cls_lock->cll_state == CLS_CACHED);
726         ENTRY;
727
728         for (result = 0, i = 0; i < lck->lls_nr; ++i) {
729                 int rc;
730                 struct lovsub_lock     *sub;
731                 struct cl_lock         *sublock;
732                 struct lov_lock_sub    *lls;
733                 struct lov_sublock_env *subenv;
734
735                 if (slice->cls_lock->cll_state != CLS_CACHED) {
736                         /* see comment in lov_lock_enqueue(). */
737                         LASSERT(i > 0 && result != 0);
738                         break;
739                 }
740                 /*
741                  * If a sub-lock had been destroyed while the top-lock was in
742                  * CLS_CACHED state, the top-lock would have been moved into
743                  * CLS_NEW state, so all sub-locks have to be in place.
744                  */
745                 lls = &lck->lls_sub[i];
746                 sub = lls->sub_lock;
747                 LASSERT(sub != NULL);
748                 sublock = sub->lss_cl.cls_lock;
749                 rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
750                 if (rc == 0) {
751                         LASSERT(sublock->cll_state != CLS_FREEING);
752                         lov_sublock_hold(env, lck, i);
753                         if (sublock->cll_state == CLS_CACHED) {
754                                 rc = cl_use_try(subenv->lse_env, sublock);
755                                 if (rc != 0)
756                                         rc = lov_sublock_release(env, lck,
757                                                                  i, 1, rc);
758                         } else
759                                 rc = 0;
760                         lov_sublock_unlock(env, sub, closure, subenv);
761                 }
762                 result = lov_subresult(result, rc);
763                 if (result < 0)
764                         break;
765         }
766         cl_lock_closure_fini(closure);
767         RETURN(result);
768 }
769
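/*
 * Disabled, incomplete sketch of matching a lock request against the
 * individual sub-locks of a multi-stripe lock; kept for reference only.
 */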
770 #if 0
771 static int lock_lock_multi_match()
772 {
773         struct cl_lock          *lock    = slice->cls_lock;
774         struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
775         struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
776         struct lov_layout_raid0 *r0      = lov_r0(loo);
777         struct lov_lock_sub     *sub;
778         struct cl_object        *subobj;
779         obd_off  fstart;
780         obd_off  fend;
781         obd_off  start;
782         obd_off  end;
783         int i;
784
785         fstart = cl_offset(need->cld_obj, need->cld_start);
786         fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
787         subneed->cld_mode = need->cld_mode;
788         cl_lock_mutex_get(env, lock);
789         for (i = 0; i < lov->lls_nr; ++i) {
790                 sub = &lov->lls_sub[i];
791                 if (sub->sub_lock == NULL)
792                         continue;
793                 subobj = sub->sub_descr.cld_obj;
794                 if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
795                                            fstart, fend, &start, &end))
796                         continue;
797                 subneed->cld_start = cl_index(subobj, start);
798                 subneed->cld_end   = cl_index(subobj, end);
799                 subneed->cld_obj   = subobj;
800                 if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
801                         result = 0;
802                         break;
803                 }
804         }
805         cl_lock_mutex_put(env, lock);
806 }
807 #endif
808
809 /**
810  * Check if the extent region \a descr is covered by \a child against the
811  * specific \a stripe.
812  */
813 static int lov_lock_stripe_is_matching(const struct lu_env *env,
814                                        struct lov_object *lov, int stripe,
815                                        const struct cl_lock_descr *child,
816                                        const struct cl_lock_descr *descr)
817 {
818         struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
819         obd_off start;
820         obd_off end;
821         int result;
822
823         if (lov_r0(lov)->lo_nr == 1)
824                 return cl_lock_ext_match(child, descr);
825
826         /*
827          * For a multi-stripe object:
828          * - make sure the descr only covers the child's stripe, and
829          * - check if extent is matching.
830          */
831         start = cl_offset(&lov->lo_cl, descr->cld_start);
832         end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
833         result = end - start <= lsm->lsm_stripe_size &&
834                  stripe == lov_stripe_number(lsm, start) &&
835                  stripe == lov_stripe_number(lsm, end);
836         if (result) {
837                 struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
838                 obd_off sub_start;
839                 obd_off sub_end;
840
841                 subd->cld_obj  = NULL;   /* don't need sub object at all */
842                 subd->cld_mode = descr->cld_mode;
843                 subd->cld_gid  = descr->cld_gid;
844                 result = lov_stripe_intersects(lsm, stripe, start, end,
845                                                &sub_start, &sub_end);
846                 LASSERT(result);
847                 subd->cld_start = cl_index(child->cld_obj, sub_start);
848                 subd->cld_end   = cl_index(child->cld_obj, sub_end);
849                 result = cl_lock_ext_match(child, subd);
850         }
851         return result;
852 }
853
854 /**
855  * An implementation of cl_lock_operations::clo_fits_into() method.
856  *
857  * Checks whether a lock (given by \a slice) is suitable for \a
858  * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
859  * O_APPEND write.
860  *
861  * \see ccc_lock_fits_into().
862  */
863 static int lov_lock_fits_into(const struct lu_env *env,
864                               const struct cl_lock_slice *slice,
865                               const struct cl_lock_descr *need,
866                               const struct cl_io *io)
867 {
868         struct lov_lock   *lov = cl2lov_lock(slice);
869         struct lov_object *obj = cl2lov(slice->cls_obj);
870         int result;
871
872         LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
873         LASSERT(lov->lls_nr > 0);
874
875         ENTRY;
876
877         if (need->cld_mode == CLM_GROUP)
878                 /*
879                  * always allow matching a group lock.
880                  */
881                 result = cl_lock_ext_match(&lov->lls_orig, need);
882         else if (lov->lls_nr == 1) {
883                 struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
884                 result = lov_lock_stripe_is_matching(env,
885                                                      cl2lov(slice->cls_obj),
886                                                      lov->lls_sub[0].sub_stripe,
887                                                      got, need);
888         } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
889                    !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
890                 /*
891                  * Multi-stripe locks are only suitable for `quick' IO and for
892                  * glimpse.
893                  */
894                 result = 0;
895         else
896                 /*
897                  * Most general case: multi-stripe existing lock, and
898                  * (potentially) multi-stripe @need lock. Check that @need is
899                  * covered by @lov's sub-locks.
900                  *
901                  * For now, ignore lock expansions made by the server, and
902                  * match against original lock extent.
903                  */
904                 result = cl_lock_ext_match(&lov->lls_orig, need);
905         CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
906                PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
907                lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
908                result);
909         RETURN(result);
910 }
911
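/**
 * Severs the link between the top-lock \a link->lll_super and the sub-lock
 * \a sub: removes \a link from the sub-lock's list of parents, clears the
 * corresponding lls_sub[] slot and drops the parent reference taken in
 * lov_sublock_adopt().  Both locks must be mutexed.
 */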
912 void lov_lock_unlink(const struct lu_env *env,
913                      struct lov_lock_link *link, struct lovsub_lock *sub)
914 {
915         struct lov_lock *lck    = link->lll_super;
916         struct cl_lock  *parent = lck->lls_cl.cls_lock;
917
918         LASSERT(cl_lock_is_mutexed(parent));
919         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
920         ENTRY;
921
922         list_del_init(&link->lll_list);
923         LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
924         /* yank this sub-lock from parent's array */
925         lck->lls_sub[link->lll_idx].sub_lock = NULL;
926         LASSERT(lck->lls_nr_filled > 0);
927         lck->lls_nr_filled--;
928         lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
929         cl_lock_put(env, parent);
930         OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
931         EXIT;
932 }
933
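/**
 * Finds the lov_lock_link that connects the top-lock \a lck with the
 * sub-lock \a sub, or returns NULL if no such link exists.
 */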
934 struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
935                                          struct lov_lock *lck,
936                                          struct lovsub_lock *sub)
937 {
938         struct lov_lock_link *scan;
939
940         LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
941         ENTRY;
942
943         list_for_each_entry(scan, &sub->lss_parents, lll_list) {
944                 if (scan->lll_super == lck)
945                         RETURN(scan);
946         }
947         RETURN(NULL);
948 }
949
950 /**
951  * An implementation of cl_lock_operations::clo_delete() method. This is
952  * invoked for "top-to-bottom" delete, when lock destruction starts from the
953  * top-lock, e.g., as a result of inode destruction.
954  *
955  * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
956  * this is done separately elsewhere:
957  *
958  *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
959  *       each sub-object, purging its locks;
960  *
961  *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
962  *       left in the cache.
963  */
964 static void lov_lock_delete(const struct lu_env *env,
965                             const struct cl_lock_slice *slice)
966 {
967         struct lov_lock        *lck     = cl2lov_lock(slice);
968         struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
969         int i;
970
971         LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
972         ENTRY;
973
974         for (i = 0; i < lck->lls_nr; ++i) {
975                 struct lov_lock_sub *lls;
976                 struct lovsub_lock  *lsl;
977                 struct cl_lock      *sublock;
978                 int rc;
979
980                 lls = &lck->lls_sub[i];
981                 lsl = lls->sub_lock;
982                 if (lsl == NULL)
983                         continue;
984
985                 sublock = lsl->lss_cl.cls_lock;
986                 rc = lov_sublock_lock(env, lck, lls, closure, NULL);
987                 if (rc == 0) {
988                         if (lck->lls_sub[i].sub_flags & LSF_HELD)
989                                 lov_sublock_release(env, lck, i, 1, 0);
990                         if (sublock->cll_state < CLS_FREEING) {
991                                 struct lov_lock_link *link;
992
993                                 link = lov_lock_link_find(env, lck, lsl);
994                                 LASSERT(link != NULL);
995                                 lov_lock_unlink(env, link, lsl);
996                                 LASSERT(lck->lls_sub[i].sub_lock == NULL);
997                         }
998                         lov_sublock_unlock(env, lsl, closure, NULL);
999                 } else if (rc == CLO_REPEAT) {
1000                         --i; /* repeat with this lock */
1001                 } else {
1002                         CL_LOCK_DEBUG(D_ERROR, env, sublock,
1003                                       "Cannot get sub-lock for delete: %i\n",
1004                                       rc);
1005                 }
1006         }
1007         cl_lock_closure_fini(closure);
1008         EXIT;
1009 }
1010
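/**
 * Implementation of cl_lock_operations::clo_print(): prints the number of
 * sub-locks followed by each sub-lock's index, flags and state.
 */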
1011 static int lov_lock_print(const struct lu_env *env, void *cookie,
1012                           lu_printer_t p, const struct cl_lock_slice *slice)
1013 {
1014         struct lov_lock *lck = cl2lov_lock(slice);
1015         int              i;
1016
1017         (*p)(env, cookie, "%d\n", lck->lls_nr);
1018         for (i = 0; i < lck->lls_nr; ++i) {
1019                 struct lov_lock_sub *sub;
1020
1021                 sub = &lck->lls_sub[i];
1022                 (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
1023                 if (sub->sub_lock != NULL)
1024                         cl_lock_print(env, cookie, p,
1025                                       sub->sub_lock->lss_cl.cls_lock);
1026                 else
1027                         (*p)(env, cookie, "---\n");
1028         }
1029         return 0;
1030 }
1031
1032 static const struct cl_lock_operations lov_lock_ops = {
1033         .clo_fini      = lov_lock_fini,
1034         .clo_enqueue   = lov_lock_enqueue,
1035         .clo_wait      = lov_lock_wait,
1036         .clo_use       = lov_lock_use,
1037         .clo_unuse     = lov_lock_unuse,
1038         .clo_fits_into = lov_lock_fits_into,
1039         .clo_delete    = lov_lock_delete,
1040         .clo_print     = lov_lock_print
1041 };
1042
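/**
 * Allocates the lov_lock slice for \a lock on a RAID0-striped object, adds
 * it to the lock and fills in the per-stripe sub-lock descriptions through
 * lov_lock_sub_init().
 */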
1043 int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
1044                         struct cl_lock *lock, const struct cl_io *io)
1045 {
1046         struct lov_lock *lck;
1047         int result;
1048
1049         ENTRY;
1050         OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
1051         if (lck != NULL) {
1052                 cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
1053                 result = lov_lock_sub_init(env, lck, io);
1054         } else
1055                 result = -ENOMEM;
1056         RETURN(result);
1057 }
1058
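/**
 * Returns the per-thread lock closure from lov_thread_info, initialized with
 * \a parent as its origin.
 */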
1059 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
1060                                                struct cl_lock *parent)
1061 {
1062         struct cl_lock_closure *closure;
1063
1064         closure = &lov_env_info(env)->lti_closure;
1065         LASSERT(list_empty(&closure->clc_list));
1066         cl_lock_closure_init(env, closure, parent, 1);
1067         return closure;
1068 }
1069
1070
1071 /** @} lov */