/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov lov @{ */

static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent);

/*****************************************************************************
 *
 * Lov lock operations.
 *
 */

static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io          *lio    = lov_env_io(env);
        struct cl_io           *io     = lio->lis_cl.cis_io;
        struct lov_io_sub      *sub;

        subenv = &lov_env_session(env)->ls_subenv;
        /*
         * FIXME: We tend to use the subio's env & io to call the sublock
         * lock operations because an osc lock sometimes stores some control
         * variables in the thread's IO information (now only the lockless
         * information). However, if the lock's host (object) differs from
         * the object of the current IO, we have no way to get the subenv
         * and the subio, because they are not initialized at all. As a
         * temporary fix, in this case we still borrow the parent's env to
         * call sublock operations.
         */
        if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io  = io;
                subenv->lse_sub = NULL;
        } else {
                sub = lov_sub_get(env, lio, lls->sub_stripe);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io  = sub->sub_io;
                        subenv->lse_sub = sub;
                } else {
                        subenv = (void *)sub;
                }
        }
        return subenv;
}

static void lov_sublock_env_put(struct lov_sublock_env *subenv)
{
        if (subenv && subenv->lse_sub)
                lov_sub_put(subenv->lse_sub);
}

static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
                              struct cl_lock *sublock, int idx,
                              struct lov_lock_link *link)
{
        struct lovsub_lock *lsl;
        struct cl_lock     *parent = lck->lls_cl.cls_lock;
        int                 rc;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sublock));
        ENTRY;

        lsl = cl2sub_lock(sublock);
        /*
         * check that sub-lock doesn't have lock link to this top-lock.
         */
        LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
        LASSERT(idx < lck->lls_nr);

        lck->lls_sub[idx].sub_lock = lsl;
        lck->lls_nr_filled++;
        LASSERT(lck->lls_nr_filled <= lck->lls_nr);
        list_add_tail(&link->lll_list, &lsl->lss_parents);
        link->lll_idx   = idx;
        link->lll_super = lck;
        cl_lock_get(parent);
        lu_ref_add(&parent->cll_reference, "lov-child", sublock);
        lck->lls_sub[idx].sub_flags |= LSF_HELD;
        cl_lock_user_add(env, sublock);

        rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
        LASSERT(rc == 0); /* there is no way this can fail, currently */
        EXIT;
}

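/*
 * The parent reference and the "lov-child" lu_ref taken in
 * lov_sublock_adopt() are the ones dropped by lov_lock_unlink() when the
 * link between the top-lock and the sub-lock is torn down.
 */
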
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
                                         const struct cl_io *io,
                                         struct lov_lock *lck,
                                         int idx, struct lov_lock_link **out)
{
        struct cl_lock       *sublock;
        struct cl_lock       *parent;
        struct lov_lock_link *link;

        LASSERT(idx < lck->lls_nr);
        ENTRY;

        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
        if (link != NULL) {
                struct lov_sublock_env *subenv;
                struct lov_lock_sub    *lls;
                struct cl_lock_descr   *descr;

                parent = lck->lls_cl.cls_lock;
                lls    = &lck->lls_sub[idx];
                descr  = &lls->sub_descr;

                subenv = lov_sublock_env_get(env, parent, lls);
                if (!IS_ERR(subenv)) {
                        /* CAVEAT: Don't try to add a field in lov_lock_sub
                         * to remember the subio. This is because a lock can
                         * be cached, but the same is not true for IO. This
                         * further means a sublock might be referenced in
                         * different io contexts. -jay */
                        sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
                                               descr, "lov-parent", parent);
                        lov_sublock_env_put(subenv);
                } else {
                        sublock = (void *)subenv;
                }
                if (!IS_ERR(sublock))
                        *out = link;
                else
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        } else
                sublock = ERR_PTR(-ENOMEM);
        RETURN(sublock);
}

static void lov_sublock_unlock(const struct lu_env *env,
                               struct lovsub_lock *lsl,
                               struct cl_lock_closure *closure,
                               struct lov_sublock_env *subenv)
{
        ENTRY;
        lov_sublock_env_put(subenv);
        lsl->lss_active = NULL;
        cl_lock_disclosure(env, closure);
        EXIT;
}

static int lov_sublock_lock(const struct lu_env *env,
                            struct lov_lock *lck,
                            struct lov_lock_sub *lls,
                            struct cl_lock_closure *closure,
                            struct lov_sublock_env **lsep)
{
        struct cl_lock *child;
        int             result = 0;
        ENTRY;

        LASSERT(list_empty(&closure->clc_list));

        child = lls->sub_lock->lss_cl.cls_lock;
        result = cl_lock_closure_build(env, child, closure);
        if (result == 0) {
                struct cl_lock *parent = closure->clc_origin;

                LASSERT(cl_lock_is_mutexed(child));
                lls->sub_lock->lss_active = parent;

                if (unlikely(child->cll_state == CLS_FREEING)) {
                        struct lov_lock_link *link;
                        /*
                         * we could race with lock deletion which temporarily
                         * put the lock into the freeing state, bug 19080.
                         */
                        LASSERT(!(lls->sub_flags & LSF_HELD));

                        link = lov_lock_link_find(env, lck, lls->sub_lock);
                        LASSERT(link != NULL);
                        lov_lock_unlink(env, link, lls->sub_lock);
                        lov_sublock_unlock(env, lls->sub_lock, closure, NULL);
                        result = CLO_REPEAT;
                } else if (lsep != NULL) {
                        struct lov_sublock_env *subenv;

                        subenv = lov_sublock_env_get(env, parent, lls);
                        if (IS_ERR(subenv)) {
                                lov_sublock_unlock(env, lls->sub_lock,
                                                   closure, NULL);
                                result = PTR_ERR(subenv);
                        } else {
                                *lsep = subenv;
                        }
                }
        }
        RETURN(result);
}

/**
 * Updates the result of a top-lock operation from a result of sub-lock
 * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
 * over sub-locks and lov_subresult() is used to calculate the return value
 * of a top-operation. To this end, possible return values of sub-operations
 * are ordered as
 *
 *     - 0            success
 *     - CLO_WAIT     wait for event
 *     - CLO_REPEAT   repeat top-operation
 *     - -ne          fundamental error
 *
 * Top-level return code can only go down through this list. CLO_REPEAT
 * overwrites CLO_WAIT, because the lock mutex was released and the sleeping
 * condition has to be rechecked by the upper layer.
 */
static int lov_subresult(int result, int rc)
{
        int result_rank;
        int rc_rank;

        LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
        LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
        CLASSERT(CLO_WAIT < CLO_REPEAT);

        ENTRY;

        /* calculate ranks in the ordering above */
        result_rank = result < 0 ? 1 + CLO_REPEAT : result;
        rc_rank     = rc < 0 ? 1 + CLO_REPEAT : rc;

        if (result_rank < rc_rank)
                result = rc;
        RETURN(result);
}

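/*
 * For example, combining CLO_WAIT from one sub-lock with CLO_REPEAT from
 * the next yields CLO_REPEAT, and a negative errno, ranked above
 * CLO_REPEAT, overrides both.
 */
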
/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 */
static int lov_lock_sub_init(const struct lu_env *env,
                             struct lov_lock *lck, const struct cl_io *io)
{
        int result = 0;
        int i;
        int j;
        int nr;
        int stripe;
        int start_stripe;
        obd_off start;
        obd_off end;
        obd_off file_start;
        obd_off file_end;

        struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0     = lov_r0(loo);
        struct cl_lock          *parent = lck->lls_cl.cls_lock;

        ENTRY;

        lck->lls_orig = parent->cll_descr;
        file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
        file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;

        start_stripe = lov_stripe_number(r0->lo_lsm, file_start);
        for (i = 0, nr = 0; i < r0->lo_nr; i++) {
                /*
                 * XXX for wide striping a smarter algorithm, breaking out
                 * of the loop early, is desirable.
                 */
                stripe = (start_stripe + i) % r0->lo_nr;
                if (lov_stripe_intersects(r0->lo_lsm, stripe,
                                          file_start, file_end, &start, &end))
                        nr++;
        }
        LASSERT(nr > 0);
        OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
        if (lck->lls_sub == NULL)
                RETURN(-ENOMEM);

        lck->lls_nr = nr;
        /*
         * First, fill in sub-lock descriptions in
         * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
         * (called below in this function, and by lov_lock_enqueue()) to
         * create sub-locks. At this moment, no other thread can access
         * the top-lock.
         */
        for (j = 0, nr = 0; j < i; ++j) {
                stripe = (start_stripe + j) % r0->lo_nr;
                if (lov_stripe_intersects(r0->lo_lsm, stripe,
                                          file_start, file_end,
                                          &start, &end)) {
                        struct cl_lock_descr *descr;

                        descr = &lck->lls_sub[nr].sub_descr;

                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj   = lovsub2cl(r0->lo_sub[stripe]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end   = cl_index(descr->cld_obj, end);
                        descr->cld_mode  = parent->cll_descr.cld_mode;
                        descr->cld_gid   = parent->cll_descr.cld_gid;
                        /* XXX has no effect */
                        lck->lls_sub[nr].sub_got    = *descr;
                        lck->lls_sub[nr].sub_stripe = stripe;
                        nr++;
                }
        }
        LASSERT(nr == lck->lls_nr);
        /*
         * Then, create sub-locks. Once at least one sub-lock was created,
         * the top-lock can be reached by other threads.
         */
        for (i = 0; i < lck->lls_nr; ++i) {
                struct cl_lock       *sublock;
                struct lov_lock_link *link;

                if (lck->lls_sub[i].sub_lock == NULL) {
                        sublock = lov_sublock_alloc(env, io, lck, i, &link);
                        if (IS_ERR(sublock)) {
                                result = PTR_ERR(sublock);
                                break;
                        }
                        cl_lock_mutex_get(env, sublock);
                        cl_lock_mutex_get(env, parent);
                        /*
                         * recheck under mutex that sub-lock wasn't created
                         * concurrently, and that top-lock is still alive.
                         */
                        if (lck->lls_sub[i].sub_lock == NULL &&
                            parent->cll_state < CLS_FREEING) {
                                lov_sublock_adopt(env, lck, sublock, i, link);
                                cl_lock_mutex_put(env, parent);
                        } else {
                                cl_lock_mutex_put(env, parent);
                                cl_lock_unhold(env, sublock,
                                               "lov-parent", parent);
                        }
                        cl_lock_mutex_put(env, sublock);
                }
        }
        /*
         * Some sub-locks can be missing at this point. This is not a
         * problem, because enqueue will create them anyway. The main duty
         * of this function is to fill in sub-lock descriptions in a
         * race-free manner.
         */
        RETURN(result);
}

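/*
 * For illustration (a hypothetical raid0 layout of two 1MB stripes): a
 * top-lock on file extent [0, 2MB) yields one sub-lock description per
 * stripe, each covering, in the sub-object's own offsets, the 1MB of the
 * extent stored on that stripe, converted to page indices by cl_index().
 */
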
static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
                               int i, int deluser, int rc)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        ENTRY;

        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                struct cl_lock *sublock;
                int dying;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));

                lck->lls_sub[i].sub_flags &= ~LSF_HELD;
                if (deluser)
                        cl_lock_user_del(env, sublock);
                /*
                 * If the last hold is released, and cancellation is pending
                 * for a sub-lock, release parent mutex, to avoid keeping it
                 * while sub-lock is being paged out.
                 */
                dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
                         sublock->cll_descr.cld_mode == CLM_GROUP ||
                         (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
                        sublock->cll_holds == 1;
                if (dying)
                        cl_lock_mutex_put(env, parent);
                cl_lock_unhold(env, sublock, "lov-parent", parent);
                if (dying) {
                        cl_lock_mutex_get(env, parent);
                        rc = lov_subresult(rc, CLO_REPEAT);
                }
                /*
                 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
                 * not backed by a reference on a sub-lock.
                 * lovsub_lock_delete() will clear lck->lls_sub[i].sub_lock
                 * under semaphores, just before the sub-lock is destroyed.
                 */
        }
        RETURN(rc);
}

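/*
 * The CLO_REPEAT folded into rc above reflects the fact that the parent
 * mutex was dropped around cl_lock_unhold(): whatever the top-operation
 * observed before is potentially stale and has to be re-validated by the
 * upper layer, as described at lov_subresult().
 */
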
static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
                             int i)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        ENTRY;

        if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
                struct cl_lock *sublock;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));
                LASSERT(sublock->cll_state != CLS_FREEING);

                lck->lls_sub[i].sub_flags |= LSF_HELD;

                cl_lock_get_trust(sublock);
                cl_lock_hold_add(env, sublock, "lov-parent", parent);
                cl_lock_user_add(env, sublock);
                cl_lock_put(env, sublock);
        }
        EXIT;
}

static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lck;
        int i;
        ENTRY;

        lck = cl2lov_lock(slice);
        LASSERT(lck->lls_nr_filled == 0);
        if (lck->lls_sub != NULL) {
                for (i = 0; i < lck->lls_nr; ++i)
                        /*
                         * No sub-locks exist at this point, as a sub-lock
                         * holds a reference on its parent.
                         */
                        LASSERT(lck->lls_sub[i].sub_lock == NULL);
                OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
        }
        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
        EXIT;
}

/**
 * Tries to advance the state machine of a given sub-lock toward enqueuing
 * of the top-lock.
 *
 * \retval 0 if state-transition can proceed
 * \retval -ve otherwise.
 */
static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
                                struct cl_lock *sublock,
                                struct cl_io *io, __u32 enqflags, int last)
{
        int result;
        ENTRY;

        /* first, try to enqueue a sub-lock ... */
        result = cl_enqueue_try(env, sublock, io, enqflags);
        if (sublock->cll_state == CLS_ENQUEUED)
                /* if it is enqueued, try to `wait' on it---maybe it's already
                 * granted */
                result = cl_wait_try(env, sublock);
        /*
         * If the CEF_ASYNC flag is set, then all sub-locks can be enqueued
         * in parallel, otherwise enqueue has to wait until the sub-lock is
         * granted before proceeding to the next one.
         */
        if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
            enqflags & CEF_ASYNC && !last)
                result = 0;
        RETURN(result);
}

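/*
 * Note that with CEF_ASYNC only the CLO_WAIT of the last sub-lock is
 * propagated: intermediate waits are converted into success so that the
 * remaining sub-locks are enqueued before the top-lock blocks.
 */
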
/**
 * Helper function for lov_lock_enqueue() that creates a missing sub-lock.
 */
static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                            struct cl_io *io, struct lov_lock *lck, int idx)
{
        struct lov_lock_link *link;
        struct cl_lock       *sublock;
        int                   result;
        ENTRY;

        LASSERT(parent->cll_depth == 1);
        cl_lock_mutex_put(env, parent);
        sublock = lov_sublock_alloc(env, io, lck, idx, &link);
        if (!IS_ERR(sublock))
                cl_lock_mutex_get(env, sublock);
        cl_lock_mutex_get(env, parent);

        if (!IS_ERR(sublock)) {
                if (parent->cll_state == CLS_QUEUING &&
                    lck->lls_sub[idx].sub_lock == NULL)
                        lov_sublock_adopt(env, lck, sublock, idx, link);
                else {
                        /* other thread allocated sub-lock, or enqueue is no
                         * longer going on */
                        cl_lock_mutex_put(env, parent);
                        cl_lock_unhold(env, sublock, "lov-parent", parent);
                        cl_lock_mutex_get(env, parent);
                }
                cl_lock_mutex_put(env, sublock);
                result = CLO_REPEAT;
        } else
                result = PTR_ERR(sublock);
        RETURN(result);
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
 * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
 * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
 * state machines in the face of sub-locks sharing (by multiple top-locks),
 * and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, __u32 enqflags)
{
        struct cl_lock         *lock    = slice->cls_lock;
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, lock);
        int i;
        int result;
        enum cl_lock_state minstate;

        ENTRY;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct lov_lock_sub    *lls;
                struct cl_lock         *sublock;
                struct lov_sublock_env *subenv;

                if (lock->cll_state != CLS_QUEUING) {
                        /*
                         * Lock might have left QUEUING state if a previous
                         * iteration released its mutex. Stop enqueuing in
                         * this case and let the upper layer decide what to
                         * do.
                         */
                        LASSERT(i > 0 && result != 0);
                        break;
                }

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                /*
                 * Sub-lock might have been canceled, while top-lock was
                 * cached.
                 */
                if (sub == NULL) {
                        result = lov_sublock_fill(env, lock, io, lck, i);
                        /* lov_sublock_fill() released @lock mutex,
                         * restart. */
                        break;
                }
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        lov_sublock_hold(env, lck, i);
                        rc = lov_lock_enqueue_one(subenv->lse_env, lck,
                                                  sublock, subenv->lse_io,
                                                  enqflags,
                                                  i == lck->lls_nr - 1);
                        minstate = min(minstate, sublock->cll_state);
                        /*
                         * Don't hold a sub-lock in CLS_CACHED state, see
                         * description for lov_lock::lls_sub.
                         */
                        if (sublock->cll_state > CLS_HELD)
                                rc = lov_sublock_release(env, lck, i, 1, rc);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
}

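/*
 * The final expression above lets the top-lock advance only once every
 * sub-lock has at least reached CLS_ENQUEUED: a zero result with a smaller
 * minstate is converted into CLO_WAIT.
 */
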
static int lov_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;
        int result;

        ENTRY;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                /* top-lock state cannot change concurrently, because a
                 * single thread (the one that released the last hold)
                 * carries unlocking to completion. */
                LASSERT(slice->cls_lock->cll_state == CLS_UNLOCKING);
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL)
                        continue;

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                                LASSERT(sublock->cll_state == CLS_HELD);
                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                if (rc != CLO_WAIT)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 0, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        if (result == 0 && lck->lls_unuse_race) {
                lck->lls_unuse_race = 0;
                result = -ESTALE;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

static int lov_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        enum cl_lock_state      minstate;
        int                     result;
        int                     i;

        ENTRY;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state >= CLS_ENQUEUED);
                        if (sublock->cll_state < CLS_HELD)
                                rc = cl_wait_try(env, sublock);

                        minstate = min(minstate, sublock->cll_state);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
}

static int lov_lock_use(const struct lu_env *env,
                        const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int result;
        int i;

        LASSERT(slice->cls_lock->cll_state == CLS_CACHED);
        ENTRY;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                if (slice->cls_lock->cll_state != CLS_CACHED) {
                        /* see comment in lov_lock_enqueue(). */
                        LASSERT(i > 0 && result != 0);
                        break;
                }
                /*
                 * if a sub-lock was destroyed while top-lock was in
                 * CLS_CACHED state, top-lock would have been moved into
                 * CLS_NEW state, so all sub-locks have to be in place.
                 */
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state != CLS_FREEING);
                        lov_sublock_hold(env, lck, i);
                        if (sublock->cll_state == CLS_CACHED) {
                                rc = cl_use_try(subenv->lse_env, sublock);
                                if (rc != 0)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 1, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

/*
 * Disabled draft of multi-stripe lock matching: it references names
 * (slice, env, need, lov, result) that are never defined, and is
 * compiled out.
 */
#if 0
static int lock_lock_multi_match()
{
        struct cl_lock          *lock    = slice->cls_lock;
        struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
        struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0      = lov_r0(loo);
        struct lov_lock_sub     *sub;
        struct cl_object        *subobj;
        obd_off fstart;
        obd_off fend;
        obd_off start;
        obd_off end;
        int i;

        fstart = cl_offset(need->cld_obj, need->cld_start);
        fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
        subneed->cld_mode = need->cld_mode;
        cl_lock_mutex_get(env, lock);
        for (i = 0; i < lov->lls_nr; ++i) {
                sub = &lov->lls_sub[i];
                if (sub->sub_lock == NULL)
                        continue;
                subobj = sub->sub_descr.cld_obj;
                if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
                                           fstart, fend, &start, &end))
                        continue;
                subneed->cld_start = cl_index(subobj, start);
                subneed->cld_end   = cl_index(subobj, end);
                subneed->cld_obj   = subobj;
                if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
                        result = 0;
                        break;
                }
        }
        cl_lock_mutex_put(env, lock);
}
#endif

/**
 * Check if the extent region \a descr is covered by \a child against the
 * specific \a stripe.
 */
static int lov_lock_stripe_is_matching(const struct lu_env *env,
                                       struct lov_object *lov, int stripe,
                                       const struct cl_lock_descr *child,
                                       const struct cl_lock_descr *descr)
{
        struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
        obd_off start;
        obd_off end;
        int result;

        if (lov_r0(lov)->lo_nr == 1)
                return cl_lock_ext_match(child, descr);

        /*
         * For a multi-stripe object:
         *   - make sure the descr only covers the child's stripe, and
         *   - check if the extent matches.
         */
        start = cl_offset(&lov->lo_cl, descr->cld_start);
        end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
        result = end - start <= lsm->lsm_stripe_size &&
                 stripe == lov_stripe_number(lsm, start) &&
                 stripe == lov_stripe_number(lsm, end);
        if (result) {
                struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
                obd_off sub_start;
                obd_off sub_end;

                subd->cld_obj  = NULL;   /* don't need sub object at all */
                subd->cld_mode = descr->cld_mode;
                subd->cld_gid  = descr->cld_gid;
                result = lov_stripe_intersects(lsm, stripe, start, end,
                                               &sub_start, &sub_end);
                LASSERT(result);
                subd->cld_start = cl_index(child->cld_obj, sub_start);
                subd->cld_end   = cl_index(child->cld_obj, sub_end);
                result = cl_lock_ext_match(child, subd);
        }
        return result;
}

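/*
 * Example (assuming a two-stripe layout with a 1MB stripe size): a descr
 * spanning [0, 2MB) covers two stripe units, so the
 * "end - start <= lsm->lsm_stripe_size" check fails and no single-stripe
 * child can match; a descr within [1MB, 2MB) maps entirely to stripe 1 and
 * is compared against the child's extent there.
 */
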
/**
 * An implementation of cl_lock_operations::clo_fits_into() method.
 *
 * Checks whether a lock (given by \a slice) is suitable for \a
 * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
 * O_APPEND write.
 *
 * \see ccc_lock_fits_into().
 */
static int lov_lock_fits_into(const struct lu_env *env,
                              const struct cl_lock_slice *slice,
                              const struct cl_lock_descr *need,
                              const struct cl_io *io)
{
        struct lov_lock   *lov = cl2lov_lock(slice);
        struct lov_object *obj = cl2lov(slice->cls_obj);
        int result;

        LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
        LASSERT(lov->lls_nr > 0);
        ENTRY;

        if (need->cld_mode == CLM_GROUP)
                /*
                 * always allow to match group lock.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        else if (lov->lls_nr == 1) {
                struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
                result = lov_lock_stripe_is_matching(env,
                                                     cl2lov(slice->cls_obj),
                                                     lov->lls_sub[0].sub_stripe,
                                                     got, need);
        } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
                   !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
                /*
                 * Multi-stripe locks are only suitable for `quick' IO and
                 * for locks covering the whole file: truncate, glimpse
                 * (CLM_PHANTOM) and O_APPEND writes.
                 */
                result = 0;
        else
                /*
                 * Most general case: multi-stripe existing lock, and
                 * (potentially) multi-stripe @need lock. Check that @need is
                 * covered by @lov's sub-locks.
                 *
                 * For now, ignore lock expansions made by the server, and
                 * match against original lock extent.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
               PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
               lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
               result);
        RETURN(result);
}

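/*
 * Example of the policy above: a truncate or glimpse may reuse a cached
 * multi-stripe lock, while an ordinary read or write over the same extent
 * is refused the match (result 0) and acquires its own per-stripe locks.
 */
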
void lov_lock_unlink(const struct lu_env *env,
                     struct lov_lock_link *link, struct lovsub_lock *sub)
{
        struct lov_lock *lck    = link->lll_super;
        struct cl_lock  *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
        ENTRY;

        list_del_init(&link->lll_list);
        LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
        /* yank this sub-lock from parent's array */
        lck->lls_sub[link->lll_idx].sub_lock = NULL;
        LASSERT(lck->lls_nr_filled > 0);
        lck->lls_nr_filled--;
        lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
        cl_lock_put(env, parent);
        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        EXIT;
}

struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
                                         struct lov_lock *lck,
                                         struct lovsub_lock *sub)
{
        struct lov_lock_link *scan;

        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
        ENTRY;

        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                if (scan->lll_super == lck)
                        RETURN(scan);
        }
        RETURN(NULL);
}

/**
 * An implementation of cl_lock_operations::clo_delete() method. This is
 * invoked for "top-to-bottom" delete, when lock destruction starts from the
 * top-lock, e.g., as a result of inode destruction.
 *
 * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
 * this is done separately elsewhere:
 *
 *     - for inode destruction, lov_object_delete() calls cl_object_kill()
 *       for each sub-object, purging its locks;
 *
 *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
 *       destroyed separately.
 */
static void lov_lock_delete(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;

        LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
        ENTRY;

        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *lls;
                struct lovsub_lock  *lsl;
                struct cl_lock      *sublock;
                int rc;

                lls = &lck->lls_sub[i];
                lsl = lls->sub_lock;
                if (lsl == NULL)
                        continue;

                sublock = lsl->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, NULL);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD)
                                lov_sublock_release(env, lck, i, 1, 0);
                        if (sublock->cll_state < CLS_FREEING) {
                                struct lov_lock_link *link;

                                link = lov_lock_link_find(env, lck, lsl);
                                LASSERT(link != NULL);
                                lov_lock_unlink(env, link, lsl);
                                LASSERT(lck->lls_sub[i].sub_lock == NULL);
                        }
                        lov_sublock_unlock(env, lsl, closure, NULL);
                } else if (rc == CLO_REPEAT) {
                        --i; /* repeat with this lock */
                } else {
                        CL_LOCK_DEBUG(D_ERROR, env, sublock,
                                      "Cannot get sub-lock for delete: %i\n",
                                      rc);
                }
        }
        cl_lock_closure_fini(closure);
        EXIT;
}

static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, " %d %x: ", i, sub->sub_flags);
                if (sub->sub_lock != NULL)
                        cl_lock_print(env, cookie, p,
                                      sub->sub_lock->lss_cl.cls_lock);
                else
                        (*p)(env, cookie, "---\n");
        }
        return 0;
}

static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_wait      = lov_lock_wait,
        .clo_use       = lov_lock_use,
        .clo_unuse     = lov_lock_unuse,
        .clo_fits_into = lov_lock_fits_into,
        .clo_delete    = lov_lock_delete,
        .clo_print     = lov_lock_print
};

int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result;

        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
        if (lck != NULL) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
                result = lov_lock_sub_init(env, lck, io);
        } else
                result = -ENOMEM;
        RETURN(result);
}

static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent)
{
        struct cl_lock_closure *closure;

        closure = &lov_env_info(env)->lti_closure;
        LASSERT(list_empty(&closure->clc_list));
        cl_lock_closure_init(env, closure, parent, 1);
        return closure;
}

/** @} lov */