/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"
static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent);

static int lov_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice);

/*****************************************************************************
 *
 * Lov lock operations.
 *
 *****************************************************************************/
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io          *lio    = lov_env_io(env);
        struct cl_io           *io     = lio->lis_cl.cis_io;
        struct lov_io_sub      *sub;

        subenv = &lov_env_session(env)->ls_subenv;

        /*
         * FIXME: We tend to use the subio's env & io to call the sublock
         * lock operations because osc lock sometimes stores some control
         * variables in the thread's IO information (currently only the
         * lockless information). However, if the lock's host (object) is
         * different from the object of the current IO, we have no way to
         * get the subenv and subio, because they are not initialized at
         * all. As a temporary fix, in this case we still borrow the
         * parent's env to call the sublock operations.
         */
        if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io  = io;
                subenv->lse_sub = NULL;
        } else {
                sub = lov_sub_get(env, lio, lls->sub_stripe);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io  = sub->sub_io;
                        subenv->lse_sub = sub;
                } else {
                        subenv = (void *)sub;
                }
        }
        return subenv;
}
static void lov_sublock_env_put(struct lov_sublock_env *subenv)
{
        if (subenv && subenv->lse_sub)
                lov_sub_put(subenv->lse_sub);
}
static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
                              struct cl_lock *sublock, int idx,
                              struct lov_lock_link *link)
{
        struct lovsub_lock *lsl;
        struct cl_lock     *parent = lck->lls_cl.cls_lock;
        int                 rc;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sublock));
        LASSERT(link != NULL);

        lsl = cl2sub_lock(sublock);
        /*
         * check that sub-lock doesn't have lock link to this top-lock.
         */
        LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
        LASSERT(idx < lck->lls_nr);

        lck->lls_sub[idx].sub_lock = lsl;
        lck->lls_nr_filled++;
        LASSERT(lck->lls_nr_filled <= lck->lls_nr);
        list_add_tail(&link->lll_list, &lsl->lss_parents);
        link->lll_idx   = idx;
        link->lll_super = lck;
        cl_lock_get(parent);
        lu_ref_add(&parent->cll_reference, "lov-child", sublock);
        lck->lls_sub[idx].sub_flags |= LSF_HELD;
        cl_lock_user_add(env, sublock);

        rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
        LASSERT(rc == 0); /* there is no way this can fail, currently */
}
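
/*
 * The parent reference taken in lov_sublock_adopt() (cl_lock_get() plus
 * lu_ref_add()) is dropped by lov_lock_unlink() below, which performs the
 * matching lu_ref_del() and cl_lock_put() when the sub-lock is yanked from
 * the parent's array.
 */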
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
                                         const struct cl_io *io,
                                         struct lov_lock *lck,
                                         int idx, struct lov_lock_link **out)
{
        struct cl_lock       *sublock;
        struct cl_lock       *parent;
        struct lov_lock_link *link;

        LASSERT(idx < lck->lls_nr);

        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
        if (link != NULL) {
                struct lov_sublock_env *subenv;
                struct lov_lock_sub    *lls;
                struct cl_lock_descr   *descr;

                parent = lck->lls_cl.cls_lock;
                lls    = &lck->lls_sub[idx];
                descr  = &lls->sub_descr;

                subenv = lov_sublock_env_get(env, parent, lls);
                if (!IS_ERR(subenv)) {
                        /* CAVEAT: Don't try to add a field in lov_lock_sub
                         * to remember the subio. This is because a lock can
                         * be cached, but the same is not true for IO. This
                         * further means a sublock might be referenced in
                         * different io contexts. -jay */
                        sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
                                               descr, "lov-parent", parent);
                        lov_sublock_env_put(subenv);
                } else {
                        /* error occurred */
                        sublock = (void *)subenv;
                }

                if (!IS_ERR(sublock))
                        *out = link;
                else
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        } else
                sublock = ERR_PTR(-ENOMEM);
        return sublock;
}
static void lov_sublock_unlock(const struct lu_env *env,
                               struct lovsub_lock *lsl,
                               struct cl_lock_closure *closure,
                               struct lov_sublock_env *subenv)
{
        lov_sublock_env_put(subenv);
        lsl->lss_active = NULL;
        cl_lock_disclosure(env, closure);
}
static int lov_sublock_lock(const struct lu_env *env,
                            struct lov_lock *lck,
                            struct lov_lock_sub *lls,
                            struct cl_lock_closure *closure,
                            struct lov_sublock_env **lsep)
{
        struct lovsub_lock *sublock;
        struct cl_lock     *child;
        int                 result = 0;

        LASSERT(list_empty(&closure->clc_list));

        sublock = lls->sub_lock;
        child = sublock->lss_cl.cls_lock;
        result = cl_lock_closure_build(env, child, closure);
        if (result == 0) {
                struct cl_lock *parent = closure->clc_origin;

                LASSERT(cl_lock_is_mutexed(child));
                sublock->lss_active = parent;

                if (unlikely(child->cll_state == CLS_FREEING)) {
                        struct lov_lock_link *link;
                        /*
                         * we could race with lock deletion which temporarily
                         * put the lock into freeing state, bug 19080.
                         */
                        LASSERT(!(lls->sub_flags & LSF_HELD));

                        link = lov_lock_link_find(env, lck, sublock);
                        LASSERT(link != NULL);
                        lov_lock_unlink(env, link, sublock);
                        lov_sublock_unlock(env, sublock, closure, NULL);
                        lck->lls_cancel_race = 1;
                        result = CLO_REPEAT;
                } else if (lsep != NULL) {
                        struct lov_sublock_env *subenv;

                        subenv = lov_sublock_env_get(env, parent, lls);
                        if (IS_ERR(subenv)) {
                                lov_sublock_unlock(env, sublock,
                                                   closure, NULL);
                                result = PTR_ERR(subenv);
                        } else {
                                *lsep = subenv;
                        }
                }
        }
        return result;
}
/**
 * Updates the result of a top-lock operation from a result of sub-lock
 * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
 * over sub-locks and lov_subresult() is used to calculate the return value
 * of the top-operation. To this end, possible return values of
 * sub-operations are ordered as
 *
 *     - 0            success
 *     - CLO_WAIT     wait for event
 *     - CLO_REPEAT   repeat top-operation
 *     - -ne          fundamental error
 *
 * Top-level return code can only go down through this list. CLO_REPEAT
 * overwrites CLO_WAIT, because the lock mutex was released and the sleeping
 * condition has to be rechecked by the upper layer.
 */
static int lov_subresult(int result, int rc)
{
        int result_rank;
        int rc_rank;

        LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
        LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
        CLASSERT(CLO_WAIT < CLO_REPEAT);

        /* calculate ranks in the ordering above */
        result_rank = result < 0 ? 1 + CLO_REPEAT : result;
        rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;

        if (result_rank < rc_rank)
                result = rc;
        return result;
}
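
/*
 * Illustrative examples (not part of the original code): given the ordering
 * above, combining per-stripe results behaves as follows:
 *
 *     lov_subresult(0, CLO_WAIT)          == CLO_WAIT
 *     lov_subresult(CLO_WAIT, CLO_REPEAT) == CLO_REPEAT
 *     lov_subresult(CLO_REPEAT, -ENOMEM)  == -ENOMEM
 *
 * i.e., the more severe of the two codes always wins.
 */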
/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 */
static int lov_lock_sub_init(const struct lu_env *env,
                             struct lov_lock *lck, const struct cl_io *io)
{
        int result = 0;
        int i;
        int nr;
        obd_off start;
        obd_off end;
        obd_off file_start;
        obd_off file_end;

        struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0     = lov_r0(loo);
        struct cl_lock          *parent = lck->lls_cl.cls_lock;

        lck->lls_orig = parent->cll_descr;
        file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
        file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;

        for (i = 0, nr = 0; i < r0->lo_nr; i++) {
                /*
                 * XXX for wide striping a smarter algorithm, breaking out
                 * of the loop early, is desirable.
                 */
                if (lov_stripe_intersects(r0->lo_lsm, i,
                                          file_start, file_end, &start, &end))
                        nr++;
        }
        LASSERT(nr > 0);
        OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
        if (lck->lls_sub == NULL)
                return -ENOMEM;

        lck->lls_nr = nr;
        /*
         * First, fill in sub-lock descriptions in
         * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
         * (called below in this function, and by lov_lock_enqueue()) to
         * create sub-locks. At this moment, no other thread can access the
         * top-lock.
         */
        for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
                if (lov_stripe_intersects(r0->lo_lsm, i,
                                          file_start, file_end, &start, &end)) {
                        struct cl_lock_descr *descr;

                        descr = &lck->lls_sub[nr].sub_descr;

                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end   = cl_index(descr->cld_obj, end);
                        descr->cld_mode  = parent->cll_descr.cld_mode;
                        descr->cld_gid   = parent->cll_descr.cld_gid;
                        descr->cld_enq_flags = parent->cll_descr.cld_enq_flags;
                        /* XXX has no effect */
                        lck->lls_sub[nr].sub_got = *descr;
                        lck->lls_sub[nr].sub_stripe = i;
                        nr++;
                }
        }
        LASSERT(nr == lck->lls_nr);
        /*
         * Then, create sub-locks. Once at least one sub-lock was created,
         * top-lock can be reached by other threads.
         */
        for (i = 0; i < lck->lls_nr; ++i) {
                struct cl_lock       *sublock;
                struct lov_lock_link *link;

                if (lck->lls_sub[i].sub_lock == NULL) {
                        sublock = lov_sublock_alloc(env, io, lck, i, &link);
                        if (IS_ERR(sublock)) {
                                result = PTR_ERR(sublock);
                                break;
                        }
                        cl_lock_get_trust(sublock);
                        cl_lock_mutex_get(env, sublock);
                        cl_lock_mutex_get(env, parent);
                        /*
                         * recheck under mutex that sub-lock wasn't created
                         * concurrently, and that top-lock is still alive.
                         */
                        if (lck->lls_sub[i].sub_lock == NULL &&
                            parent->cll_state < CLS_FREEING) {
                                lov_sublock_adopt(env, lck, sublock, i, link);
                                cl_lock_mutex_put(env, parent);
                        } else {
                                OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                                cl_lock_mutex_put(env, parent);
                                cl_lock_unhold(env, sublock,
                                               "lov-parent", parent);
                        }
                        cl_lock_mutex_put(env, sublock);
                        cl_lock_put(env, sublock);
                }
        }
        /*
         * Some sub-locks can be missing at this point. This is not a problem,
         * because enqueue will create them anyway. The main duty of this
         * function is to fill in sub-lock descriptions in a race-free manner.
         */
        return result;
}
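
/*
 * Worked example (illustrative, not from the original source): with two
 * 1MB stripes and a top-lock extent covering file bytes [0, 3MB - 1], both
 * stripes intersect the extent.  Stripe 0 holds file chunks [0, 1MB) and
 * [2MB, 3MB), which map to the contiguous range [0, 2MB) in stripe-object
 * coordinates, so its sub-lock descriptor covers local bytes [0, 2MB - 1];
 * stripe 1 intersects only through file chunk [1MB, 2MB), giving the local
 * range [0, 1MB - 1].  lov_stripe_intersects() performs this file-offset to
 * stripe-offset translation, and cl_index() then converts byte offsets into
 * page indices for the descriptor.
 */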
static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
                               int i, int deluser, int rc)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));

        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                struct cl_lock *sublock;
                int dying;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));

                lck->lls_sub[i].sub_flags &= ~LSF_HELD;
                if (deluser)
                        cl_lock_user_del(env, sublock);
                /*
                 * If the last hold is released, and cancellation is pending
                 * for a sub-lock, release parent mutex, to avoid keeping it
                 * while sub-lock is being paged out.
                 */
                dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
                         sublock->cll_descr.cld_mode == CLM_GROUP ||
                         (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
                        sublock->cll_holds == 1;
                if (dying)
                        cl_lock_mutex_put(env, parent);
                cl_lock_unhold(env, sublock, "lov-parent", parent);
                if (dying) {
                        cl_lock_mutex_get(env, parent);
                        rc = lov_subresult(rc, CLO_REPEAT);
                }
                /*
                 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
                 * not backed by a reference on a sub-lock.
                 * lovsub_lock_delete() will clear lck->lls_sub[i].sub_lock
                 * under semaphores, just before the sub-lock is destroyed.
                 */
        }
        return rc;
}
static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
                             int i)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));

        if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
                struct cl_lock *sublock;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));
                LASSERT(sublock->cll_state != CLS_FREEING);

                lck->lls_sub[i].sub_flags |= LSF_HELD;

                cl_lock_get_trust(sublock);
                cl_lock_hold_add(env, sublock, "lov-parent", parent);
                cl_lock_user_add(env, sublock);
                cl_lock_put(env, sublock);
        }
}
static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lck;
        int i;

        lck = cl2lov_lock(slice);
        LASSERT(lck->lls_nr_filled == 0);
        if (lck->lls_sub != NULL) {
                for (i = 0; i < lck->lls_nr; ++i)
                        /*
                         * No sub-locks exist at this point, as a sub-lock
                         * holds a reference on its parent.
                         */
                        LASSERT(lck->lls_sub[i].sub_lock == NULL);
                OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
        }
        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}
/**
 * Tries to advance a state machine of a given sub-lock toward enqueuing of
 * the top-lock.
 *
 * \retval 0 if state-transition can proceed
 * \retval -ve otherwise.
 */
static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
                                struct cl_lock *sublock,
                                struct cl_io *io, __u32 enqflags, int last)
{
        int result;

        /* first, try to enqueue a sub-lock ... */
        result = cl_enqueue_try(env, sublock, io, enqflags);
        if (sublock->cll_state == CLS_ENQUEUED)
                /* if it is enqueued, try to `wait' on it---maybe it's already
                 * granted */
                result = cl_wait_try(env, sublock);
        /*
         * If the CEF_ASYNC flag is set, then all sub-locks can be enqueued
         * in parallel; otherwise enqueue has to wait until the sub-lock is
         * granted before proceeding to the next one.
         */
        if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
            enqflags & CEF_ASYNC && !last)
                result = 0;
        return result;
}
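
/*
 * For example (illustrative): with CEF_ASYNC set and three stripes, a
 * CLO_WAIT from the sub-locks of stripes 0 and 1 is converted to 0 above,
 * so the loop in lov_lock_enqueue() keeps going and all sub-locks are
 * enqueued in parallel; only a CLO_WAIT from the last stripe's sub-lock is
 * propagated to the caller.
 */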
/**
 * Helper function for lov_lock_enqueue() that creates a missing sub-lock.
 */
static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                            struct cl_io *io, struct lov_lock *lck, int idx)
{
        struct lov_lock_link *link;
        struct cl_lock       *sublock;
        int                   result;

        LASSERT(parent->cll_depth == 1);

        cl_lock_mutex_put(env, parent);
        sublock = lov_sublock_alloc(env, io, lck, idx, &link);
        if (!IS_ERR(sublock))
                cl_lock_mutex_get(env, sublock);
        cl_lock_mutex_get(env, parent);

        if (!IS_ERR(sublock)) {
                cl_lock_get_trust(sublock);
                if (parent->cll_state == CLS_QUEUING &&
                    lck->lls_sub[idx].sub_lock == NULL) {
                        lov_sublock_adopt(env, lck, sublock, idx, link);
                } else {
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                        /* other thread allocated sub-lock, or enqueue is no
                         * longer going on */
                        cl_lock_mutex_put(env, parent);
                        cl_lock_unhold(env, sublock, "lov-parent", parent);
                        cl_lock_mutex_get(env, parent);
                }
                cl_lock_mutex_put(env, sublock);
                cl_lock_put(env, sublock);
                result = CLO_REPEAT;
        } else
                result = PTR_ERR(sublock);
        return result;
}
/**
 * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
 * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
 * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
 * state machines in the face of sub-lock sharing (by multiple top-locks),
 * and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, __u32 enqflags)
{
        struct cl_lock         *lock    = slice->cls_lock;
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, lock);
        int i;
        int result;
        enum cl_lock_state minstate;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct lov_lock_sub    *lls;
                struct cl_lock         *sublock;
                struct lov_sublock_env *subenv;

                if (lock->cll_state != CLS_QUEUING) {
                        /*
                         * Lock might have left QUEUING state if a previous
                         * iteration released its mutex. Stop enqueuing in
                         * this case and let the upper layer decide what to
                         * do.
                         */
                        LASSERT(i > 0 && result != 0);
                        break;
                }

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                /*
                 * Sub-lock might have been canceled, while top-lock was
                 * cached.
                 */
                if (sub == NULL) {
                        result = lov_sublock_fill(env, lock, io, lck, i);
                        /* lov_sublock_fill() released @lock mutex,
                         * restart. */
                        break;
                }
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        lov_sublock_hold(env, lck, i);
                        rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
                                                  subenv->lse_io, enqflags,
                                                  i == lck->lls_nr - 1);
                        minstate = min(minstate, sublock->cll_state);
                        /*
                         * Don't hold a sub-lock in CLS_CACHED state, see
                         * description for lov_lock::lls_sub.
                         */
                        if (sublock->cll_state > CLS_HELD)
                                rc = lov_sublock_release(env, lck, i, 1, rc);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
}
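
/*
 * Note: the `result ?: ...' form used by lov_lock_enqueue() (and by
 * lov_lock_wait() below) is the GNU C conditional with an omitted middle
 * operand: `a ?: b' evaluates to `a' when `a' is non-zero and to `b'
 * otherwise.  A non-zero error or CLO_* code therefore takes precedence
 * over the aggregated sub-lock state check.
 */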
static int lov_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        int i;
        int result;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                /* top-lock state cannot change concurrently, because single
                 * thread (one that released the last hold) carries unlocking
                 * to the completion. */
                LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL)
                        continue;

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        if (lls->sub_flags & LSF_HELD) {
                                LASSERT(sublock->cll_state == CLS_HELD);
                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                rc = lov_sublock_release(env, lck,
                                                         i, 0, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }

        if (result == 0 && lck->lls_cancel_race) {
                lck->lls_cancel_race = 0;
                result = -ESTALE;
        }
        cl_lock_closure_fini(closure);
        return result;
}
static void lov_lock_cancel(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        int i;
        int result;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                /* top-lock state cannot change concurrently, because single
                 * thread (one that released the last hold) carries unlocking
                 * to the completion. */
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL)
                        continue;

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        if (lls->sub_flags & LSF_HELD) {
                                if (sublock->cll_state == CLS_HELD) {
                                        rc = cl_unuse_try(subenv->lse_env,
                                                          sublock);
                                        lov_sublock_release(env, lck, i, 0, 0);
                                } else {
                                        lov_sublock_release(env, lck, i, 1, 0);
                                }
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
        }

        if (result)
                CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
                              "lov_lock_cancel fails with %d.\n", result);

        cl_lock_closure_fini(closure);
}
static int lov_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        enum cl_lock_state      minstate;
        int                     result;
        int                     i;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state >= CLS_ENQUEUED);
                        if (sublock->cll_state < CLS_HELD)
                                rc = cl_wait_try(env, sublock);

                        minstate = min(minstate, sublock->cll_state);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
}
static int lov_lock_use(const struct lu_env *env,
                        const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        int                     result;
        int                     i;

        LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL) {
                        /*
                         * Sub-lock might have been canceled, while top-lock
                         * was cached.
                         */
                        result = -ESTALE;
                        continue;
                }

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state != CLS_FREEING);
                        lov_sublock_hold(env, lck, i);
                        if (sublock->cll_state == CLS_CACHED) {
                                rc = cl_use_try(subenv->lse_env, sublock, 0);
                                if (rc != 0)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 1, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }

        if (lck->lls_cancel_race) {
                /*
                 * If unlocking happened at the same time, then the sub-lock
                 * state should be FREEING, and lov_sublock_lock() should
                 * return CLO_REPEAT. In this case, this function should
                 * return -ESTALE, and the upper layer should reset the lock
                 * state to NEW.
                 */
                lck->lls_cancel_race = 0;
                LASSERT(result != 0);
                result = -ESTALE;
        }
        cl_lock_closure_fini(closure);
        return result;
}
#if 0
static int lock_lock_multi_match()
{
        struct cl_lock          *lock    = slice->cls_lock;
        struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
        struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0      = lov_r0(loo);
        struct lov_lock_sub     *sub;
        struct cl_object        *subobj;
        obd_off fstart;
        obd_off fend;
        obd_off start;
        obd_off end;
        int i;

        fstart = cl_offset(need->cld_obj, need->cld_start);
        fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
        subneed->cld_mode = need->cld_mode;
        cl_lock_mutex_get(env, lock);
        for (i = 0; i < lov->lls_nr; ++i) {
                sub = &lov->lls_sub[i];
                if (sub->sub_lock == NULL)
                        continue;
                subobj = sub->sub_descr.cld_obj;
                if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
                                           fstart, fend, &start, &end))
                        continue;
                subneed->cld_start = cl_index(subobj, start);
                subneed->cld_end   = cl_index(subobj, end);
                subneed->cld_obj   = subobj;
                if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
                        result = 0;
                        break;
                }
        }
        cl_lock_mutex_put(env, lock);
}
#endif
/**
 * Check if the extent region \a descr is covered by \a child against the
 * specific \a stripe.
 */
static int lov_lock_stripe_is_matching(const struct lu_env *env,
                                       struct lov_object *lov, int stripe,
                                       const struct cl_lock_descr *child,
                                       const struct cl_lock_descr *descr)
{
        struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
        obd_off start;
        obd_off end;
        int result;

        if (lov_r0(lov)->lo_nr == 1)
                return cl_lock_ext_match(child, descr);

        /*
         * For a multi-stripe object:
         *   - make sure the descr only covers the child's stripe, and
         *   - check if the extent is matching.
         */
        start = cl_offset(&lov->lo_cl, descr->cld_start);
        end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
        result = end - start <= lsm->lsm_stripe_size &&
                 stripe == lov_stripe_number(lsm, start) &&
                 stripe == lov_stripe_number(lsm, end);
        if (result) {
                struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
                obd_off sub_start;
                obd_off sub_end;

                subd->cld_obj  = NULL;   /* don't need sub object at all */
                subd->cld_mode = descr->cld_mode;
                subd->cld_gid  = descr->cld_gid;
                result = lov_stripe_intersects(lsm, stripe, start, end,
                                               &sub_start, &sub_end);
                LASSERT(result);
                subd->cld_start = cl_index(child->cld_obj, sub_start);
                subd->cld_end   = cl_index(child->cld_obj, sub_end);
                result = cl_lock_ext_match(child, subd);
        }
        return result;
}
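
/*
 * Worked example (illustrative): on a file with two 1MB stripes, a \a descr
 * covering file bytes [2MB, 2.5MB - 1] lies entirely within stripe 0 (its
 * length is under the stripe size and both endpoints map to stripe 0), so
 * the check above translates it to the stripe-local range [1MB, 1.5MB - 1]
 * and matches that against the \a child descriptor.  A descr spanning
 * [0.5MB, 1.5MB - 1] would touch both stripes and fail the
 * lov_stripe_number() test immediately.
 */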
/**
 * An implementation of cl_lock_operations::clo_fits_into() method.
 *
 * Checks whether a lock (given by \a slice) is suitable for \a
 * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
 * O_APPEND write.
 *
 * \see ccc_lock_fits_into().
 */
static int lov_lock_fits_into(const struct lu_env *env,
                              const struct cl_lock_slice *slice,
                              const struct cl_lock_descr *need,
                              const struct cl_io *io)
{
        struct lov_lock   *lov = cl2lov_lock(slice);
        struct lov_object *obj = cl2lov(slice->cls_obj);
        int result;

        LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
        LASSERT(lov->lls_nr > 0);

        if (need->cld_mode == CLM_GROUP)
                /*
                 * always allow matching a group lock.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        else if (lov->lls_nr == 1) {
                struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
                result = lov_lock_stripe_is_matching(env,
                                                     cl2lov(slice->cls_obj),
                                                     lov->lls_sub[0].sub_stripe,
                                                     got, need);
        } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
                   !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
                /*
                 * Multi-stripe locks are only suitable for `quick' IO and
                 * for arbitrary IO with a GROUP lock.
                 */
                result = 0;
        else
                /*
                 * Most general case: multi-stripe existing lock, and
                 * (potentially) multi-stripe @need lock. Check that @need is
                 * covered by @lov's sub-locks.
                 *
                 * For now, ignore lock expansions made by the server, and
                 * match against original lock extent.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
               PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
               lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
               result);
        return result;
}
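
/*
 * Illustrative summary of the branches above: a group-lock request always
 * matches against the original extent; a single-stripe lock defers to
 * lov_lock_stripe_is_matching(); a regular read/write io on a multi-stripe
 * file never reuses a multi-stripe lock (result = 0); and "quick" io
 * (truncate, CIT_MISC, O_APPEND writes) or CLM_PHANTOM requests fall
 * through to the extent match against lov->lls_orig.
 */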
void lov_lock_unlink(const struct lu_env *env,
                     struct lov_lock_link *link, struct lovsub_lock *sub)
{
        struct lov_lock *lck    = link->lll_super;
        struct cl_lock  *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));

        list_del_init(&link->lll_list);
        LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
        /* yank this sub-lock from parent's array */
        lck->lls_sub[link->lll_idx].sub_lock = NULL;
        LASSERT(lck->lls_nr_filled > 0);
        lck->lls_nr_filled--;
        lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
        cl_lock_put(env, parent);
        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
}
struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
                                         struct lov_lock *lck,
                                         struct lovsub_lock *sub)
{
        struct lov_lock_link *scan;

        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));

        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                if (scan->lll_super == lck)
                        return scan;
        }
        return NULL;
}
/**
 * An implementation of cl_lock_operations::clo_delete() method. This is
 * invoked for "top-to-bottom" delete, when lock destruction starts from the
 * top-lock, e.g., as a result of inode destruction.
 *
 * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
 * this is done separately elsewhere:
 *
 *     - for inode destruction, lov_object_delete() calls cl_object_kill()
 *       for each sub-object, purging its locks;
 *
 *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
 *       left in the cache.
 */
static void lov_lock_delete(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        int i;

        LASSERT(slice->cls_lock->cll_state == CLS_FREEING);

        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *lls;
                struct lovsub_lock  *lsl;
                struct cl_lock      *sublock;
                int rc;

                lls = &lck->lls_sub[i];
                lsl = lls->sub_lock;
                if (lsl == NULL)
                        continue;

                sublock = lsl->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, NULL);
                if (rc == 0) {
                        if (lls->sub_flags & LSF_HELD)
                                lov_sublock_release(env, lck, i, 1, 0);
                        if (sublock->cll_state < CLS_FREEING) {
                                struct lov_lock_link *link;

                                link = lov_lock_link_find(env, lck, lsl);
                                LASSERT(link != NULL);
                                lov_lock_unlink(env, link, lsl);
                                LASSERT(lck->lls_sub[i].sub_lock == NULL);
                        }
                        lov_sublock_unlock(env, lsl, closure, NULL);
                } else if (rc == CLO_REPEAT) {
                        --i; /* repeat with this lock */
                } else {
                        CL_LOCK_DEBUG(D_ERROR, env, sublock,
                                      "Cannot get sub-lock for delete: %i\n",
                                      rc);
                }
        }
        cl_lock_closure_fini(closure);
}
static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int              i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
                if (sub->sub_lock != NULL)
                        cl_lock_print(env, cookie, p,
                                      sub->sub_lock->lss_cl.cls_lock);
                else
                        (*p)(env, cookie, "---\n");
        }
        return 0;
}
static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_wait      = lov_lock_wait,
        .clo_use       = lov_lock_use,
        .clo_unuse     = lov_lock_unuse,
        .clo_cancel    = lov_lock_cancel,
        .clo_fits_into = lov_lock_fits_into,
        .clo_delete    = lov_lock_delete,
        .clo_print     = lov_lock_print
};
int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
        if (lck != NULL) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
                result = lov_lock_sub_init(env, lck, io);
        } else
                result = -ENOMEM;
        return result;
}
static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent)
{
        struct cl_lock_closure *closure;

        closure = &lov_env_info(env)->lti_closure;
        LASSERT(list_empty(&closure->clc_list));
        cl_lock_closure_init(env, closure, parent, 1);
        return closure;
}