/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov lov @{ */

static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent);

/*****************************************************************************
 *
 * Lov lock operations.
 *
 */

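/**
 * Returns the environment and IO with which operations on a sub-lock should
 * be called: normally the per-stripe sub-IO environment obtained through
 * lov_sub_get(), with the parent's environment borrowed as a fallback (see
 * the FIXME inside the function).
 */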
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io          *lio    = lov_env_io(env);
        struct cl_io           *io     = lio->lis_cl.cis_io;
        struct lov_io_sub      *sub;

        subenv = &lov_env_session(env)->ls_subenv;

        /*
         * FIXME: We tend to use the subio's env & io to call the sublock
         * lock operations because osc lock sometimes stores some control
         * variables in the thread's IO information (now only lockless
         * information). However, if the lock's host (object) is different
         * from the object of the current IO, we have no way to get the
         * subenv and subio because they are not initialized at all. As a
         * temporary fix, in this case we still borrow the parent's env to
         * call sublock operations.
         */
        if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io  = io;
                subenv->lse_sub = NULL;
        } else {
                sub = lov_sub_get(env, lio, lls->sub_stripe);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io  = sub->sub_io;
                        subenv->lse_sub = sub;
                } else {
                        subenv = (void *)sub;
                }
        }
        return subenv;
}

static void lov_sublock_env_put(struct lov_sublock_env *subenv)
{
        if (subenv && subenv->lse_sub)
                lov_sub_put(subenv->lse_sub);
}

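/**
 * Adopts a freshly created sub-lock into the top-lock \a lck at index
 * \a idx: records it in lck->lls_sub[], links it into the sub-lock's list
 * of parents, marks it as held (LSF_HELD) and registers a lock user on it.
 */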
static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
                              struct cl_lock *sublock, int idx,
                              struct lov_lock_link *link)
{
        struct lovsub_lock *lsl;
        struct cl_lock     *parent = lck->lls_cl.cls_lock;
        int                 rc;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sublock));
        LASSERT(link != NULL);

        lsl = cl2sub_lock(sublock);
        /*
         * check that sub-lock doesn't have a lock link to this top-lock.
         */
        LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
        LASSERT(idx < lck->lls_nr);

        lck->lls_sub[idx].sub_lock = lsl;
        lck->lls_nr_filled++;
        LASSERT(lck->lls_nr_filled <= lck->lls_nr);
        list_add_tail(&link->lll_list, &lsl->lss_parents);
        link->lll_idx   = idx;
        link->lll_super = lck;
        cl_lock_get(parent);
        lu_ref_add(&parent->cll_reference, "lov-child", sublock);
        lck->lls_sub[idx].sub_flags |= LSF_HELD;
        cl_lock_user_add(env, sublock);

        rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
        LASSERT(rc == 0); /* there is no way this can fail, currently */
}

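/**
 * Allocates a link and creates, with a hold, the sub-lock described by
 * lck->lls_sub[idx].sub_descr. On success returns the sub-lock and passes
 * the link back through \a out; on failure returns an ERR_PTR().
 */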
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
                                         const struct cl_io *io,
                                         struct lov_lock *lck,
                                         int idx, struct lov_lock_link **out)
{
        struct cl_lock       *sublock;
        struct cl_lock       *parent;
        struct lov_lock_link *link;

        LASSERT(idx < lck->lls_nr);
        ENTRY;

        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
        if (link != NULL) {
                struct lov_sublock_env *subenv;
                struct lov_lock_sub    *lls;
                struct cl_lock_descr   *descr;

                parent = lck->lls_cl.cls_lock;
                lls    = &lck->lls_sub[idx];
                descr  = &lls->sub_descr;

                subenv = lov_sublock_env_get(env, parent, lls);
                if (!IS_ERR(subenv)) {
                        /* CAVEAT: Don't try to add a field in lov_lock_sub
                         * to remember the subio. This is because lock is able
                         * to be cached, but this is not true for IO. This
                         * further means a sublock might be referenced in
                         * different io context. -jay */

                        sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
                                               descr, "lov-parent", parent);
                        lov_sublock_env_put(subenv);
                } else {
                        /* error occurred */
                        sublock = (void *)subenv;
                }

                if (!IS_ERR(sublock))
                        *out = link;
                else
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        } else
                sublock = ERR_PTR(-ENOMEM);
        RETURN(sublock);
}

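/**
 * Counterpart of lov_sublock_lock(): puts the sub-lock environment, clears
 * lss_active, and disbands the closure (releasing the mutexes it holds).
 */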
static void lov_sublock_unlock(const struct lu_env *env,
                               struct lovsub_lock *lsl,
                               struct cl_lock_closure *closure,
                               struct lov_sublock_env *subenv)
{
        ENTRY;
        lov_sublock_env_put(subenv);
        lsl->lss_active = NULL;
        cl_lock_disclosure(env, closure);
        EXIT;
}

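/**
 * Mutexes the sub-lock of \a lls as a part of the closure rooted at the
 * top-lock, recording the top-lock in lss_active, and optionally returns
 * the sub-lock environment through \a lsep. Undone by lov_sublock_unlock().
 */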
static int lov_sublock_lock(const struct lu_env *env,
                            struct lov_lock_sub *lls,
                            struct cl_lock_closure *closure,
                            struct lov_sublock_env **lsep)
{
        struct cl_lock *child;
        int             result;
        ENTRY;

        LASSERT(list_empty(&closure->clc_list));

        child = lls->sub_lock->lss_cl.cls_lock;
        result = cl_lock_closure_build(env, child, closure);
        if (result == 0) {
                struct cl_lock *parent = closure->clc_origin;

                LASSERT(cl_lock_is_mutexed(child));
                lls->sub_lock->lss_active = parent;

                if (lsep != NULL) {
                        struct lov_sublock_env *subenv;
                        subenv = lov_sublock_env_get(env, parent, lls);
                        if (IS_ERR(subenv)) {
                                lov_sublock_unlock(env, lls->sub_lock,
                                                   closure, NULL);
                                result = PTR_ERR(subenv);
                        } else
                                *lsep = subenv;
                }
        }
        RETURN(result);
}

/**
 * Updates the result of a top-lock operation from a result of sub-lock
 * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
 * over sub-locks and lov_subresult() is used to calculate return value of a
 * top-operation. To this end, possible return values of sub-operations are
 * ordered as
 *
 *     - 0              success
 *     - CLO_WAIT       wait for event
 *     - CLO_REPEAT     repeat top-operation
 *     - -ne            fundamental error
 *
 * Top-level return code can only go down through this list. CLO_REPEAT
 * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
 * has to be rechecked by the upper layer.
 */
static int lov_subresult(int result, int rc)
{
        int result_rank;
        int rc_rank;

        LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
        LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
        CLASSERT(CLO_WAIT < CLO_REPEAT);

        ENTRY;

        /* calculate ranks in the ordering above */
        result_rank = result < 0 ? 1 + CLO_REPEAT : result;
        rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;

        if (result_rank < rc_rank)
                result = rc;
        RETURN(result);
}

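/*
 * For example, following the ordering above:
 *
 *     lov_subresult(CLO_WAIT, 0)          == CLO_WAIT
 *     lov_subresult(CLO_WAIT, CLO_REPEAT) == CLO_REPEAT
 *     lov_subresult(CLO_REPEAT, -ENOMEM)  == -ENOMEM
 *
 * i.e., the "worst" (highest-ranked) of the two codes always wins.
 */
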
/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 */
static int lov_lock_sub_init(const struct lu_env *env,
                             struct lov_lock *lck, const struct cl_io *io)
{
        int result = 0;
        int i;
        int j;
        int nr;
        int stripe;
        int start_stripe;
        obd_off start;
        obd_off end;
        obd_off file_start;
        obd_off file_end;

        struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0     = lov_r0(loo);
        struct cl_lock          *parent = lck->lls_cl.cls_lock;

        ENTRY;

        lck->lls_orig = parent->cll_descr;
        file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
        file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;

        start_stripe = lov_stripe_number(r0->lo_lsm, file_start);
        for (i = 0, nr = 0; i < r0->lo_nr; i++) {
                /*
                 * XXX for wide striping a smarter algorithm is desirable,
                 * breaking out of the loop early.
                 */
                stripe = (start_stripe + i) % r0->lo_nr;
                if (lov_stripe_intersects(r0->lo_lsm, stripe,
                                          file_start, file_end, &start, &end))
                        nr++;
        }
        LASSERT(nr > 0);
        OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
        if (lck->lls_sub == NULL)
                RETURN(-ENOMEM);

        lck->lls_nr = nr;

        /*
         * First, fill in sub-lock descriptions in
         * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
         * (called below in this function, and by lov_lock_enqueue()) to
         * create sub-locks. At this moment, no other thread can access
         * top-lock.
         */
        for (j = 0, nr = 0; j < i; ++j) {
                stripe = (start_stripe + j) % r0->lo_nr;
                if (lov_stripe_intersects(r0->lo_lsm, stripe,
                                          file_start, file_end, &start, &end)) {
                        struct cl_lock_descr *descr;

                        descr = &lck->lls_sub[nr].sub_descr;

                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj   = lovsub2cl(r0->lo_sub[stripe]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end   = cl_index(descr->cld_obj, end);
                        descr->cld_mode  = parent->cll_descr.cld_mode;
                        /* XXX has no effect */
                        lck->lls_sub[nr].sub_got = *descr;
                        lck->lls_sub[nr].sub_stripe = stripe;
                        nr++;
                }
        }
        LASSERT(nr == lck->lls_nr);

        /*
         * Then, create sub-locks. Once at least one sub-lock was created,
         * top-lock can be reached by other threads.
         */
        for (i = 0; i < lck->lls_nr; ++i) {
                struct cl_lock       *sublock;
                struct lov_lock_link *link;

                if (lck->lls_sub[i].sub_lock == NULL) {
                        sublock = lov_sublock_alloc(env, io, lck, i, &link);
                        if (IS_ERR(sublock)) {
                                result = PTR_ERR(sublock);
                                break;
                        }
                        cl_lock_mutex_get(env, sublock);
                        cl_lock_mutex_get(env, parent);
                        /*
                         * recheck under mutex that sub-lock wasn't created
                         * concurrently, and that top-lock is still alive.
                         */
                        if (lck->lls_sub[i].sub_lock == NULL &&
                            parent->cll_state < CLS_FREEING) {
                                lov_sublock_adopt(env, lck, sublock, i, link);
                                cl_lock_mutex_put(env, parent);
                        } else {
                                cl_lock_mutex_put(env, parent);
                                cl_lock_unhold(env, sublock,
                                               "lov-parent", parent);
                        }
                        cl_lock_mutex_put(env, sublock);
                }
        }
        /*
         * Some sub-locks can be missing at this point. This is not a problem,
         * because enqueue will create them anyway. Main duty of this function
         * is to fill in sub-lock descriptions in a race-free manner.
         */
        RETURN(result);
}

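/*
 * For example, with a (hypothetical) layout of three 1MB stripes, a top-lock
 * extent covering file offsets [512K, 2.5M) intersects all three stripes:
 * lls_nr becomes 3, and each lls_sub[] description holds the intersection
 * with its stripe, converted to page indices in the sub-object by cl_index().
 */
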
static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
                               int i, int deluser, int rc)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        ENTRY;

        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                struct cl_lock *sublock;
                int dying;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));

                lck->lls_sub[i].sub_flags &= ~LSF_HELD;
                if (deluser)
                        cl_lock_user_del(env, sublock);
                /*
                 * If the last hold is released, and cancellation is pending
                 * for a sub-lock, release parent mutex, to avoid keeping it
                 * while sub-lock is being paged out.
                 */
                dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
                         (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
                        sublock->cll_holds == 1;
                if (dying)
                        cl_lock_mutex_put(env, parent);
                cl_lock_unhold(env, sublock, "lov-parent", parent);
                if (dying) {
                        cl_lock_mutex_get(env, parent);
                        rc = lov_subresult(rc, CLO_REPEAT);
                }
                /*
                 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
                 * not backed by a reference on a
                 * sub-lock. lovsub_lock_delete() will clear
                 * lck->lls_sub[i].sub_lock under semaphores, just before
                 * sub-lock is destroyed.
                 */
        }
        RETURN(rc);
}

static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
                             int i)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        ENTRY;

        if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
                struct cl_lock *sublock;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));
                LASSERT(sublock->cll_state != CLS_FREEING);

                lck->lls_sub[i].sub_flags |= LSF_HELD;

                cl_lock_get_trust(sublock);
                cl_lock_hold_add(env, sublock, "lov-parent", parent);
                cl_lock_user_add(env, sublock);
                cl_lock_put(env, sublock);
        }
        EXIT;
}

static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lck;
        int i;

        ENTRY;
        lck = cl2lov_lock(slice);
        LASSERT(lck->lls_nr_filled == 0);
        if (lck->lls_sub != NULL) {
                for (i = 0; i < lck->lls_nr; ++i)
                        /*
                         * No sub-locks exist at this point, as each sub-lock
                         * holds a reference on its parent.
                         */
                        LASSERT(lck->lls_sub[i].sub_lock == NULL);
                OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
        }
        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
        EXIT;
}

/**
 * Tries to advance a state machine of a given sub-lock toward enqueuing of
 * the top-lock.
 *
 * \retval 0 if state-transition can proceed
 * \retval -ve otherwise.
 */
static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
                                struct cl_lock *sublock,
                                struct cl_io *io, __u32 enqflags, int last)
{
        int result;
        ENTRY;

        /* first, try to enqueue a sub-lock ... */
        result = cl_enqueue_try(env, sublock, io, enqflags);
        if (sublock->cll_state == CLS_ENQUEUED)
                /* if it is enqueued, try to `wait' on it---maybe it's already
                 * granted */
                result = cl_wait_try(env, sublock);
        /*
         * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
         * parallel, otherwise---enqueue has to wait until sub-lock is granted
         * before proceeding to the next one.
         */
        if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
            enqflags & CEF_ASYNC && !last)
                result = 0;
        RETURN(result);
}

/**
 * Helper function for lov_lock_enqueue() that creates missing sub-lock.
 */
static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                            struct cl_io *io, struct lov_lock *lck, int idx)
{
        struct lov_lock_link *link;
        struct cl_lock       *sublock;
        int                   result;

        LASSERT(parent->cll_depth == 1);
        cl_lock_mutex_put(env, parent);
        sublock = lov_sublock_alloc(env, io, lck, idx, &link);
        if (!IS_ERR(sublock))
                cl_lock_mutex_get(env, sublock);
        cl_lock_mutex_get(env, parent);

        if (!IS_ERR(sublock)) {
                if (parent->cll_state == CLS_QUEUING &&
                    lck->lls_sub[idx].sub_lock == NULL)
                        lov_sublock_adopt(env, lck, sublock, idx, link);
                else {
                        /* other thread allocated sub-lock, or enqueue is no
                         * longer going on */
                        cl_lock_mutex_put(env, parent);
                        cl_lock_unhold(env, sublock, "lov-parent", parent);
                        cl_lock_mutex_get(env, parent);
                }
                cl_lock_mutex_put(env, sublock);
                result = CLO_REPEAT;
        } else
                result = PTR_ERR(sublock);
        RETURN(result);
}

/**
 * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
 * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
 * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
 * state machines in the face of sub-locks sharing (by multiple top-locks),
 * and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, __u32 enqflags)
{
        struct cl_lock         *lock    = slice->cls_lock;
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, lock);
        int i;
        int result;
        enum cl_lock_state minstate;

        ENTRY;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct lov_lock_sub    *lls;
                struct cl_lock         *sublock;
                struct lov_sublock_env *subenv;

                if (lock->cll_state != CLS_QUEUING) {
                        /*
                         * Lock might have left QUEUING state if previous
                         * iteration released its mutex. Stop enqueuing in
                         * this case and let the upper layer decide what
                         * to do.
                         */
                        LASSERT(i > 0 && result != 0);
                        break;
                }

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                /*
                 * Sub-lock might have been canceled, while top-lock was
                 * cached.
                 */
                if (sub == NULL) {
                        result = lov_sublock_fill(env, lock, io, lck, i);
                        /* lov_sublock_fill() released @lock mutex,
                         * restart. */
                        break;
                }
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, &subenv);
                if (rc == 0) {
                        lov_sublock_hold(env, lck, i);
                        rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
                                                  subenv->lse_io, enqflags,
                                                  i == lck->lls_nr - 1);
                        minstate = min(minstate, sublock->cll_state);
                        /*
                         * Don't hold a sub-lock in CLS_CACHED state, see
                         * description for lov_lock::lls_sub.
                         */
                        if (sublock->cll_state > CLS_HELD)
                                rc = lov_sublock_release(env, lck, i, 1, rc);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
}

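/*
 * The combined return value follows lov_subresult() ordering: if every
 * sub-lock reached at least CLS_ENQUEUED and no sub-operation failed, 0 is
 * returned and the top-lock can advance to CLS_ENQUEUED; a sub-lock still
 * waiting (e.g., for a server reply) yields CLO_WAIT; and an iteration that
 * had to drop the top-lock mutex (e.g., in lov_sublock_fill()) yields
 * CLO_REPEAT, making the caller restart the whole enqueue.
 */
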
static int lov_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;
        int result;

        ENTRY;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                /* top-lock state cannot change concurrently, because single
                 * thread (one that released the last hold) carries unlocking
                 * to the completion. */
                LASSERT(slice->cls_lock->cll_state == CLS_UNLOCKING);
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL)
                        continue;

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, &subenv);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                                LASSERT(sublock->cll_state == CLS_HELD);
                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                if (rc != CLO_WAIT)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 0, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        if (result == 0 && lck->lls_unuse_race) {
                lck->lls_unuse_race = 0;
                result = -ESTALE;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

static int lov_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        enum cl_lock_state minstate;
        int result;
        int i;

        ENTRY;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state >= CLS_ENQUEUED);
                        if (sublock->cll_state < CLS_HELD)
                                rc = cl_wait_try(env, sublock);

                        minstate = min(minstate, sublock->cll_state);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
}

static int lov_lock_use(const struct lu_env *env,
                        const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int result;
        int i;

        LASSERT(slice->cls_lock->cll_state == CLS_CACHED);
        ENTRY;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                if (slice->cls_lock->cll_state != CLS_CACHED) {
                        /* see comment in lov_lock_enqueue(). */
                        LASSERT(i > 0 && result != 0);
                        break;
                }
                /*
                 * if a sub-lock was destroyed while top-lock was in
                 * CLS_CACHED state, top-lock would have been moved into
                 * CLS_NEW state, so all sub-locks have to be in place.
                 */
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state != CLS_FREEING);
                        lov_sublock_hold(env, lck, i);
                        if (sublock->cll_state == CLS_CACHED) {
                                rc = cl_use_try(subenv->lse_env, sublock);
                                if (rc != 0)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 1, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

#if 0
static int lock_lock_multi_match()
{
        struct cl_lock          *lock    = slice->cls_lock;
        struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
        struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0      = lov_r0(loo);
        struct lov_lock_sub     *sub;
        struct cl_object        *subobj;
        obd_off fstart;
        obd_off fend;
        obd_off start;
        obd_off end;
        int i;

        fstart = cl_offset(need->cld_obj, need->cld_start);
        fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
        subneed->cld_mode = need->cld_mode;
        cl_lock_mutex_get(env, lock);
        for (i = 0; i < lov->lls_nr; ++i) {
                sub = &lov->lls_sub[i];
                if (sub->sub_lock == NULL)
                        continue;
                subobj = sub->sub_descr.cld_obj;
                if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
                                           fstart, fend, &start, &end))
                        continue;
                subneed->cld_start = cl_index(subobj, start);
                subneed->cld_end   = cl_index(subobj, end);
                subneed->cld_obj   = subobj;
                if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
                        result = 0;
                        break;
                }
        }
        cl_lock_mutex_put(env, lock);
}
#endif

/**
 * Check if the extent region \a descr is covered by \a child against the
 * specific \a stripe.
 */
static int lov_lock_stripe_is_matching(const struct lu_env *env,
                                       struct lov_object *lov, int stripe,
                                       const struct cl_lock_descr *child,
                                       const struct cl_lock_descr *descr)
{
        struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
        obd_off start;
        obd_off end;
        int result;

        if (lov_r0(lov)->lo_nr == 1)
                return cl_lock_ext_match(child, descr);

        /*
         * For a multi-stripe object:
         * - make sure the descr only covers child's stripe, and
         * - check if extent is matching.
         */
        start = cl_offset(&lov->lo_cl, descr->cld_start);
        end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
        result = end - start <= lsm->lsm_stripe_size &&
                 stripe == lov_stripe_number(lsm, start) &&
                 stripe == lov_stripe_number(lsm, end);
        if (result) {
                struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
                obd_off sub_start;
                obd_off sub_end;

                subd->cld_obj  = NULL;   /* don't need sub object at all */
                subd->cld_mode = descr->cld_mode;
                result = lov_stripe_intersects(lsm, stripe, start, end,
                                               &sub_start, &sub_end);
                LASSERT(result);
                subd->cld_start = cl_index(child->cld_obj, sub_start);
                subd->cld_end   = cl_index(child->cld_obj, sub_end);
                result = cl_lock_ext_match(child, subd);
        }
        return result;
}

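/*
 * For example, assuming a 1MB stripe size on two stripes: a \a descr
 * spanning file offsets [0, 1M) passes all three checks for stripe 0 (it is
 * at most one stripe long and both endpoints belong to stripe 0), whereas a
 * \a descr spanning [512K, 1.5M) has its endpoints on different stripes and
 * can never match a single-stripe \a child lock.
 */
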
/**
 * An implementation of cl_lock_operations::clo_fits_into() method.
 *
 * Checks whether a lock (given by \a slice) is suitable for \a
 * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
 * O_APPEND write.
 *
 * \see ccc_lock_fits_into().
 */
static int lov_lock_fits_into(const struct lu_env *env,
                              const struct cl_lock_slice *slice,
                              const struct cl_lock_descr *need,
                              const struct cl_io *io)
{
        struct lov_lock   *lov = cl2lov_lock(slice);
        struct lov_object *obj = cl2lov(slice->cls_obj);
        int result;

        LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
        LASSERT(lov->lls_nr > 0);

        ENTRY;

        if (lov->lls_nr == 1) {
                struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
                result = lov_lock_stripe_is_matching(env,
                                                     cl2lov(slice->cls_obj),
                                                     lov->lls_sub[0].sub_stripe,
                                                     got, need);
        } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
                   !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
                /*
                 * Multi-stripe locks are only suitable for `quick' IO and for
                 * glimpse (CLM_PHANTOM) requests.
                 */
                result = 0;
        else
                /*
                 * Most general case: multi-stripe existing lock, and
                 * (potentially) multi-stripe @need lock. Check that @need is
                 * covered by @lov's sub-locks.
                 *
                 * For now, ignore lock expansions made by the server, and
                 * match against original lock extent.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
               PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
               lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
               result);
        RETURN(result);
}

void lov_lock_unlink(const struct lu_env *env,
                     struct lov_lock_link *link, struct lovsub_lock *sub)
{
        struct lov_lock *lck    = link->lll_super;
        struct cl_lock  *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
        ENTRY;

        list_del_init(&link->lll_list);
        LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
        /* yank this sub-lock from parent's array */
        lck->lls_sub[link->lll_idx].sub_lock = NULL;
        LASSERT(lck->lls_nr_filled > 0);
        lck->lls_nr_filled--;
        lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
        cl_lock_put(env, parent);
        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        EXIT;
}

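/**
 * Returns the link through which sub-lock \a sub is attached to top-lock
 * \a lck, or NULL if the two locks are not linked.
 */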
struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
                                         struct lov_lock *lck,
                                         struct lovsub_lock *sub)
{
        struct lov_lock_link *scan;

        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
        ENTRY;

        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                if (scan->lll_super == lck)
                        RETURN(scan);
        }
        RETURN(NULL);
}

/**
 * An implementation of cl_lock_operations::clo_delete() method. This is
 * invoked for "top-to-bottom" delete, when lock destruction starts from the
 * top-lock, e.g., as a result of inode destruction.
 *
 * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
 * this is done separately elsewhere:
 *
 *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
 *       each sub-object, purging its locks;
 *
 *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
 *       destroyed separately, when their own references are released.
 */
static void lov_lock_delete(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;

        LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
        ENTRY;

        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *lls;
                struct lovsub_lock  *lsl;
                struct cl_lock      *sublock;
                int rc;

                lls = &lck->lls_sub[i];
                lsl = lls->sub_lock;
                if (lsl == NULL)
                        continue;

                sublock = lsl->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, NULL);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD)
                                lov_sublock_release(env, lck, i, 1, 0);
                        if (sublock->cll_state < CLS_FREEING) {
                                struct lov_lock_link *link;

                                link = lov_lock_link_find(env, lck, lsl);
                                LASSERT(link != NULL);
                                lov_lock_unlink(env, link, lsl);
                                LASSERT(lck->lls_sub[i].sub_lock == NULL);
                        }
                        lov_sublock_unlock(env, lsl, closure, NULL);
                } else if (rc == CLO_REPEAT) {
                        --i; /* repeat with this lock */
                } else {
                        CL_LOCK_DEBUG(D_ERROR, env, sublock,
                                      "Cannot get sub-lock for delete: %i\n",
                                      rc);
                }
        }
        cl_lock_closure_fini(closure);
        EXIT;
}

static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int              i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
                if (sub->sub_lock != NULL)
                        cl_lock_print(env, cookie, p,
                                      sub->sub_lock->lss_cl.cls_lock);
                else
                        (*p)(env, cookie, "---\n");
        }
        return 0;
}

static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_wait      = lov_lock_wait,
        .clo_use       = lov_lock_use,
        .clo_unuse     = lov_lock_unuse,
        .clo_fits_into = lov_lock_fits_into,
        .clo_delete    = lov_lock_delete,
        .clo_print     = lov_lock_print
};

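/**
 * Allocates and initializes the LOV layer slice of \a lock: attaches the
 * slice to the lock and fills in sub-lock descriptions through
 * lov_lock_sub_init(). Returns -ENOMEM if the slice cannot be allocated.
 */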
int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result;

        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
        if (lck != NULL) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
                result = lov_lock_sub_init(env, lck, io);
        } else
                result = -ENOMEM;
        RETURN(result);
}

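/**
 * Returns the pre-allocated per-thread closure (from lov thread info),
 * asserted empty and initialized with \a parent as its origin.
 */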
static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent)
{
        struct cl_lock_closure *closure;

        closure = &lov_env_info(env)->lti_closure;
        LASSERT(list_empty(&closure->clc_list));
        cl_lock_closure_init(env, closure, parent, 1);
        return closure;
}

/** @} lov */