/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"

/** \addtogroup lov lov @{ */

static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent);

/*****************************************************************************
 *
 * Lov lock operations.
 *
 */

static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io          *lio    = lov_env_io(env);
        struct cl_io           *io     = lio->lis_cl.cis_io;
        struct lov_io_sub      *sub;

        subenv = &lov_env_session(env)->ls_subenv;

        /*
         * FIXME: We tend to use the subio's env & io to call the sublock
         * lock operations because osc lock sometimes stores some control
         * variables in the thread's IO information (now only lockless
         * information). However, if the lock's host (object) is different
         * from the object for the current IO, we have no way to get the
         * subenv and subio because they are not initialized at all. As a
         * temporary fix, in this case we still borrow the parent's env to
         * call sublock operations.
         */
        if (!cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io  = io;
                subenv->lse_sub = NULL;
        } else {
                sub = lov_sub_get(env, lio, lls->sub_stripe);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io  = sub->sub_io;
                        subenv->lse_sub = sub;
                } else {
                        subenv = (void *)sub;
                }
        }
        return subenv;
}

static void lov_sublock_env_put(struct lov_sublock_env *subenv)
{
        if (subenv && subenv->lse_sub)
                lov_sub_put(subenv->lse_sub);
}

static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
                              struct cl_lock *sublock, int idx,
                              struct lov_lock_link *link)
{
        struct lovsub_lock *lsl;
        struct cl_lock     *parent = lck->lls_cl.cls_lock;
        int                 rc;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sublock));

        lsl = cl2sub_lock(sublock);
        /*
         * check that sub-lock doesn't have lock link to this top-lock.
         */
        LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
        LASSERT(idx < lck->lls_nr);

        lck->lls_sub[idx].sub_lock = lsl;
        lck->lls_nr_filled++;
        LASSERT(lck->lls_nr_filled <= lck->lls_nr);
        list_add_tail(&link->lll_list, &lsl->lss_parents);
        link->lll_idx   = idx;
        link->lll_super = lck;
        cl_lock_get(parent);
        lu_ref_add(&parent->cll_reference, "lov-child", sublock);
        lck->lls_sub[idx].sub_flags |= LSF_HELD;
        cl_lock_user_add(env, sublock);

        rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
        LASSERT(rc == 0); /* there is no way this can fail, currently */
}
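
/*
 * Note that lov_sublock_adopt() pins the top-lock on behalf of the new
 * sub-lock (a cl_lock reference plus the "lov-child" lu_ref on the parent)
 * and marks the slot as held and in use (LSF_HELD, cl_lock_user_add()).
 * The inverse bookkeeping is split between lov_sublock_release() and
 * lov_lock_unlink() below.
 */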

static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
                                         const struct cl_io *io,
                                         struct lov_lock *lck,
                                         int idx, struct lov_lock_link **out)
{
        struct cl_lock       *sublock;
        struct cl_lock       *parent;
        struct lov_lock_link *link;

        LASSERT(idx < lck->lls_nr);

        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
        if (link != NULL) {
                struct lov_sublock_env *subenv;
                struct lov_lock_sub    *lls;
                struct cl_lock_descr   *descr;

                parent = lck->lls_cl.cls_lock;
                lls    = &lck->lls_sub[idx];
                descr  = &lls->sub_descr;

                subenv = lov_sublock_env_get(env, parent, lls);
                if (!IS_ERR(subenv)) {
                        /* CAVEAT: Don't try to add a field in lov_lock_sub
                         * to remember the subio. This is because a lock is
                         * able to be cached, but this is not true for IO.
                         * This further means a sublock might be referenced
                         * in different io contexts. -jay */
                        sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
                                               descr, "lov-parent", parent);
                        lov_sublock_env_put(subenv);
                } else {
                        sublock = (void *)subenv;
                }

                if (!IS_ERR(sublock))
                        *out = link;
                else
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        } else
                sublock = ERR_PTR(-ENOMEM);
        RETURN(sublock);
}

static void lov_sublock_unlock(const struct lu_env *env,
                               struct lovsub_lock *lsl,
                               struct cl_lock_closure *closure,
                               struct lov_sublock_env *subenv)
{
        lov_sublock_env_put(subenv);
        lsl->lss_active = NULL;
        cl_lock_disclosure(env, closure);
}

static int lov_sublock_lock(const struct lu_env *env,
                            struct lov_lock_sub *lls,
                            struct cl_lock_closure *closure,
                            struct lov_sublock_env **lsep)
{
        struct cl_lock *child;
        int             result;

        LASSERT(list_empty(&closure->clc_list));

        child = lls->sub_lock->lss_cl.cls_lock;
        result = cl_lock_closure_build(env, child, closure);
        if (result == 0) {
                struct cl_lock *parent = closure->clc_origin;

                LASSERT(cl_lock_is_mutexed(child));
                lls->sub_lock->lss_active = parent;

                if (lsep != NULL) {
                        struct lov_sublock_env *subenv;

                        subenv = lov_sublock_env_get(env, parent, lls);
                        if (IS_ERR(subenv)) {
                                lov_sublock_unlock(env, lls->sub_lock,
                                                   closure, NULL);
                                result = PTR_ERR(subenv);
                        } else {
                                *lsep = subenv;
                        }
                }
        }
        RETURN(result);
}

/**
 * Updates the result of a top-lock operation from a result of sub-lock
 * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
 * over sub-locks and lov_subresult() is used to calculate return value of a
 * top-operation. To this end, possible return values of sub-operations are
 * ordered as
 *
 *     - 0            success
 *     - CLO_WAIT     wait for event
 *     - CLO_REPEAT   repeat top-operation
 *     - -ne          fundamental error
 *
 * Top-level return code can only go down through this list. CLO_REPEAT
 * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
 * has to be rechecked by the upper layer.
 */
static int lov_subresult(int result, int rc)
{
        int result_rank;
        int rc_rank;

        LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
        LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
        CLASSERT(CLO_WAIT < CLO_REPEAT);

        /* calculate ranks in the ordering above */
        result_rank = result < 0 ? 1 + CLO_REPEAT : result;
        rc_rank     = rc < 0 ? 1 + CLO_REPEAT : rc;

        if (result_rank < rc_rank)
                result = rc;
        RETURN(result);
}
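
/*
 * A worked example of the ranking above: combining an in-progress CLO_WAIT
 * with a sub-lock failure of, say, -ENOMEM maps the error to rank
 * 1 + CLO_REPEAT, the highest, so the error wins and the top-operation
 * returns -ENOMEM; combining CLO_WAIT with CLO_REPEAT picks CLO_REPEAT,
 * since the lock mutex was released and the sleeping condition must be
 * rechecked.
 */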

/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of top-object, and creates sub-locks on every
 * sub-object intersecting with top-lock extent. This is complicated by the
 * fact that top-lock (that is being created) can be accessed concurrently
 * through already created sub-locks (possibly shared with other top-locks).
 */
static int lov_lock_sub_init(const struct lu_env *env,
                             struct lov_lock *lck, const struct cl_io *io)
{
        int result = 0;
        int i;
        int j;
        int nr;
        int stripe;
        int start_stripe;
        obd_off start;
        obd_off end;
        obd_off file_start;
        obd_off file_end;

        struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0     = lov_r0(loo);
        struct cl_lock          *parent = lck->lls_cl.cls_lock;

        lck->lls_orig = parent->cll_descr;
        file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
        file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;

        start_stripe = lov_stripe_number(r0->lo_lsm, file_start);
        for (i = 0, nr = 0; i < r0->lo_nr; i++) {
                /*
                 * XXX for wide striping a smarter algorithm is desirable,
                 * breaking out of the loop early.
                 */
                stripe = (start_stripe + i) % r0->lo_nr;
                if (lov_stripe_intersects(r0->lo_lsm, stripe,
                                          file_start, file_end, &start, &end))
                        nr++;
        }
        LASSERT(nr > 0);
        OBD_ALLOC(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
        if (lck->lls_sub == NULL)
                RETURN(-ENOMEM);

        lck->lls_nr = nr;
        /*
         * First, fill in sub-lock descriptions in lck->lls_sub[].sub_descr.
         * They are used by lov_sublock_alloc() (called below in this
         * function, and by lov_lock_enqueue()) to create sub-locks. At this
         * moment, no other thread can access top-lock.
         */
        for (j = 0, nr = 0; j < i; ++j) {
                stripe = (start_stripe + j) % r0->lo_nr;
                if (lov_stripe_intersects(r0->lo_lsm, stripe,
                                          file_start, file_end, &start, &end)) {
                        struct cl_lock_descr *descr;

                        descr = &lck->lls_sub[nr].sub_descr;

                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj   = lovsub2cl(r0->lo_sub[stripe]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end   = cl_index(descr->cld_obj, end);
                        descr->cld_mode  = parent->cll_descr.cld_mode;
                        descr->cld_gid   = parent->cll_descr.cld_gid;
                        /* XXX has no effect */
                        lck->lls_sub[nr].sub_got    = *descr;
                        lck->lls_sub[nr].sub_stripe = stripe;
                        nr++;
                }
        }
        LASSERT(nr == lck->lls_nr);
        /*
         * Then, create sub-locks. Once at least one sub-lock was created,
         * top-lock can be reached by other threads.
         */
        for (i = 0; i < lck->lls_nr; ++i) {
                struct cl_lock       *sublock;
                struct lov_lock_link *link;

                if (lck->lls_sub[i].sub_lock == NULL) {
                        sublock = lov_sublock_alloc(env, io, lck, i, &link);
                        if (IS_ERR(sublock)) {
                                result = PTR_ERR(sublock);
                                break;
                        }
                        cl_lock_mutex_get(env, sublock);
                        cl_lock_mutex_get(env, parent);
                        /*
                         * recheck under mutex that sub-lock wasn't created
                         * concurrently, and that top-lock is still alive.
                         */
                        if (lck->lls_sub[i].sub_lock == NULL &&
                            parent->cll_state < CLS_FREEING) {
                                lov_sublock_adopt(env, lck, sublock, i, link);
                                cl_lock_mutex_put(env, parent);
                        } else {
                                cl_lock_mutex_put(env, parent);
                                cl_lock_unhold(env, sublock,
                                               "lov-parent", parent);
                        }
                        cl_lock_mutex_put(env, sublock);
                }
        }
        /*
         * Some sub-locks can be missing at this point. This is not a problem,
         * because enqueue will create them anyway. Main duty of this function
         * is to fill in sub-lock descriptions in a race-free manner.
         */
        RETURN(result);
}
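
/*
 * A worked example with hypothetical numbers: for a stripe size of 1MB over
 * lo_nr = 3 stripes, a top-lock extent covering file bytes [512K, 2.5M)
 * intersects all three stripes, so three lls_sub[] slots are allocated;
 * start_stripe = lov_stripe_number(lsm, 512K) = 0, and each sub-descriptor
 * is clipped to the part of the extent that its stripe actually backs.
 */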

static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
                               int i, int deluser, int rc)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));

        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                struct cl_lock *sublock;
                int dying;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));

                lck->lls_sub[i].sub_flags &= ~LSF_HELD;
                if (deluser)
                        cl_lock_user_del(env, sublock);
                /*
                 * If the last hold is released, and cancellation is pending
                 * for a sub-lock, release parent mutex, to avoid keeping it
                 * while sub-lock is being paged out.
                 */
                dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
                         sublock->cll_descr.cld_mode == CLM_GROUP ||
                         (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
                        sublock->cll_holds == 1;
                if (dying)
                        cl_lock_mutex_put(env, parent);
                cl_lock_unhold(env, sublock, "lov-parent", parent);
                if (dying) {
                        cl_lock_mutex_get(env, parent);
                        rc = lov_subresult(rc, CLO_REPEAT);
                }
                /*
                 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
                 * not backed by a reference on a sub-lock.
                 * lovsub_lock_delete() will clear lck->lls_sub[i].sub_lock
                 * under semaphores, just before sub-lock is destroyed.
                 */
        }
        RETURN(rc);
}

static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
                             int i)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));

        if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
                struct cl_lock *sublock;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));
                LASSERT(sublock->cll_state != CLS_FREEING);

                lck->lls_sub[i].sub_flags |= LSF_HELD;

                cl_lock_get_trust(sublock);
                cl_lock_hold_add(env, sublock, "lov-parent", parent);
                cl_lock_user_add(env, sublock);
                cl_lock_put(env, sublock);
        }
}

static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lck;
        int i;

        lck = cl2lov_lock(slice);
        LASSERT(lck->lls_nr_filled == 0);
        if (lck->lls_sub != NULL) {
                for (i = 0; i < lck->lls_nr; ++i)
                        /*
                         * No sub-locks exist at this point, as sub-lock has
                         * a reference on its parent.
                         */
                        LASSERT(lck->lls_sub[i].sub_lock == NULL);
                OBD_FREE(lck->lls_sub, lck->lls_nr * sizeof lck->lls_sub[0]);
        }
        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}

/**
 * Tries to advance a state machine of a given sub-lock toward enqueuing of
 * the top-lock.
 *
 * \retval 0 if state-transition can proceed
 * \retval -ve otherwise.
 */
static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
                                struct cl_lock *sublock,
                                struct cl_io *io, __u32 enqflags, int last)
{
        int result;

        /* first, try to enqueue a sub-lock ... */
        result = cl_enqueue_try(env, sublock, io, enqflags);
        if (sublock->cll_state == CLS_ENQUEUED)
                /* if it is enqueued, try to `wait' on it---maybe it's already
                 * granted */
                result = cl_wait_try(env, sublock);
        /*
         * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
         * parallel, otherwise---enqueue has to wait until sub-lock is granted
         * before proceeding to the next one.
         */
        if (result == CLO_WAIT && sublock->cll_state <= CLS_HELD &&
            enqflags & CEF_ASYNC && !last)
                result = 0;
        RETURN(result);
}

/**
 * Helper function for lov_lock_enqueue() that creates missing sub-lock.
 */
static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                            struct cl_io *io, struct lov_lock *lck, int idx)
{
        struct lov_lock_link *link;
        struct cl_lock       *sublock;
        int                   result;

        LASSERT(parent->cll_depth == 1);

        cl_lock_mutex_put(env, parent);
        sublock = lov_sublock_alloc(env, io, lck, idx, &link);
        if (!IS_ERR(sublock))
                cl_lock_mutex_get(env, sublock);
        cl_lock_mutex_get(env, parent);

        if (!IS_ERR(sublock)) {
                if (parent->cll_state == CLS_QUEUING &&
                    lck->lls_sub[idx].sub_lock == NULL)
                        lov_sublock_adopt(env, lck, sublock, idx, link);
                else {
                        /* other thread allocated sub-lock, or enqueue is no
                         * longer going on */
                        cl_lock_mutex_put(env, parent);
                        cl_lock_unhold(env, sublock, "lov-parent", parent);
                        cl_lock_mutex_get(env, parent);
                }
                cl_lock_mutex_put(env, sublock);
                result = CLO_REPEAT;
        } else
                result = PTR_ERR(sublock);
        RETURN(result);
}
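
/*
 * On success lov_sublock_fill() reports CLO_REPEAT: the parent mutex was
 * dropped around the allocation, so any state the caller observed earlier
 * may be stale and the top-operation has to restart.
 */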

/**
 * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
 * function is rather subtle, as it enqueues top-lock (i.e., advances top-lock
 * state machine from CLS_QUEUING to CLS_ENQUEUED states) by juggling sub-lock
 * state machines in the face of sub-locks sharing (by multiple top-locks),
 * and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, __u32 enqflags)
{
        struct cl_lock         *lock    = slice->cls_lock;
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, lock);
        int i;
        int result;
        enum cl_lock_state minstate;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct lov_lock_sub    *lls;
                struct cl_lock         *sublock;
                struct lov_sublock_env *subenv;

                if (lock->cll_state != CLS_QUEUING) {
                        /*
                         * Lock might have left QUEUING state if previous
                         * iteration released its mutex. Stop enqueuing in
                         * this case and let the upper layer decide what to
                         * do.
                         */
                        LASSERT(i > 0 && result != 0);
                        break;
                }

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                /*
                 * Sub-lock might have been canceled, while top-lock was
                 * cached.
                 */
                if (sub == NULL) {
                        result = lov_sublock_fill(env, lock, io, lck, i);
                        /* lov_sublock_fill() released @lock mutex,
                         * restart. */
                        break;
                }
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, &subenv);
                if (rc == 0) {
                        lov_sublock_hold(env, lck, i);
                        rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
                                                  subenv->lse_io, enqflags,
                                                  i == lck->lls_nr - 1);
                        minstate = min(minstate, sublock->cll_state);
                        /*
                         * Don't hold a sub-lock in CLS_CACHED state, see
                         * description for lov_lock::lls_sub.
                         */
                        if (sublock->cll_state > CLS_HELD)
                                rc = lov_sublock_release(env, lck, i, 1, rc);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
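        /*
         * The top-lock is enqueued only once every sub-lock reached at least
         * CLS_ENQUEUED: an error short-circuits, otherwise CLO_WAIT is
         * returned until minstate catches up.
         */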
        RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
}

static int lov_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;
        int result;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                /* top-lock state cannot change concurrently, because single
                 * thread (one that released the last hold) carries unlocking
                 * to the completion. */
                LASSERT(slice->cls_lock->cll_state == CLS_UNLOCKING);
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL)
                        continue;

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, &subenv);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                                LASSERT(sublock->cll_state == CLS_HELD);
                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                if (rc != CLO_WAIT)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 0, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result < 0)
                        break;
        }
        if (result == 0 && lck->lls_unuse_race) {
                lck->lls_unuse_race = 0;
                result = -ESTALE;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

static int lov_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        enum cl_lock_state      minstate;
        int                     result;
        int                     i;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state >= CLS_ENQUEUED);
                        if (sublock->cll_state < CLS_HELD)
                                rc = cl_wait_try(env, sublock);

                        minstate = min(minstate, sublock->cll_state);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
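        /*
         * As with enqueue, the top-lock is granted only once every sub-lock
         * is in CLS_HELD or better; until then CLO_WAIT asks the caller to
         * wait and retry.
         */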
        RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
}

static int lov_lock_use(const struct lu_env *env,
                        const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int                     result;
        int                     i;

        LASSERT(slice->cls_lock->cll_state == CLS_CACHED);

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                if (slice->cls_lock->cll_state != CLS_CACHED) {
                        /* see comment in lov_lock_enqueue(). */
                        LASSERT(i > 0 && result != 0);
                        break;
                }
                /*
                 * if a sub-lock was destroyed while top-lock was in
                 * CLS_CACHED state, top-lock would have been moved into
                 * CLS_NEW state, so all sub-locks have to be in place.
                 */
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state != CLS_FREEING);
                        lov_sublock_hold(env, lck, i);
                        if (sublock->cll_state == CLS_CACHED) {
                                rc = cl_use_try(subenv->lse_env, sublock);
                                if (rc != 0)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 1, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result);
}

#if 0
static int lock_lock_multi_match()
{
        struct cl_lock          *lock    = slice->cls_lock;
        struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
        struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0      = lov_r0(loo);
        struct lov_lock_sub     *sub;
        struct cl_object        *subobj;
        obd_off  fstart;
        obd_off  fend;
        obd_off  start;
        obd_off  end;
        int i;

        fstart = cl_offset(need->cld_obj, need->cld_start);
        fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
        subneed->cld_mode = need->cld_mode;
        cl_lock_mutex_get(env, lock);
        for (i = 0; i < lov->lls_nr; ++i) {
                sub = &lov->lls_sub[i];
                if (sub->sub_lock == NULL)
                        continue;
                subobj = sub->sub_descr.cld_obj;
                if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
                                           fstart, fend, &start, &end))
                        continue;
                subneed->cld_start = cl_index(subobj, start);
                subneed->cld_end   = cl_index(subobj, end);
                subneed->cld_obj   = subobj;
                if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
                        result = 0;
                        break;
                }
        }
        cl_lock_mutex_put(env, lock);
}
#endif

/**
 * Check if the extent region \a descr is covered by \a child against the
 * specific \a stripe.
 */
static int lov_lock_stripe_is_matching(const struct lu_env *env,
                                       struct lov_object *lov, int stripe,
                                       const struct cl_lock_descr *child,
                                       const struct cl_lock_descr *descr)
{
        struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
        obd_off start;
        obd_off end;
        int result;

        if (lov_r0(lov)->lo_nr == 1)
                return cl_lock_ext_match(child, descr);

        /*
         * For a multi-stripe object:
         *   - make sure the descr only covers child's stripe, and
         *   - check if extent is matching.
         */
        start = cl_offset(&lov->lo_cl, descr->cld_start);
        end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
        result = end - start <= lsm->lsm_stripe_size &&
                 stripe == lov_stripe_number(lsm, start) &&
                 stripe == lov_stripe_number(lsm, end);
        if (result) {
                struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
                obd_off sub_start;
                obd_off sub_end;

                subd->cld_obj  = NULL; /* don't need sub object at all */
                subd->cld_mode = descr->cld_mode;
                subd->cld_gid  = descr->cld_gid;
                result = lov_stripe_intersects(lsm, stripe, start, end,
                                               &sub_start, &sub_end);
                LASSERT(result);
                subd->cld_start = cl_index(child->cld_obj, sub_start);
                subd->cld_end   = cl_index(child->cld_obj, sub_end);
                result = cl_lock_ext_match(child, subd);
        }
        return result;
}
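
/*
 * For illustration, with hypothetical numbers: for lsm_stripe_size = 1MB, a
 * descr covering file extent [1M, 2M) spans at most one stripe and both of
 * its ends map to stripe 1, so it can be matched purely against stripe 1's
 * sub-lock; a descr covering [512K, 1.5M) maps its ends to stripes 0 and 1
 * and fails the check above.
 */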

/**
 * An implementation of cl_lock_operations::clo_fits_into() method.
 *
 * Checks whether a lock (given by \a slice) is suitable for \a
 * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
 * O_APPEND write.
 *
 * \see ccc_lock_fits_into().
 */
static int lov_lock_fits_into(const struct lu_env *env,
                              const struct cl_lock_slice *slice,
                              const struct cl_lock_descr *need,
                              const struct cl_io *io)
{
        struct lov_lock   *lov = cl2lov_lock(slice);
        struct lov_object *obj = cl2lov(slice->cls_obj);
        int result;

        LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
        LASSERT(lov->lls_nr > 0);

        if (need->cld_mode == CLM_GROUP)
                /*
                 * always allow to match group lock.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        else if (lov->lls_nr == 1) {
                struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
                result = lov_lock_stripe_is_matching(env,
                                                     cl2lov(slice->cls_obj),
                                                     lov->lls_sub[0].sub_stripe,
                                                     got, need);
        } else if (io->ci_type != CIT_TRUNC && io->ci_type != CIT_MISC &&
                   !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
                /*
                 * Multi-stripe locks are only suitable for `quick' IO and for
                 * glimpse.
                 */
                result = 0;
        else
                /*
                 * Most general case: multi-stripe existing lock, and
                 * (potentially) multi-stripe @need lock. Check that @need is
                 * covered by @lov's sub-locks.
                 *
                 * For now, ignore lock expansions made by the server, and
                 * match against original lock extent.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %i %i/%i: %i\n",
               PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
               lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
               result);
        RETURN(result);
}

void lov_lock_unlink(const struct lu_env *env,
                     struct lov_lock_link *link, struct lovsub_lock *sub)
{
        struct lov_lock *lck    = link->lll_super;
        struct cl_lock  *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));

        list_del_init(&link->lll_list);
        LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
        /* yank this sub-lock from parent's array */
        lck->lls_sub[link->lll_idx].sub_lock = NULL;
        LASSERT(lck->lls_nr_filled > 0);
        lck->lls_nr_filled--;
        lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
        cl_lock_put(env, parent);
        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
}

struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
                                         struct lov_lock *lck,
                                         struct lovsub_lock *sub)
{
        struct lov_lock_link *scan;

        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));

        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                if (scan->lll_super == lck)
                        RETURN(scan);
        }
        RETURN(NULL);
}

/**
 * An implementation of cl_lock_operations::clo_delete() method. This is
 * invoked for "top-to-bottom" delete, when lock destruction starts from the
 * top-lock, e.g., as a result of inode destruction.
 *
 * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
 * this is done separately elsewhere:
 *
 *     - for inode destruction, lov_object_delete() calls cl_object_kill() for
 *       each sub-object, purging its locks;
 *
 *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
 *       left in the cache.
 */
static void lov_lock_delete(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
        int i;

        LASSERT(slice->cls_lock->cll_state == CLS_FREEING);

        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *lls;
                struct lovsub_lock  *lsl;
                struct cl_lock      *sublock;
                int rc;

                lls = &lck->lls_sub[i];
                lsl = lls->sub_lock;
                if (lsl == NULL)
                        continue;

                sublock = lsl->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lls, closure, NULL);
                if (rc == 0) {
                        if (lck->lls_sub[i].sub_flags & LSF_HELD)
                                lov_sublock_release(env, lck, i, 1, 0);
                        if (sublock->cll_state < CLS_FREEING) {
                                struct lov_lock_link *link;

                                link = lov_lock_link_find(env, lck, lsl);
                                LASSERT(link != NULL);
                                lov_lock_unlink(env, link, lsl);
                                LASSERT(lck->lls_sub[i].sub_lock == NULL);
                        }
                        lov_sublock_unlock(env, lsl, closure, NULL);
                } else if (rc == CLO_REPEAT) {
                        --i; /* repeat with this lock */
                } else {
                        CL_LOCK_DEBUG(D_ERROR, env, sublock,
                                      "Cannot get sub-lock for delete: %i\n",
                                      rc);
                }
        }
        cl_lock_closure_fini(closure);
}

static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int              i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
                if (sub->sub_lock != NULL)
                        cl_lock_print(env, cookie, p,
                                      sub->sub_lock->lss_cl.cls_lock);
                else
                        (*p)(env, cookie, "---\n");
        }
        return 0;
}

static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_wait      = lov_lock_wait,
        .clo_use       = lov_lock_use,
        .clo_unuse     = lov_lock_unuse,
        .clo_fits_into = lov_lock_fits_into,
        .clo_delete    = lov_lock_delete,
        .clo_print     = lov_lock_print
};

int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
        if (lck != NULL) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
                result = lov_lock_sub_init(env, lck, io);
        } else
                result = -ENOMEM;
        RETURN(result);
}

static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent)
{
        struct cl_lock_closure *closure;

        closure = &lov_env_info(env)->lti_closure;
        LASSERT(list_empty(&closure->clc_list));
        cl_lock_closure_init(env, closure, parent, 1);
        return closure;
}

/** @} lov */