/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_lock for LOV layer.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_LOV

#include "lov_cl_internal.h"
static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent);

static int lov_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice);
/*****************************************************************************
 *
 * Lov lock operations.
 *
 */
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
                                                   struct cl_lock *parent,
                                                   struct lov_lock_sub *lls)
{
        struct lov_sublock_env *subenv;
        struct lov_io          *lio    = lov_env_io(env);
        struct cl_io           *io     = lio->lis_cl.cis_io;
        struct lov_io_sub      *sub;

        subenv = &lov_env_session(env)->ls_subenv;

        /*
         * FIXME: We tend to use the subio's env & io to call the sublock
         * lock operations, because an osc lock sometimes stores control
         * variables in the thread's IO information (currently only the
         * lockless information). However, if the lock's host (object) is
         * different from the object of the current IO, we have no way to
         * get the subenv and subio, because they are not initialized at
         * all. As a temporary fix, in this case we still borrow the
         * parent's env to call the sublock operations.
         */
        if (!io || !cl_object_same(io->ci_obj, parent->cll_descr.cld_obj)) {
                subenv->lse_env = env;
                subenv->lse_io  = io;
                subenv->lse_sub = NULL;
        } else {
                sub = lov_sub_get(env, lio, lls->sub_stripe);
                if (!IS_ERR(sub)) {
                        subenv->lse_env = sub->sub_env;
                        subenv->lse_io  = sub->sub_io;
                        subenv->lse_sub = sub;
                } else {
                        subenv = (void *)sub;
                }
        }
        return subenv;
}
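/*
 * Illustrative sketch (not compiled): the helper above is always used in a
 * get/put bracket around a single sub-lock operation, mirroring the real
 * callers below (e.g., lov_sublock_alloc()). Variable names here are
 * hypothetical.
 */
#if 0
        subenv = lov_sublock_env_get(env, parent, lls);
        if (!IS_ERR(subenv)) {
                /* run one sub-lock operation with the borrowed env and io */
                rc = cl_unuse_try(subenv->lse_env, sublock);
                lov_sublock_env_put(subenv);    /* always paired with _get */
        }
#endif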
static void lov_sublock_env_put(struct lov_sublock_env *subenv)
{
        if (subenv && subenv->lse_sub)
                lov_sub_put(subenv->lse_sub);
}
static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
                              struct cl_lock *sublock, int idx,
                              struct lov_lock_link *link)
{
        struct lovsub_lock *lsl;
        struct cl_lock     *parent = lck->lls_cl.cls_lock;
        int                 rc;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sublock));

        lsl = cl2sub_lock(sublock);
        /*
         * check that the sub-lock doesn't have a lock link to this top-lock.
         */
        LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
        LASSERT(idx < lck->lls_nr);

        lck->lls_sub[idx].sub_lock = lsl;
        lck->lls_nr_filled++;
        LASSERT(lck->lls_nr_filled <= lck->lls_nr);
        cfs_list_add_tail(&link->lll_list, &lsl->lss_parents);
        link->lll_idx = idx;
        link->lll_super = lck;
        cl_lock_get(parent);
        lu_ref_add(&parent->cll_reference, "lov-child", sublock);
        lck->lls_sub[idx].sub_flags |= LSF_HELD;
        cl_lock_user_add(env, sublock);

        rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
        LASSERT(rc == 0); /* there is no way this can fail, currently */
}
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
                                         const struct cl_io *io,
                                         struct lov_lock *lck,
                                         int idx, struct lov_lock_link **out)
{
        struct cl_lock       *sublock;
        struct cl_lock       *parent;
        struct lov_lock_link *link;

        LASSERT(idx < lck->lls_nr);

        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO);
        if (link != NULL) {
                struct lov_sublock_env *subenv;
                struct lov_lock_sub    *lls;
                struct cl_lock_descr   *descr;

                parent = lck->lls_cl.cls_lock;
                lls    = &lck->lls_sub[idx];
                descr  = &lls->sub_descr;

                subenv = lov_sublock_env_get(env, parent, lls);
                if (!IS_ERR(subenv)) {
                        /* CAVEAT: Don't try to add a field in lov_lock_sub
                         * to remember the subio. This is because a lock can
                         * be cached, but the same is not true for IO. This
                         * further means a sublock might be referenced in
                         * different io contexts. -jay */

                        sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
                                               descr, "lov-parent", parent);
                        lov_sublock_env_put(subenv);
                } else {
                        /* error occurred */
                        sublock = (void *)subenv;
                }

                if (!IS_ERR(sublock))
                        *out = link;
                else
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
        } else {
                sublock = ERR_PTR(-ENOMEM);
        }
        return sublock;
}
static void lov_sublock_unlock(const struct lu_env *env,
                               struct lovsub_lock *lsl,
                               struct cl_lock_closure *closure,
                               struct lov_sublock_env *subenv)
{
        lov_sublock_env_put(subenv);
        lsl->lss_active = NULL;
        cl_lock_disclosure(env, closure);
}
static int lov_sublock_lock(const struct lu_env *env,
                            struct lov_lock *lck,
                            struct lov_lock_sub *lls,
                            struct cl_lock_closure *closure,
                            struct lov_sublock_env **lsep)
{
        struct lovsub_lock *sublock;
        struct cl_lock     *child;
        int                 result = 0;

        LASSERT(cfs_list_empty(&closure->clc_list));

        sublock = lls->sub_lock;
        child = sublock->lss_cl.cls_lock;
        result = cl_lock_closure_build(env, child, closure);
        if (result == 0) {
                struct cl_lock *parent = closure->clc_origin;

                LASSERT(cl_lock_is_mutexed(child));
                sublock->lss_active = parent;

                if (unlikely((child->cll_state == CLS_FREEING) ||
                             (child->cll_flags & CLF_CANCELLED))) {
                        struct lov_lock_link *link;
                        /*
                         * we could race with lock deletion which temporarily
                         * puts the lock into the freeing state, bug 19080.
                         */
                        LASSERT(!(lls->sub_flags & LSF_HELD));

                        link = lov_lock_link_find(env, lck, sublock);
                        LASSERT(link != NULL);
                        lov_lock_unlink(env, link, sublock);
                        lov_sublock_unlock(env, sublock, closure, NULL);
                        lck->lls_cancel_race = 1;
                        result = CLO_REPEAT;
                } else if (lsep != NULL) {
                        struct lov_sublock_env *subenv;

                        subenv = lov_sublock_env_get(env, parent, lls);
                        if (IS_ERR(subenv)) {
                                lov_sublock_unlock(env, sublock,
                                                   closure, NULL);
                                result = PTR_ERR(subenv);
                        } else {
                                *lsep = subenv;
                        }
                }
        }
        return result;
}
/**
 * Updates the result of a top-lock operation from a result of sub-lock
 * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
 * over sub-locks and lov_subresult() is used to calculate the return value
 * of the top-operation. To this end, possible return values of
 * sub-operations are ordered as
 *
 *     - 0            success
 *     - CLO_WAIT     wait for event
 *     - CLO_REPEAT   repeat top-operation
 *     - -ne          fundamental error
 *
 * The top-level return code can only go down through this list. CLO_REPEAT
 * overwrites CLO_WAIT, because the lock mutex was released and the sleeping
 * condition has to be rechecked by the upper layer.
 */
static int lov_subresult(int result, int rc)
{
        int result_rank;
        int rc_rank;

        LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
        LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
        CLASSERT(CLO_WAIT < CLO_REPEAT);

        /* calculate ranks in the ordering above */
        result_rank = result < 0 ? 1 + CLO_REPEAT : result;
        rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;

        if (result_rank < rc_rank)
                result = rc;
        return result;
}
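/*
 * Illustrative sketch (not compiled): how results fold across sub-locks in
 * the top-operations below. The sequence of values is hypothetical.
 */
#if 0
        int result = 0;                             /* start: success      */
        result = lov_subresult(result, CLO_WAIT);   /* -> CLO_WAIT         */
        result = lov_subresult(result, 0);          /* stays CLO_WAIT: the
                                                     * code never moves back
                                                     * up the list         */
        result = lov_subresult(result, CLO_REPEAT); /* -> CLO_REPEAT       */
        result = lov_subresult(result, -ENOMEM);    /* -> -ENOMEM; errors
                                                     * rank highest        */
#endif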
/**
 * Creates sub-locks for a given lov_lock for the first time.
 *
 * Goes through all sub-objects of the top-object, and creates sub-locks on
 * every sub-object intersecting with the top-lock extent. This is
 * complicated by the fact that the top-lock (that is being created) can be
 * accessed concurrently through already created sub-locks (possibly shared
 * with other top-locks).
 */
static int lov_lock_sub_init(const struct lu_env *env,
                             struct lov_lock *lck, const struct cl_io *io)
{
        int result = 0;
        int i;
        int nr;
        obd_off start;
        obd_off end;
        obd_off file_start;
        obd_off file_end;

        struct lov_object       *loo    = cl2lov(lck->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0     = lov_r0(loo);
        struct cl_lock          *parent = lck->lls_cl.cls_lock;

        lck->lls_orig = parent->cll_descr;
        file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
        file_end   = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;

        for (i = 0, nr = 0; i < r0->lo_nr; i++) {
                /*
                 * XXX for wide striping a smarter algorithm is desirable,
                 * breaking out of the loop early.
                 */
                if (lov_stripe_intersects(r0->lo_lsm, i,
                                          file_start, file_end, &start, &end))
                        nr++;
        }
        LASSERT(nr > 0);
        OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
        if (lck->lls_sub == NULL)
                return -ENOMEM;

        lck->lls_nr = nr;
        /*
         * First, fill in sub-lock descriptions in
         * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
         * (called below in this function, and by lov_lock_enqueue()) to
         * create sub-locks. At this moment, no other thread can access the
         * top-lock.
         */
        for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
                if (lov_stripe_intersects(r0->lo_lsm, i,
                                          file_start, file_end, &start, &end)) {
                        struct cl_lock_descr *descr;

                        descr = &lck->lls_sub[nr].sub_descr;

                        LASSERT(descr->cld_obj == NULL);
                        descr->cld_obj   = lovsub2cl(r0->lo_sub[i]);
                        descr->cld_start = cl_index(descr->cld_obj, start);
                        descr->cld_end   = cl_index(descr->cld_obj, end);
                        descr->cld_mode  = parent->cll_descr.cld_mode;
                        descr->cld_gid   = parent->cll_descr.cld_gid;
                        descr->cld_enq_flags = parent->cll_descr.cld_enq_flags;
                        /* XXX has no effect */
                        lck->lls_sub[nr].sub_got = *descr;
                        lck->lls_sub[nr].sub_stripe = i;
                        nr++;
                }
        }
        LASSERT(nr == lck->lls_nr);
        /*
         * Then, create sub-locks. Once at least one sub-lock was created,
         * the top-lock can be reached by other threads.
         */
        for (i = 0; i < lck->lls_nr; ++i) {
                struct cl_lock       *sublock;
                struct lov_lock_link *link;

                if (lck->lls_sub[i].sub_lock == NULL) {
                        sublock = lov_sublock_alloc(env, io, lck, i, &link);
                        if (IS_ERR(sublock)) {
                                result = PTR_ERR(sublock);
                                break;
                        }
                        cl_lock_get_trust(sublock);
                        cl_lock_mutex_get(env, sublock);
                        cl_lock_mutex_get(env, parent);
                        /*
                         * recheck under mutex that the sub-lock wasn't
                         * created concurrently, and that the top-lock is
                         * still alive.
                         */
                        if (lck->lls_sub[i].sub_lock == NULL &&
                            parent->cll_state < CLS_FREEING) {
                                lov_sublock_adopt(env, lck, sublock, i, link);
                                cl_lock_mutex_put(env, parent);
                        } else {
                                OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                                cl_lock_mutex_put(env, parent);
                                cl_lock_unhold(env, sublock,
                                               "lov-parent", parent);
                        }
                        cl_lock_mutex_put(env, sublock);
                        cl_lock_put(env, sublock);
                }
        }
        /*
         * Some sub-locks can be missing at this point. This is not a
         * problem, because enqueue will create them anyway. The main duty of
         * this function is to fill in sub-lock descriptions in a race-free
         * manner.
         */
        return result;
}
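/*
 * Illustrative sketch (not compiled): how a top-lock file extent maps to one
 * stripe. The layout values are hypothetical: a RAID0 layout with 3 stripes,
 * 1MB stripe size, and a top-lock covering file bytes [0, 5MB - 1].
 */
#if 0
        obd_off start;
        obd_off end;

        /* stripe 0 owns file ranges [0MB, 1MB) and [3MB, 4MB); expressed in
         * offsets of the stripe sub-object their union is [0, 2MB), so: */
        if (lov_stripe_intersects(r0->lo_lsm, 0, /* file_start */ 0,
                                  /* file_end */ 5 * 1024 * 1024 - 1,
                                  &start, &end))
                /* here start == 0 and end == 2MB - 1, in sub-object offsets;
                 * the loop above converts them to page indices with
                 * cl_index() when filling sub_descr */;
#endif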
static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
                               int i, int deluser, int rc)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));

        if (lck->lls_sub[i].sub_flags & LSF_HELD) {
                struct cl_lock *sublock;
                int dying;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));

                lck->lls_sub[i].sub_flags &= ~LSF_HELD;
                if (deluser)
                        cl_lock_user_del(env, sublock);
                /*
                 * If the last hold is released, and cancellation is pending
                 * for a sub-lock, release the parent mutex, to avoid keeping
                 * it while the sub-lock is being paged out.
                 */
                dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
                         sublock->cll_descr.cld_mode == CLM_GROUP ||
                         (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
                        sublock->cll_holds == 1;
                if (dying)
                        cl_lock_mutex_put(env, parent);
                cl_lock_unhold(env, sublock, "lov-parent", parent);
                if (dying) {
                        cl_lock_mutex_get(env, parent);
                        rc = lov_subresult(rc, CLO_REPEAT);
                }
                /*
                 * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
                 * not backed by a reference on a
                 * sub-lock. lovsub_lock_delete() will clear
                 * lck->lls_sub[i].sub_lock under semaphores, just before the
                 * sub-lock is destroyed.
                 */
        }
        return rc;
}
static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
                             int i)
{
        struct cl_lock *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));

        if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
                struct cl_lock *sublock;

                LASSERT(lck->lls_sub[i].sub_lock != NULL);
                sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
                LASSERT(cl_lock_is_mutexed(sublock));
                LASSERT(sublock->cll_state != CLS_FREEING);

                lck->lls_sub[i].sub_flags |= LSF_HELD;

                cl_lock_get_trust(sublock);
                cl_lock_hold_add(env, sublock, "lov-parent", parent);
                cl_lock_user_add(env, sublock);
                cl_lock_put(env, sublock);
        }
}
static void lov_lock_fini(const struct lu_env *env,
                          struct cl_lock_slice *slice)
{
        struct lov_lock *lck;
        int i;

        lck = cl2lov_lock(slice);
        LASSERT(lck->lls_nr_filled == 0);
        if (lck->lls_sub != NULL) {
                for (i = 0; i < lck->lls_nr; ++i)
                        /*
                         * No sub-locks exist at this point, as a sub-lock
                         * keeps a reference on its parent.
                         */
                        LASSERT(lck->lls_sub[i].sub_lock == NULL);
                OBD_FREE_LARGE(lck->lls_sub,
                               lck->lls_nr * sizeof lck->lls_sub[0]);
        }
        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
}
static int lov_lock_enqueue_wait(const struct lu_env *env,
                                 struct lov_lock *lck,
                                 struct cl_lock *sublock)
{
        struct cl_lock *lock = lck->lls_cl.cls_lock;
        int             result;

        LASSERT(cl_lock_is_mutexed(lock));

        cl_lock_mutex_put(env, lock);
        result = cl_lock_enqueue_wait(env, sublock, 0);
        cl_lock_mutex_get(env, lock);
        RETURN(result ?: CLO_REPEAT);
}
/**
 * Tries to advance a state machine of a given sub-lock toward enqueuing of
 * the top-lock.
 *
 * \retval 0 if state-transition can proceed
 * \retval -ve otherwise.
 */
static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
                                struct cl_lock *sublock,
                                struct cl_io *io, __u32 enqflags, int last)
{
        int result;

        /* first, try to enqueue a sub-lock ... */
        result = cl_enqueue_try(env, sublock, io, enqflags);
        if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL))
                /* if it is enqueued, try to `wait' on it---maybe it's already
                 * granted */
                result = cl_wait_try(env, sublock);
        /*
         * If the CEF_ASYNC flag is set, then all sub-locks can be enqueued
         * in parallel, otherwise---enqueue has to wait until the sub-lock is
         * granted before proceeding to the next one.
         */
        if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
            (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
                result = 0;
        return result;
}
/**
 * Helper function for lov_lock_enqueue() that creates a missing sub-lock.
 */
static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                            struct cl_io *io, struct lov_lock *lck, int idx)
{
        struct lov_lock_link *link = NULL;
        struct cl_lock       *sublock;
        int                   result;

        LASSERT(parent->cll_depth == 1);
        cl_lock_mutex_put(env, parent);
        sublock = lov_sublock_alloc(env, io, lck, idx, &link);
        if (!IS_ERR(sublock))
                cl_lock_mutex_get(env, sublock);
        cl_lock_mutex_get(env, parent);

        if (!IS_ERR(sublock)) {
                cl_lock_get_trust(sublock);
                if (parent->cll_state == CLS_QUEUING &&
                    lck->lls_sub[idx].sub_lock == NULL) {
                        lov_sublock_adopt(env, lck, sublock, idx, link);
                } else {
                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
                        /* other thread allocated the sub-lock, or enqueue is
                         * no longer going on */
                        cl_lock_mutex_put(env, parent);
                        cl_lock_unhold(env, sublock, "lov-parent", parent);
                        cl_lock_mutex_get(env, parent);
                }
                cl_lock_mutex_put(env, sublock);
                cl_lock_put(env, sublock);
                result = CLO_REPEAT;
        } else {
                result = PTR_ERR(sublock);
        }
        return result;
}
/**
 * Implementation of cl_lock_operations::clo_enqueue() for lov layer. This
 * function is rather subtle, as it enqueues the top-lock (i.e., advances the
 * top-lock state machine from CLS_QUEUING to CLS_ENQUEUED state) by juggling
 * sub-lock state machines in the face of sub-lock sharing (by multiple
 * top-locks), and concurrent sub-lock cancellations.
 */
static int lov_lock_enqueue(const struct lu_env *env,
                            const struct cl_lock_slice *slice,
                            struct cl_io *io, __u32 enqflags)
{
        struct cl_lock         *lock    = slice->cls_lock;
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env, lock);
        int i;
        int result;
        enum cl_lock_state minstate;

        for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct lov_lock_sub    *lls;
                struct cl_lock         *sublock;
                struct lov_sublock_env *subenv;

                if (lock->cll_state != CLS_QUEUING) {
                        /*
                         * Lock might have left QUEUING state if a previous
                         * iteration released its mutex. Stop enqueuing in
                         * this case and let the upper layer decide what to
                         * do.
                         */
                        LASSERT(i > 0 && result != 0);
                        break;
                }

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                /*
                 * Sub-lock might have been canceled, while the top-lock was
                 * cached.
                 */
                if (sub == NULL) {
                        result = lov_sublock_fill(env, lock, io, lck, i);
                        /* lov_sublock_fill() released @lock mutex,
                         * restart. */
                        break;
                }
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        lov_sublock_hold(env, lck, i);
                        rc = lov_lock_enqueue_one(subenv->lse_env, lck,
                                                  sublock, subenv->lse_io,
                                                  enqflags,
                                                  i == lck->lls_nr - 1);
                        minstate = min(minstate, sublock->cll_state);
                        if (rc == CLO_WAIT) {
                                switch (sublock->cll_state) {
                                case CLS_QUEUING:
                                        /* take recursive mutex, the lock is
                                         * released in
                                         * lov_lock_enqueue_wait. */
                                        cl_lock_mutex_get(env, sublock);
                                        lov_sublock_unlock(env, sub, closure,
                                                           subenv);
                                        rc = lov_lock_enqueue_wait(env, lck,
                                                                   sublock);
                                        break;
                                case CLS_CACHED:
                                        rc = lov_sublock_release(env, lck, i,
                                                                 1, rc);
                                        /* fall through */
                                default:
                                        lov_sublock_unlock(env, sub, closure,
                                                           subenv);
                                        break;
                                }
                        } else {
                                LASSERT(sublock->cll_conflict == NULL);
                                lov_sublock_unlock(env, sub, closure, subenv);
                        }
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
}
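/*
 * Illustrative sketch (not compiled): how a caller of clo_enqueue() reads
 * the combined return value computed above. The scenarios are hypothetical.
 */
#if 0
        rc = lov_lock_enqueue(env, slice, io, enqflags);
        if (rc == 0)
                /* every sub-lock reached at least CLS_ENQUEUED: the top-lock
                 * may advance from CLS_QUEUING to CLS_ENQUEUED */;
        else if (rc == CLO_WAIT)
                /* some sub-lock is still waiting for its grant: sleep and
                 * retry the top-operation */;
        else if (rc == CLO_REPEAT)
                /* a mutex was dropped (e.g., by lov_sublock_fill()): state
                 * must be revalidated and the operation retried at once */;
        else
                /* rc < 0: fundamental error, abort the enqueue */;
#endif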
static int lov_lock_unuse(const struct lu_env *env,
                          const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        int i;
        int result;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                /* top-lock state cannot change concurrently, because a
                 * single thread (the one that released the last hold)
                 * carries unlocking to completion. */
                LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL)
                        continue;

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        if (lls->sub_flags & LSF_HELD) {
                                LASSERT(sublock->cll_state == CLS_HELD ||
                                        sublock->cll_state == CLS_ENQUEUED);
                                /* For the AGL case, the sublock state may not
                                 * match the lower layer state, so sync them
                                 * up before unuse. */
                                if (sublock->cll_users == 1 &&
                                    sublock->cll_state == CLS_ENQUEUED) {
                                        __u32 save;

                                        save = sublock->cll_descr.cld_enq_flags;
                                        sublock->cll_descr.cld_enq_flags |=
                                                CEF_NO_REENQUEUE;
                                        cl_wait_try(env, sublock);
                                        sublock->cll_descr.cld_enq_flags = save;
                                }
                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                rc = lov_sublock_release(env, lck, i, 0, rc);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
        }

        if (result == 0 && lck->lls_cancel_race) {
                lck->lls_cancel_race = 0;
                result = -ESTALE;
        }
        cl_lock_closure_fini(closure);
        return result;
}
static void lov_lock_cancel(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        int i;
        int result;

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                /* top-lock state cannot change concurrently, because a
                 * single thread (the one that released the last hold)
                 * carries unlocking to completion. */
                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL)
                        continue;

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        if (!(lls->sub_flags & LSF_HELD)) {
                                lov_sublock_unlock(env, sub, closure, subenv);
                                continue;
                        }

                        switch (sublock->cll_state) {
                        case CLS_HELD:
                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                lov_sublock_release(env, lck, i, 0, 0);
                                break;
                        default:
                                /* TODO: it is not a good idea to cancel this
                                 * lock, because it is innocent. But it is
                                 * acceptable. The better way would be to
                                 * define a new lock method to unhold the
                                 * dlm lock. */
                                cl_lock_cancel(env, sublock);
                                lov_sublock_release(env, lck, i, 1, 0);
                                break;
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }

                if (rc == CLO_REPEAT) {
                        --i;
                        continue;
                }

                result = lov_subresult(result, rc);
        }

        if (result)
                CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
                              "lov_lock_cancel fails with %d.\n", result);

        cl_lock_closure_fini(closure);
}
static int lov_lock_wait(const struct lu_env *env,
                         const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        enum cl_lock_state      minstate;
        int                     reenqueued;
        int                     result;
        int                     i;

again:
        for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
             i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                LASSERT(sub != NULL);
                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state >= CLS_ENQUEUED);
                        if (sublock->cll_state < CLS_HELD)
                                rc = cl_wait_try(env, sublock);

                        minstate = min(minstate, sublock->cll_state);
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                if (rc == CLO_REENQUEUED) {
                        reenqueued++;
                        rc = 0;
                }
                result = lov_subresult(result, rc);
                if (result != 0)
                        break;
        }
        /* Each sublock can only be reenqueued once, so this will not loop
         * forever. */
        if (result == 0 && reenqueued != 0)
                goto again;
        cl_lock_closure_fini(closure);
        RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
}
static int lov_lock_use(const struct lu_env *env,
                        const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        int i;
        int result;

        LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);

        for (result = 0, i = 0; i < lck->lls_nr; ++i) {
                int rc;
                struct lovsub_lock     *sub;
                struct cl_lock         *sublock;
                struct lov_lock_sub    *lls;
                struct lov_sublock_env *subenv;

                LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);

                lls = &lck->lls_sub[i];
                sub = lls->sub_lock;
                if (sub == NULL) {
                        /*
                         * Sub-lock might have been canceled, while the
                         * top-lock was cached.
                         */
                        result = -ESTALE;
                        continue;
                }

                sublock = sub->lss_cl.cls_lock;
                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
                if (rc == 0) {
                        LASSERT(sublock->cll_state != CLS_FREEING);
                        lov_sublock_hold(env, lck, i);
                        if (sublock->cll_state == CLS_CACHED) {
                                rc = cl_use_try(subenv->lse_env, sublock, 0);
                                if (rc != 0)
                                        rc = lov_sublock_release(env, lck,
                                                                 i, 1, rc);
                        } else if (sublock->cll_state == CLS_NEW) {
                                /* Sub-lock might have been canceled, while
                                 * the top-lock was cached. */
                                result = -ESTALE;
                                lov_sublock_release(env, lck, i, 1, result);
                        }
                        lov_sublock_unlock(env, sub, closure, subenv);
                }
                result = lov_subresult(result, rc);
        }

        if (lck->lls_cancel_race) {
                /*
                 * If unlocking happened at the same time, then the sublock
                 * state should be FREEING, and lov_sublock_lock should
                 * return CLO_REPEAT. In this case, this function should
                 * return -ESTALE, and the upper layer should reset the lock
                 * state to NEW.
                 */
                lck->lls_cancel_race = 0;
                LASSERT(result != 0);
                result = -ESTALE;
        }
        cl_lock_closure_fini(closure);
        return result;
}
/* Dead code, kept disabled: this prototype references variables (slice, env,
 * need, lov, result) that are never declared, and it is not compiled. */
#if 0
static int lock_lock_multi_match()
{
        struct cl_lock          *lock    = slice->cls_lock;
        struct cl_lock_descr    *subneed = &lov_env_info(env)->lti_ldescr;
        struct lov_object       *loo     = cl2lov(lov->lls_cl.cls_obj);
        struct lov_layout_raid0 *r0      = lov_r0(loo);
        struct lov_lock_sub     *sub;
        struct cl_object        *subobj;
        obd_off fstart;
        obd_off fend;
        obd_off start;
        obd_off end;
        int i;

        fstart = cl_offset(need->cld_obj, need->cld_start);
        fend   = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
        subneed->cld_mode = need->cld_mode;
        cl_lock_mutex_get(env, lock);
        for (i = 0; i < lov->lls_nr; ++i) {
                sub = &lov->lls_sub[i];
                if (sub->sub_lock == NULL)
                        continue;
                subobj = sub->sub_descr.cld_obj;
                if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
                                           fstart, fend, &start, &end))
                        continue;
                subneed->cld_start = cl_index(subobj, start);
                subneed->cld_end   = cl_index(subobj, end);
                subneed->cld_obj   = subobj;
                if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
                        result = 0;
                        break;
                }
        }
        cl_lock_mutex_put(env, lock);
}
#endif
/**
 * Check if the extent region \a descr is covered by \a child against the
 * specific \a stripe.
 */
static int lov_lock_stripe_is_matching(const struct lu_env *env,
                                       struct lov_object *lov, int stripe,
                                       const struct cl_lock_descr *child,
                                       const struct cl_lock_descr *descr)
{
        struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
        obd_off start;
        obd_off end;
        int result;

        if (lov_r0(lov)->lo_nr == 1)
                return cl_lock_ext_match(child, descr);

        /*
         * For a multi-stripe object:
         * - make sure the descr only covers the child's stripe, and
         * - check if the extent is matching.
         */
        start = cl_offset(&lov->lo_cl, descr->cld_start);
        end   = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
        result = end - start <= lsm->lsm_stripe_size &&
                 stripe == lov_stripe_number(lsm, start) &&
                 stripe == lov_stripe_number(lsm, end);
        if (result) {
                struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
                obd_off sub_start;
                obd_off sub_end;

                subd->cld_obj  = NULL;   /* don't need sub object at all */
                subd->cld_mode = descr->cld_mode;
                subd->cld_gid  = descr->cld_gid;
                result = lov_stripe_intersects(lsm, stripe, start, end,
                                               &sub_start, &sub_end);
                if (result) {
                        subd->cld_start = cl_index(child->cld_obj, sub_start);
                        subd->cld_end   = cl_index(child->cld_obj, sub_end);
                        result = cl_lock_ext_match(child, subd);
                }
        }
        return result;
}
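/*
 * Worked example (hypothetical numbers): with a RAID0 layout of 4 stripes
 * and a 1MB stripe size, a \a descr covering file bytes [4MB, 5MB - 1] has
 * end - start < lsm_stripe_size, and lov_stripe_number() maps both
 * endpoints to stripe 0 ((4MB / 1MB) mod 4 == 0), so the extent lies
 * entirely within stripe 0 and can be matched against the child lock of
 * that stripe alone.
 */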
/**
 * An implementation of cl_lock_operations::clo_fits_into() method.
 *
 * Checks whether a lock (given by \a slice) is suitable for \a io.
 * Multi-stripe locks can be used only for "quick" io, like truncate, or
 * O_APPEND write.
 *
 * \see ccc_lock_fits_into().
 */
static int lov_lock_fits_into(const struct lu_env *env,
                              const struct cl_lock_slice *slice,
                              const struct cl_lock_descr *need,
                              const struct cl_io *io)
{
        struct lov_lock   *lov = cl2lov_lock(slice);
        struct lov_object *obj = cl2lov(slice->cls_obj);
        int result;

        LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
        LASSERT(lov->lls_nr > 0);

        if (need->cld_mode == CLM_GROUP)
                /*
                 * always allow matching a group lock.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        else if (lov->lls_nr == 1) {
                struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
                result = lov_lock_stripe_is_matching(env,
                                                     cl2lov(slice->cls_obj),
                                                     lov->lls_sub[0].sub_stripe,
                                                     got, need);
        } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
                   !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
                /*
                 * Multi-stripe locks are only suitable for `quick' IO and
                 * for glimpse.
                 */
                result = 0;
        else
                /*
                 * Most general case: multi-stripe existing lock, and
                 * (potentially) multi-stripe @need lock. Check that @need is
                 * covered by @lov's sub-locks.
                 *
                 * For now, ignore lock expansions made by the server, and
                 * match against the original lock extent.
                 */
                result = cl_lock_ext_match(&lov->lls_orig, need);
        CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
               PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
               lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
               result);
        return result;
}
void lov_lock_unlink(const struct lu_env *env,
                     struct lov_lock_link *link, struct lovsub_lock *sub)
{
        struct lov_lock *lck    = link->lll_super;
        struct cl_lock  *parent = lck->lls_cl.cls_lock;

        LASSERT(cl_lock_is_mutexed(parent));
        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));

        cfs_list_del_init(&link->lll_list);
        LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
        /* yank this sub-lock from parent's array */
        lck->lls_sub[link->lll_idx].sub_lock = NULL;
        LASSERT(lck->lls_nr_filled > 0);
        lck->lls_nr_filled--;
        lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
        cl_lock_put(env, parent);
        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
}
struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
                                         struct lov_lock *lck,
                                         struct lovsub_lock *sub)
{
        struct lov_lock_link *scan;

        LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));

        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                if (scan->lll_super == lck)
                        return scan;
        }
        return NULL;
}
/**
 * An implementation of cl_lock_operations::clo_delete() method. This is
 * invoked for "top-to-bottom" delete, when lock destruction starts from the
 * top-lock, e.g., as a result of inode destruction.
 *
 * Unlinks the top-lock from all its sub-locks. Sub-locks are not deleted
 * there: this is done separately elsewhere:
 *
 *     - for inode destruction, lov_object_delete() calls cl_object_kill()
 *       for each sub-object, purging its locks;
 *
 *     - in other cases (e.g., a fatal error with a top-lock) sub-locks are
 *       left in the cache.
 */
static void lov_lock_delete(const struct lu_env *env,
                            const struct cl_lock_slice *slice)
{
        struct lov_lock        *lck     = cl2lov_lock(slice);
        struct cl_lock_closure *closure = lov_closure_get(env,
                                                          slice->cls_lock);
        struct lov_lock_link   *link;
        int                     rc;
        int                     i;

        LASSERT(slice->cls_lock->cll_state == CLS_FREEING);

        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *lls = &lck->lls_sub[i];
                struct lovsub_lock  *lsl = lls->sub_lock;

                if (lsl == NULL) /* already removed */
                        continue;

                rc = lov_sublock_lock(env, lck, lls, closure, NULL);
                if (rc == CLO_REPEAT) {
                        --i;
                        continue;
                }

                LASSERT(rc == 0);
                LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);

                if (lls->sub_flags & LSF_HELD)
                        lov_sublock_release(env, lck, i, 1, 0);

                link = lov_lock_link_find(env, lck, lsl);
                LASSERT(link != NULL);
                lov_lock_unlink(env, link, lsl);
                LASSERT(lck->lls_sub[i].sub_lock == NULL);

                lov_sublock_unlock(env, lsl, closure, NULL);
        }

        cl_lock_closure_fini(closure);
}
static int lov_lock_print(const struct lu_env *env, void *cookie,
                          lu_printer_t p, const struct cl_lock_slice *slice)
{
        struct lov_lock *lck = cl2lov_lock(slice);
        int              i;

        (*p)(env, cookie, "%d\n", lck->lls_nr);
        for (i = 0; i < lck->lls_nr; ++i) {
                struct lov_lock_sub *sub;

                sub = &lck->lls_sub[i];
                (*p)(env, cookie, "    %d %x: ", i, sub->sub_flags);
                if (sub->sub_lock != NULL)
                        cl_lock_print(env, cookie, p,
                                      sub->sub_lock->lss_cl.cls_lock);
                else
                        (*p)(env, cookie, "---\n");
        }
        return 0;
}
static const struct cl_lock_operations lov_lock_ops = {
        .clo_fini      = lov_lock_fini,
        .clo_enqueue   = lov_lock_enqueue,
        .clo_wait      = lov_lock_wait,
        .clo_use       = lov_lock_use,
        .clo_unuse     = lov_lock_unuse,
        .clo_cancel    = lov_lock_cancel,
        .clo_fits_into = lov_lock_fits_into,
        .clo_delete    = lov_lock_delete,
        .clo_print     = lov_lock_print
};
int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result;

        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
        if (lck != NULL) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
                result = lov_lock_sub_init(env, lck, io);
        } else {
                result = -ENOMEM;
        }
        return result;
}
static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                               struct cl_lock *parent)
{
        struct cl_lock_closure *closure;

        closure = &lov_env_info(env)->lti_closure;
        LASSERT(cfs_list_empty(&closure->clc_list));
        cl_lock_closure_init(env, closure, parent, 1);
        return closure;
}
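/*
 * Illustrative sketch (not compiled): the closure returned above is always
 * used in a get/fini bracket by the top-lock operations in this file.
 */
#if 0
        struct cl_lock_closure *closure = lov_closure_get(env, lock);

        /* ... lov_sublock_lock()/lov_sublock_unlock() on each sub-lock,
         * enlisting them into the closure ... */

        cl_lock_closure_fini(closure);  /* release all enlisted locks */
#endif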