4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or have any questions.
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 #include <obd_class.h>
44 #include <obd_support.h>
45 #include <lustre_fid.h>
46 #include <libcfs/list.h>
47 #include <cl_object.h>
48 #include "cl_internal.h"
50 /** Lock class of cl_lock::cll_guard */
51 static struct lock_class_key cl_lock_guard_class;
52 static cfs_mem_cache_t *cl_lock_kmem;
54 static struct lu_kmem_descr cl_lock_caches[] = {
56 .ckd_cache = &cl_lock_kmem,
57 .ckd_name = "cl_lock_kmem",
58 .ckd_size = sizeof (struct cl_lock)
65 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
66 #define CS_LOCK_INC(o, item) \
67 cfs_atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
68 #define CS_LOCK_DEC(o, item) \
69 cfs_atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
70 #define CS_LOCKSTATE_INC(o, state) \
71 cfs_atomic_inc(&cl_object_site(o)->cs_locks_state[state])
72 #define CS_LOCKSTATE_DEC(o, state) \
73 cfs_atomic_dec(&cl_object_site(o)->cs_locks_state[state])
75 #define CS_LOCK_INC(o, item)
76 #define CS_LOCK_DEC(o, item)
77 #define CS_LOCKSTATE_INC(o, state)
78 #define CS_LOCKSTATE_DEC(o, state)
82 * Basic lock invariant that is maintained at all times. Caller either has a
83 * reference to \a lock, or somehow assures that \a lock cannot be freed.
85 * \see cl_lock_invariant()
87 static int cl_lock_invariant_trusted(const struct lu_env *env,
88 const struct cl_lock *lock)
90 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
91 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
92 lock->cll_holds >= lock->cll_users &&
93 lock->cll_holds >= 0 &&
94 lock->cll_users >= 0 &&
99 * Stronger lock invariant, checking that caller has a reference on a lock.
101 * \see cl_lock_invariant_trusted()
103 static int cl_lock_invariant(const struct lu_env *env,
104 const struct cl_lock *lock)
108 result = cfs_atomic_read(&lock->cll_ref) > 0 &&
109 cl_lock_invariant_trusted(env, lock);
110 if (!result && env != NULL)
111 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
116 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
118 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
120 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
124 * Returns a set of counters for this lock, depending on a lock nesting.
126 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
127 const struct cl_lock *lock)
129 struct cl_thread_info *info;
130 enum clt_nesting_level nesting;
132 info = cl_env_info(env);
133 nesting = cl_lock_nesting(lock);
134 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
135 return &info->clt_counters[nesting];
138 static void cl_lock_trace0(int level, const struct lu_env *env,
139 const char *prefix, const struct cl_lock *lock,
140 const char *func, const int line)
142 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
143 CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
144 "(%p/%d/%d) at %s():%d\n",
145 prefix, lock, cfs_atomic_read(&lock->cll_ref),
146 lock->cll_guarder, lock->cll_depth,
147 lock->cll_state, lock->cll_error, lock->cll_holds,
148 lock->cll_users, lock->cll_flags,
149 env, h->coh_nesting, cl_lock_nr_mutexed(env),
152 #define cl_lock_trace(level, env, prefix, lock) \
153 cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
155 #define RETIP ((unsigned long)__builtin_return_address(0))
157 #ifdef CONFIG_LOCKDEP
158 static struct lock_class_key cl_lock_key;
160 static void cl_lock_lockdep_init(struct cl_lock *lock)
162 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
165 static void cl_lock_lockdep_acquire(const struct lu_env *env,
166 struct cl_lock *lock, __u32 enqflags)
168 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
169 #ifdef HAVE_LOCK_MAP_ACQUIRE
170 lock_map_acquire(&lock->dep_map);
171 #else /* HAVE_LOCK_MAP_ACQUIRE */
172 lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
173 /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
174 /* check: */ 2, RETIP);
175 #endif /* HAVE_LOCK_MAP_ACQUIRE */
178 static void cl_lock_lockdep_release(const struct lu_env *env,
179 struct cl_lock *lock)
181 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
182 lock_release(&lock->dep_map, 0, RETIP);
185 #else /* !CONFIG_LOCKDEP */
187 static void cl_lock_lockdep_init(struct cl_lock *lock)
189 static void cl_lock_lockdep_acquire(const struct lu_env *env,
190 struct cl_lock *lock, __u32 enqflags)
192 static void cl_lock_lockdep_release(const struct lu_env *env,
193 struct cl_lock *lock)
196 #endif /* !CONFIG_LOCKDEP */
199 * Adds lock slice to the compound lock.
201 * This is called by cl_object_operations::coo_lock_init() methods to add a
202 * per-layer state to the lock. New state is added at the end of
203 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
205 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
207 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
208 struct cl_object *obj,
209 const struct cl_lock_operations *ops)
212 slice->cls_lock = lock;
213 cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
214 slice->cls_obj = obj;
215 slice->cls_ops = ops;
218 EXPORT_SYMBOL(cl_lock_slice_add);
221 * Returns true iff a lock with the mode \a has provides at least the same
222 * guarantees as a lock with the mode \a need.
224 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
226 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
227 need == CLM_PHANTOM || need == CLM_GROUP);
228 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
229 has == CLM_PHANTOM || has == CLM_GROUP);
230 CLASSERT(CLM_PHANTOM < CLM_READ);
231 CLASSERT(CLM_READ < CLM_WRITE);
232 CLASSERT(CLM_WRITE < CLM_GROUP);
234 if (has != CLM_GROUP)
239 EXPORT_SYMBOL(cl_lock_mode_match);
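/*
 * A minimal illustration of the intended mode semantics, assuming the
 * ordering asserted above (CLM_PHANTOM < CLM_READ < CLM_WRITE < CLM_GROUP)
 * and the group-mode special case; these calls are examples, not taken from
 * any particular caller:
 *
 *	cl_lock_mode_match(CLM_WRITE, CLM_READ)  == 1   WRITE covers READ
 *	cl_lock_mode_match(CLM_READ,  CLM_WRITE) == 0   READ is weaker
 *	cl_lock_mode_match(CLM_GROUP, CLM_READ)  == 0   GROUP matches GROUP only
 *
 * For group locks the gid is checked separately in cl_lock_ext_match().
 */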
242 * Returns true iff extent portions of lock descriptions match.
244 int cl_lock_ext_match(const struct cl_lock_descr *has,
245 const struct cl_lock_descr *need)
248 has->cld_start <= need->cld_start &&
249 has->cld_end >= need->cld_end &&
250 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
251 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
253 EXPORT_SYMBOL(cl_lock_ext_match);
256 * Returns true iff a lock with the description \a has provides at least the
257 * same guarantees as a lock with the description \a need.
259 int cl_lock_descr_match(const struct cl_lock_descr *has,
260 const struct cl_lock_descr *need)
263 cl_object_same(has->cld_obj, need->cld_obj) &&
264 cl_lock_ext_match(has, need);
266 EXPORT_SYMBOL(cl_lock_descr_match);
268 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
270 struct cl_object *obj = lock->cll_descr.cld_obj;
272 LINVRNT(!cl_lock_is_mutexed(lock));
275 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
277 while (!cfs_list_empty(&lock->cll_layers)) {
278 struct cl_lock_slice *slice;
280 slice = cfs_list_entry(lock->cll_layers.next,
281 struct cl_lock_slice, cls_linkage);
282 cfs_list_del_init(lock->cll_layers.next);
283 slice->cls_ops->clo_fini(env, slice);
285 CS_LOCK_DEC(obj, total);
286 CS_LOCKSTATE_DEC(obj, lock->cll_state);
287 lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
288 cl_object_put(env, obj);
289 lu_ref_fini(&lock->cll_reference);
290 lu_ref_fini(&lock->cll_holders);
291 mutex_destroy(&lock->cll_guard);
292 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
297 * Releases a reference on a lock.
299 * When last reference is released, lock is returned to the cache, unless it
300 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed immediately.
303 * \see cl_object_put(), cl_page_put()
305 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
307 struct cl_object *obj;
309 LINVRNT(cl_lock_invariant(env, lock));
311 obj = lock->cll_descr.cld_obj;
312 LINVRNT(obj != NULL);
314 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
315 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
317 if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
318 if (lock->cll_state == CLS_FREEING) {
319 LASSERT(cfs_list_empty(&lock->cll_linkage));
320 cl_lock_free(env, lock);
322 CS_LOCK_DEC(obj, busy);
326 EXPORT_SYMBOL(cl_lock_put);
329 * Acquires an additional reference to a lock.
331 * This can be called only by a caller already possessing a reference to \a lock.
334 * \see cl_object_get(), cl_page_get()
336 void cl_lock_get(struct cl_lock *lock)
338 LINVRNT(cl_lock_invariant(NULL, lock));
339 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
340 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
341 cfs_atomic_inc(&lock->cll_ref);
343 EXPORT_SYMBOL(cl_lock_get);
346 * Acquires a reference to a lock.
348 * This is much like cl_lock_get(), except that this function can be used to
349 * acquire initial reference to the cached lock. Caller has to deal with all
350 * possible races. Use with care!
352 * \see cl_page_get_trust()
354 void cl_lock_get_trust(struct cl_lock *lock)
356 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
357 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
358 if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
359 CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
361 EXPORT_SYMBOL(cl_lock_get_trust);
364 * Helper function destroying the lock that wasn't completely initialized.
366 * Other threads can acquire references to the top-lock through its
367 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
369 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
371 cl_lock_mutex_get(env, lock);
372 cl_lock_cancel(env, lock);
373 cl_lock_delete(env, lock);
374 cl_lock_mutex_put(env, lock);
375 cl_lock_put(env, lock);
378 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
379 struct cl_object *obj,
380 const struct cl_io *io,
381 const struct cl_lock_descr *descr)
383 struct cl_lock *lock;
384 struct lu_object_header *head;
387 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
389 cfs_atomic_set(&lock->cll_ref, 1);
390 lock->cll_descr = *descr;
391 lock->cll_state = CLS_NEW;
393 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
395 CFS_INIT_LIST_HEAD(&lock->cll_layers);
396 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
397 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
398 lu_ref_init(&lock->cll_reference);
399 lu_ref_init(&lock->cll_holders);
400 mutex_init(&lock->cll_guard);
401 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
402 cfs_waitq_init(&lock->cll_wq);
403 head = obj->co_lu.lo_header;
404 CS_LOCKSTATE_INC(obj, CLS_NEW);
405 CS_LOCK_INC(obj, total);
406 CS_LOCK_INC(obj, create);
407 cl_lock_lockdep_init(lock);
408 cfs_list_for_each_entry(obj, &head->loh_layers,
412 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
414 cl_lock_finish(env, lock);
420 lock = ERR_PTR(-ENOMEM);
425 * Transfer the lock into INTRANSIT state and return the original state.
427 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
428 * \post state: CLS_INTRANSIT
431 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
432 struct cl_lock *lock)
434 enum cl_lock_state state = lock->cll_state;
436 LASSERT(cl_lock_is_mutexed(lock));
437 LASSERT(state != CLS_INTRANSIT);
438 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
439 "Malformed lock state %d.\n", state);
441 cl_lock_state_set(env, lock, CLS_INTRANSIT);
442 lock->cll_intransit_owner = cfs_current();
443 cl_lock_hold_add(env, lock, "intransit", cfs_current());
446 EXPORT_SYMBOL(cl_lock_intransit);
449 * Exit the intransit state and restore the lock state to the original state
451 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
452 enum cl_lock_state state)
454 LASSERT(cl_lock_is_mutexed(lock));
455 LASSERT(lock->cll_state == CLS_INTRANSIT);
456 LASSERT(state != CLS_INTRANSIT);
457 LASSERT(lock->cll_intransit_owner == cfs_current());
459 lock->cll_intransit_owner = NULL;
460 cl_lock_state_set(env, lock, state);
461 cl_lock_unhold(env, lock, "intransit", cfs_current());
463 EXPORT_SYMBOL(cl_lock_extransit);
466 * Checks whether the lock is in the intransit state.
468 int cl_lock_is_intransit(struct cl_lock *lock)
470 LASSERT(cl_lock_is_mutexed(lock));
471 return lock->cll_state == CLS_INTRANSIT &&
472 lock->cll_intransit_owner != cfs_current();
474 EXPORT_SYMBOL(cl_lock_is_intransit);
476 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
477 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
478 * cover multiple stripes and can trigger cascading timeouts.
480 static int cl_lock_fits_into(const struct lu_env *env,
481 const struct cl_lock *lock,
482 const struct cl_lock_descr *need,
483 const struct cl_io *io)
485 const struct cl_lock_slice *slice;
487 LINVRNT(cl_lock_invariant_trusted(env, lock));
489 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
490 if (slice->cls_ops->clo_fits_into != NULL &&
491 !slice->cls_ops->clo_fits_into(env, slice, need, io))
497 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
498 struct cl_object *obj,
499 const struct cl_io *io,
500 const struct cl_lock_descr *need)
502 struct cl_lock *lock;
503 struct cl_object_header *head;
507 head = cl_object_header(obj);
508 LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
509 CS_LOCK_INC(obj, lookup);
510 cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
513 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
514 lock->cll_state < CLS_FREEING &&
515 lock->cll_error == 0 &&
516 !(lock->cll_flags & CLF_CANCELLED) &&
517 cl_lock_fits_into(env, lock, need, io);
518 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
519 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
522 cl_lock_get_trust(lock);
523 CS_LOCK_INC(obj, hit);
531 * Returns a lock matching description \a need.
533 * This is the main entry point into the cl_lock caching interface. First, a
534 * cache (implemented as a per-object linked list) is consulted. If lock is
535 * found there, it is returned immediately. Otherwise new lock is allocated
536 * and returned. In any case, additional reference to lock is acquired.
538 * \see cl_object_find(), cl_page_find()
540 static struct cl_lock *cl_lock_find(const struct lu_env *env,
541 const struct cl_io *io,
542 const struct cl_lock_descr *need)
544 struct cl_object_header *head;
545 struct cl_object *obj;
546 struct cl_lock *lock;
551 head = cl_object_header(obj);
553 spin_lock(&head->coh_lock_guard);
554 lock = cl_lock_lookup(env, obj, io, need);
555 spin_unlock(&head->coh_lock_guard);
558 lock = cl_lock_alloc(env, obj, io, need);
560 struct cl_lock *ghost;
562 spin_lock(&head->coh_lock_guard);
563 ghost = cl_lock_lookup(env, obj, io, need);
565 cfs_list_add_tail(&lock->cll_linkage,
567 spin_unlock(&head->coh_lock_guard);
568 CS_LOCK_INC(obj, busy);
570 spin_unlock(&head->coh_lock_guard);
572 * Other threads can acquire references to the
573 * top-lock through its sub-locks. Hence, it
574 * cannot be cl_lock_free()-ed immediately.
576 cl_lock_finish(env, lock);
585 * Returns existing lock matching given description. This is similar to
586 * cl_lock_find() except that no new lock is created, and returned lock is
587 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
589 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
590 const struct cl_lock_descr *need,
591 const char *scope, const void *source)
593 struct cl_object_header *head;
594 struct cl_object *obj;
595 struct cl_lock *lock;
598 head = cl_object_header(obj);
601 spin_lock(&head->coh_lock_guard);
602 lock = cl_lock_lookup(env, obj, io, need);
603 spin_unlock(&head->coh_lock_guard);
607 cl_lock_mutex_get(env, lock);
608 if (lock->cll_state == CLS_INTRANSIT)
609 /* Don't care about the return value. */
610 cl_lock_state_wait(env, lock);
611 if (lock->cll_state == CLS_FREEING) {
612 cl_lock_mutex_put(env, lock);
613 cl_lock_put(env, lock);
616 } while (lock == NULL);
618 cl_lock_hold_add(env, lock, scope, source);
619 cl_lock_user_add(env, lock);
620 if (lock->cll_state == CLS_CACHED)
621 cl_use_try(env, lock, 1);
622 if (lock->cll_state == CLS_HELD) {
623 cl_lock_mutex_put(env, lock);
624 cl_lock_lockdep_acquire(env, lock, 0);
625 cl_lock_put(env, lock);
627 cl_unuse_try(env, lock);
628 cl_lock_unhold(env, lock, scope, source);
629 cl_lock_mutex_put(env, lock);
630 cl_lock_put(env, lock);
636 EXPORT_SYMBOL(cl_lock_peek);
639 * Returns a slice within a lock, corresponding to the given layer in the device stack.
644 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
645 const struct lu_device_type *dtype)
647 const struct cl_lock_slice *slice;
649 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
652 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
653 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
658 EXPORT_SYMBOL(cl_lock_at);
660 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
662 struct cl_thread_counters *counters;
664 counters = cl_lock_counters(env, lock);
666 counters->ctc_nr_locks_locked++;
667 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
668 cl_lock_trace(D_TRACE, env, "got mutex", lock);
672 * Locks cl_lock object.
674 * This is used to manipulate cl_lock fields, and to serialize state
675 * transitions in the lock state machine.
677 * \post cl_lock_is_mutexed(lock)
679 * \see cl_lock_mutex_put()
681 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
683 LINVRNT(cl_lock_invariant(env, lock));
685 if (lock->cll_guarder == cfs_current()) {
686 LINVRNT(cl_lock_is_mutexed(lock));
687 LINVRNT(lock->cll_depth > 0);
689 struct cl_object_header *hdr;
690 struct cl_thread_info *info;
693 LINVRNT(lock->cll_guarder != cfs_current());
694 hdr = cl_object_header(lock->cll_descr.cld_obj);
696 * Check that mutices are taken in the bottom-to-top order.
698 info = cl_env_info(env);
699 for (i = 0; i < hdr->coh_nesting; ++i)
700 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
701 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
702 lock->cll_guarder = cfs_current();
703 LINVRNT(lock->cll_depth == 0);
705 cl_lock_mutex_tail(env, lock);
707 EXPORT_SYMBOL(cl_lock_mutex_get);
710 * Try-locks cl_lock object.
712 * \retval 0 \a lock was successfully locked
714 * \retval -EBUSY \a lock cannot be locked right now
716 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
718 * \see cl_lock_mutex_get()
720 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
724 LINVRNT(cl_lock_invariant_trusted(env, lock));
728 if (lock->cll_guarder == cfs_current()) {
729 LINVRNT(lock->cll_depth > 0);
730 cl_lock_mutex_tail(env, lock);
731 } else if (mutex_trylock(&lock->cll_guard)) {
732 LINVRNT(lock->cll_depth == 0);
733 lock->cll_guarder = cfs_current();
734 cl_lock_mutex_tail(env, lock);
739 EXPORT_SYMBOL(cl_lock_mutex_try);
742 * Unlocks cl_lock object.
744 * \pre cl_lock_is_mutexed(lock)
746 * \see cl_lock_mutex_get()
748 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
750 struct cl_thread_counters *counters;
752 LINVRNT(cl_lock_invariant(env, lock));
753 LINVRNT(cl_lock_is_mutexed(lock));
754 LINVRNT(lock->cll_guarder == cfs_current());
755 LINVRNT(lock->cll_depth > 0);
757 counters = cl_lock_counters(env, lock);
758 LINVRNT(counters->ctc_nr_locks_locked > 0);
760 cl_lock_trace(D_TRACE, env, "put mutex", lock);
761 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
762 counters->ctc_nr_locks_locked--;
763 if (--lock->cll_depth == 0) {
764 lock->cll_guarder = NULL;
765 mutex_unlock(&lock->cll_guard);
768 EXPORT_SYMBOL(cl_lock_mutex_put);
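/*
 * A minimal usage sketch for the mutex interface above; the same shape is
 * used by cl_lock_hold_mutex() later in this file. Lock fields and state
 * transitions are only touched with the mutex held:
 *
 *	cl_lock_mutex_get(env, lock);
 *	if (lock->cll_state < CLS_FREEING) {
 *		... examine or update the lock under the mutex ...
 *	}
 *	cl_lock_mutex_put(env, lock);
 *
 * The mutex is recursive for the owning thread (tracked by cll_depth), so a
 * layer may re-acquire it; only the outermost put releases cll_guard.
 */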
771 * Returns true iff lock's mutex is owned by the current thread.
773 int cl_lock_is_mutexed(struct cl_lock *lock)
775 return lock->cll_guarder == cfs_current();
777 EXPORT_SYMBOL(cl_lock_is_mutexed);
780 * Returns number of cl_lock mutices held by the current thread (environment).
782 int cl_lock_nr_mutexed(const struct lu_env *env)
784 struct cl_thread_info *info;
789 * NOTE: if summation across all nesting levels (currently 2) proves
790 * too expensive, a summary counter can be added to
791 * struct cl_thread_info.
793 info = cl_env_info(env);
794 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
795 locked += info->clt_counters[i].ctc_nr_locks_locked;
798 EXPORT_SYMBOL(cl_lock_nr_mutexed);
800 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
802 LINVRNT(cl_lock_is_mutexed(lock));
803 LINVRNT(cl_lock_invariant(env, lock));
805 if (!(lock->cll_flags & CLF_CANCELLED)) {
806 const struct cl_lock_slice *slice;
808 lock->cll_flags |= CLF_CANCELLED;
809 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
811 if (slice->cls_ops->clo_cancel != NULL)
812 slice->cls_ops->clo_cancel(env, slice);
818 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
820 struct cl_object_header *head;
821 const struct cl_lock_slice *slice;
823 LINVRNT(cl_lock_is_mutexed(lock));
824 LINVRNT(cl_lock_invariant(env, lock));
827 if (lock->cll_state < CLS_FREEING) {
828 LASSERT(lock->cll_state != CLS_INTRANSIT);
829 cl_lock_state_set(env, lock, CLS_FREEING);
831 head = cl_object_header(lock->cll_descr.cld_obj);
833 spin_lock(&head->coh_lock_guard);
834 cfs_list_del_init(&lock->cll_linkage);
835 spin_unlock(&head->coh_lock_guard);
838 * From now on, no new references to this lock can be acquired
839 * by cl_lock_lookup().
841 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
843 if (slice->cls_ops->clo_delete != NULL)
844 slice->cls_ops->clo_delete(env, slice);
847 * From now on, no new references to this lock can be acquired
848 * by layer-specific means (like a pointer from struct
849 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
852 * Lock will be finally freed in cl_lock_put() when last of
853 * existing references goes away.
860 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
861 * top-lock (nesting == 0) accounts for this modification in the per-thread
862 * debugging counters. Sub-lock holds can be released by a thread different
863 * from one that acquired it.
865 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
868 struct cl_thread_counters *counters;
869 enum clt_nesting_level nesting;
871 lock->cll_holds += delta;
872 nesting = cl_lock_nesting(lock);
873 if (nesting == CNL_TOP) {
874 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
875 counters->ctc_nr_held += delta;
876 LASSERT(counters->ctc_nr_held >= 0);
881 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
882 * cl_lock_hold_mod() for the explanation of the debugging code.
884 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
887 struct cl_thread_counters *counters;
888 enum clt_nesting_level nesting;
890 lock->cll_users += delta;
891 nesting = cl_lock_nesting(lock);
892 if (nesting == CNL_TOP) {
893 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
894 counters->ctc_nr_used += delta;
895 LASSERT(counters->ctc_nr_used >= 0);
899 void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
900 const char *scope, const void *source)
902 LINVRNT(cl_lock_is_mutexed(lock));
903 LINVRNT(cl_lock_invariant(env, lock));
904 LASSERT(lock->cll_holds > 0);
907 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
908 lu_ref_del(&lock->cll_holders, scope, source);
909 cl_lock_hold_mod(env, lock, -1);
910 if (lock->cll_holds == 0) {
911 CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
912 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
913 lock->cll_descr.cld_mode == CLM_GROUP ||
914 lock->cll_state != CLS_CACHED)
916 * If lock is still phantom or grouplock when user is
917 * done with it---destroy the lock.
919 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
920 if (lock->cll_flags & CLF_CANCELPEND) {
921 lock->cll_flags &= ~CLF_CANCELPEND;
922 cl_lock_cancel0(env, lock);
924 if (lock->cll_flags & CLF_DOOMED) {
925 /* no longer doomed: it's dead... Jim. */
926 lock->cll_flags &= ~CLF_DOOMED;
927 cl_lock_delete0(env, lock);
932 EXPORT_SYMBOL(cl_lock_hold_release);
935 * Waits until lock state is changed.
937 * This function is called with cl_lock mutex locked, atomically releases
938 * mutex and goes to sleep, waiting for a lock state change (signaled by
939 * cl_lock_signal()), and re-acquires the mutex before return.
941 * This function is used to wait until lock state machine makes some progress
942 * and to emulate synchronous operations on top of the asynchronous lock machine.
945 * \retval -EINTR wait was interrupted
947 * \retval 0 wait wasn't interrupted
949 * \pre cl_lock_is_mutexed(lock)
951 * \see cl_lock_signal()
953 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
955 cfs_waitlink_t waiter;
956 cfs_sigset_t blocked;
960 LINVRNT(cl_lock_is_mutexed(lock));
961 LINVRNT(cl_lock_invariant(env, lock));
962 LASSERT(lock->cll_depth == 1);
963 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
965 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
966 result = lock->cll_error;
968 /* To avoid being interrupted by the 'non-fatal' signals
969 * (SIGCHLD, for instance), we block them temporarily.
971 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
973 cfs_waitlink_init(&waiter);
974 cfs_waitq_add(&lock->cll_wq, &waiter);
975 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
976 cl_lock_mutex_put(env, lock);
978 LASSERT(cl_lock_nr_mutexed(env) == 0);
980 /* Returning ERESTARTSYS instead of EINTR so syscalls
981 * can be restarted if signals are pending here */
982 result = -ERESTARTSYS;
983 if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
984 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
985 if (!cfs_signal_pending())
989 cl_lock_mutex_get(env, lock);
990 cfs_set_current_state(CFS_TASK_RUNNING);
991 cfs_waitq_del(&lock->cll_wq, &waiter);
993 /* Restore old blocked signals */
994 cfs_restore_sigs(blocked);
998 EXPORT_SYMBOL(cl_lock_state_wait);
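/*
 * A sketch of the retry pattern built on cl_lock_state_wait();
 * cl_enqueue_locked() and cl_wait() below follow the same shape:
 *
 *	do {
 *		result = cl_enqueue_try(env, lock, io, enqflags);
 *		if (result == CLO_WAIT) {
 *			result = cl_lock_state_wait(env, lock);
 *			if (result == 0)
 *				continue;
 *		}
 *		break;
 *	} while (1);
 *
 * The lock mutex is dropped while sleeping and re-acquired before return,
 * so the lock state must be re-checked after every wait.
 */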
1000 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
1001 enum cl_lock_state state)
1003 const struct cl_lock_slice *slice;
1006 LINVRNT(cl_lock_is_mutexed(lock));
1007 LINVRNT(cl_lock_invariant(env, lock));
1009 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
1010 if (slice->cls_ops->clo_state != NULL)
1011 slice->cls_ops->clo_state(env, slice, state);
1012 cfs_waitq_broadcast(&lock->cll_wq);
1017 * Notifies waiters that lock state changed.
1019 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1020 * layers about state change by calling cl_lock_operations::clo_state()
1023 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1026 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1027 cl_lock_state_signal(env, lock, lock->cll_state);
1030 EXPORT_SYMBOL(cl_lock_signal);
1033 * Changes lock state.
1035 * This function is invoked to notify layers that the lock state changed, possibly
1036 * as a result of an asynchronous event such as call-back reception.
1038 * \post lock->cll_state == state
1040 * \see cl_lock_operations::clo_state()
1042 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1043 enum cl_lock_state state)
1046 LASSERT(lock->cll_state <= state ||
1047 (lock->cll_state == CLS_CACHED &&
1048 (state == CLS_HELD || /* lock found in cache */
1049 state == CLS_NEW || /* sub-lock canceled */
1050 state == CLS_INTRANSIT)) ||
1051 /* lock is in transit state */
1052 lock->cll_state == CLS_INTRANSIT);
1054 if (lock->cll_state != state) {
1055 CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
1056 CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
1058 cl_lock_state_signal(env, lock, state);
1059 lock->cll_state = state;
1063 EXPORT_SYMBOL(cl_lock_state_set);
1065 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1067 const struct cl_lock_slice *slice;
1073 LINVRNT(cl_lock_is_mutexed(lock));
1074 LINVRNT(cl_lock_invariant(env, lock));
1075 LASSERT(lock->cll_state == CLS_INTRANSIT);
1078 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1080 if (slice->cls_ops->clo_unuse != NULL) {
1081 result = slice->cls_ops->clo_unuse(env, slice);
1086 LASSERT(result != -ENOSYS);
1087 } while (result == CLO_REPEAT);
1093 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1094 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1095 * If @atomic is set, a failed use is rolled back by unusing the lock,
1096 * keeping the use process atomic.
1098 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1100 const struct cl_lock_slice *slice;
1102 enum cl_lock_state state;
1105 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1107 LASSERT(lock->cll_state == CLS_CACHED);
1108 if (lock->cll_error)
1109 RETURN(lock->cll_error);
1112 state = cl_lock_intransit(env, lock);
1113 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1114 if (slice->cls_ops->clo_use != NULL) {
1115 result = slice->cls_ops->clo_use(env, slice);
1120 LASSERT(result != -ENOSYS);
1122 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1128 if (result == -ESTALE) {
1130 * -ESTALE means the sublock was cancelled
1131 * in the meantime; set the lock state to
1132 * CLS_NEW here and ask the caller to repeat.
1135 result = CLO_REPEAT;
1138 /* @atomic means back-off-on-failure. */
1141 rc = cl_unuse_try_internal(env, lock);
1142 /* Vet the results. */
1143 if (rc < 0 && result > 0)
1148 cl_lock_extransit(env, lock, state);
1151 EXPORT_SYMBOL(cl_use_try);
1154 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers top-to-bottom.
1157 static int cl_enqueue_kick(const struct lu_env *env,
1158 struct cl_lock *lock,
1159 struct cl_io *io, __u32 flags)
1162 const struct cl_lock_slice *slice;
1166 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1167 if (slice->cls_ops->clo_enqueue != NULL) {
1168 result = slice->cls_ops->clo_enqueue(env,
1174 LASSERT(result != -ENOSYS);
1179 * Tries to enqueue a lock.
1181 * This function is called repeatedly by cl_enqueue() until either lock is
1182 * enqueued, or error occurs. This function does not block waiting for
1183 * networking communication to complete.
1185 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1186 * lock->cll_state == CLS_HELD)
1188 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1189 * \see cl_lock_state::CLS_ENQUEUED
1191 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1192 struct cl_io *io, __u32 flags)
1197 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1199 LINVRNT(cl_lock_is_mutexed(lock));
1201 result = lock->cll_error;
1205 switch (lock->cll_state) {
1207 cl_lock_state_set(env, lock, CLS_QUEUING);
1211 result = cl_enqueue_kick(env, lock, io, flags);
1212 /* In the AGL case, cl_lock::cll_state may
1213 * already have become CLS_HELD. */
1214 if (result == 0 && lock->cll_state == CLS_QUEUING)
1215 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1218 LASSERT(cl_lock_is_intransit(lock));
1222 /* yank lock from the cache. */
1223 result = cl_use_try(env, lock, 0);
1232 * impossible, only held locks with increased
1233 * ->cll_holds can be enqueued, and they cannot be
1238 } while (result == CLO_REPEAT);
1241 EXPORT_SYMBOL(cl_enqueue_try);
1244 * Cancel the conflicting lock found during previous enqueue.
1246 * \retval 0 conflicting lock has been canceled.
1247 * \retval -ve error code.
1249 int cl_lock_enqueue_wait(const struct lu_env *env,
1250 struct cl_lock *lock,
1253 struct cl_lock *conflict;
1257 LASSERT(cl_lock_is_mutexed(lock));
1258 LASSERT(lock->cll_state == CLS_QUEUING);
1259 LASSERT(lock->cll_conflict != NULL);
1261 conflict = lock->cll_conflict;
1262 lock->cll_conflict = NULL;
1264 cl_lock_mutex_put(env, lock);
1265 LASSERT(cl_lock_nr_mutexed(env) == 0);
1267 cl_lock_mutex_get(env, conflict);
1268 cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
1269 cl_lock_cancel(env, conflict);
1270 cl_lock_delete(env, conflict);
1272 while (conflict->cll_state != CLS_FREEING) {
1273 rc = cl_lock_state_wait(env, conflict);
1277 cl_lock_mutex_put(env, conflict);
1278 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1279 cl_lock_put(env, conflict);
1282 cl_lock_mutex_get(env, lock);
1287 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1289 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1290 struct cl_io *io, __u32 enqflags)
1296 LINVRNT(cl_lock_is_mutexed(lock));
1297 LINVRNT(cl_lock_invariant(env, lock));
1298 LASSERT(lock->cll_holds > 0);
1300 cl_lock_user_add(env, lock);
1302 result = cl_enqueue_try(env, lock, io, enqflags);
1303 if (result == CLO_WAIT) {
1304 if (lock->cll_conflict != NULL)
1305 result = cl_lock_enqueue_wait(env, lock, 1);
1307 result = cl_lock_state_wait(env, lock);
1314 cl_unuse_try(env, lock);
1315 LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1316 lock->cll_state == CLS_ENQUEUED ||
1317 lock->cll_state == CLS_HELD));
1324 * \pre current thread or io owns a hold on lock.
1326 * \post ergo(result == 0, lock->users increased)
1327 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1328 * lock->cll_state == CLS_HELD)
1330 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1331 struct cl_io *io, __u32 enqflags)
1337 cl_lock_lockdep_acquire(env, lock, enqflags);
1338 cl_lock_mutex_get(env, lock);
1339 result = cl_enqueue_locked(env, lock, io, enqflags);
1340 cl_lock_mutex_put(env, lock);
1342 cl_lock_lockdep_release(env, lock);
1343 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1344 lock->cll_state == CLS_HELD));
1347 EXPORT_SYMBOL(cl_enqueue);
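/*
 * For reference, the normal life of an enqueued lock, as driven by the
 * cl_enqueue_try() cases above, is:
 *
 *	CLS_NEW -> CLS_QUEUING -> CLS_ENQUEUED -> CLS_HELD
 *
 * where the last step is made by cl_wait_try() below (or, for AGL, directly
 * by the enqueue callback). Cached locks re-enter the held state through
 * cl_use_try(), and CLS_INTRANSIT marks a lock whose state is temporarily
 * owned by a single thread (see cl_lock_intransit()/cl_lock_extransit()).
 */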
1350 * Tries to unlock a lock.
1352 * This function is called to release underlying resource:
1353 * 1. for top lock, the resource is sublocks it held;
1354 * 2. for sublock, the resource is the reference to dlmlock.
1356 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1358 * \see cl_unuse() cl_lock_operations::clo_unuse()
1359 * \see cl_lock_state::CLS_CACHED
1361 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1364 enum cl_lock_state state = CLS_NEW;
1367 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1369 if (lock->cll_users > 1) {
1370 cl_lock_user_del(env, lock);
1374 /* Only a lock in CLS_HELD or CLS_ENQUEUED state can hold
1375 * underlying resources. */
1376 if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
1377 cl_lock_user_del(env, lock);
1382 * New lock users (->cll_users) are not protecting unlocking
1383 * from proceeding. From this point, lock eventually reaches
1384 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1387 state = cl_lock_intransit(env, lock);
1389 result = cl_unuse_try_internal(env, lock);
1390 LASSERT(lock->cll_state == CLS_INTRANSIT);
1391 LASSERT(result != CLO_WAIT);
1392 cl_lock_user_del(env, lock);
1393 if (result == 0 || result == -ESTALE) {
1395 * Return lock back to the cache. This is the only
1396 * place where lock is moved into CLS_CACHED state.
1398 * If one of ->clo_unuse() methods returned -ESTALE, lock
1399 * cannot be placed into cache and has to be
1400 * re-initialized. This happens e.g., when a sub-lock was
1401 * canceled while unlocking was in progress.
1403 if (state == CLS_HELD && result == 0)
1407 cl_lock_extransit(env, lock, state);
1410 * Hide -ESTALE error.
1411 * Suppose the lock is a glimpse lock with multiple stripes,
1412 * one of its sublocks returned -ENAVAIL, and the other
1413 * sublocks matched write locks. In this case
1414 * we can't set this lock to error, because otherwise some of
1415 * its sublocks may not be canceled and some dirty
1416 * pages would never be written to the OSTs. -jay
1420 CERROR("result = %d, this is unlikely!\n", result);
1422 cl_lock_extransit(env, lock, state);
1424 RETURN(result ?: lock->cll_error);
1426 EXPORT_SYMBOL(cl_unuse_try);
1428 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1433 result = cl_unuse_try(env, lock);
1435 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1443 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1446 cl_lock_mutex_get(env, lock);
1447 cl_unuse_locked(env, lock);
1448 cl_lock_mutex_put(env, lock);
1449 cl_lock_lockdep_release(env, lock);
1452 EXPORT_SYMBOL(cl_unuse);
1455 * Tries to wait for a lock.
1457 * This function is called repeatedly by cl_wait() until either lock is
1458 * granted, or error occurs. This function does not block waiting for network
1459 * communication to complete.
1461 * \see cl_wait() cl_lock_operations::clo_wait()
1462 * \see cl_lock_state::CLS_HELD
1464 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1466 const struct cl_lock_slice *slice;
1470 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1472 LINVRNT(cl_lock_is_mutexed(lock));
1473 LINVRNT(cl_lock_invariant(env, lock));
1474 LASSERTF(lock->cll_state == CLS_QUEUING ||
1475 lock->cll_state == CLS_ENQUEUED ||
1476 lock->cll_state == CLS_HELD ||
1477 lock->cll_state == CLS_INTRANSIT,
1478 "lock state: %d\n", lock->cll_state);
1479 LASSERT(lock->cll_users > 0);
1480 LASSERT(lock->cll_holds > 0);
1482 result = lock->cll_error;
1486 if (cl_lock_is_intransit(lock)) {
1491 if (lock->cll_state == CLS_HELD)
1496 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1497 if (slice->cls_ops->clo_wait != NULL) {
1498 result = slice->cls_ops->clo_wait(env, slice);
1503 LASSERT(result != -ENOSYS);
1505 LASSERT(lock->cll_state != CLS_INTRANSIT);
1506 cl_lock_state_set(env, lock, CLS_HELD);
1508 } while (result == CLO_REPEAT);
1511 EXPORT_SYMBOL(cl_wait_try);
1514 * Waits until enqueued lock is granted.
1516 * \pre current thread or io owns a hold on the lock
1517 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1518 * lock->cll_state == CLS_HELD)
1520 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1522 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1527 cl_lock_mutex_get(env, lock);
1529 LINVRNT(cl_lock_invariant(env, lock));
1530 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1531 "Wrong state %d \n", lock->cll_state);
1532 LASSERT(lock->cll_holds > 0);
1535 result = cl_wait_try(env, lock);
1536 if (result == CLO_WAIT) {
1537 result = cl_lock_state_wait(env, lock);
1544 cl_unuse_try(env, lock);
1545 cl_lock_lockdep_release(env, lock);
1547 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1548 cl_lock_mutex_put(env, lock);
1549 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1552 EXPORT_SYMBOL(cl_wait);
1555 * Executes cl_lock_operations::clo_weigh() and sums the results to estimate lock weight.
1558 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1560 const struct cl_lock_slice *slice;
1561 unsigned long pound;
1562 unsigned long ounce;
1565 LINVRNT(cl_lock_is_mutexed(lock));
1566 LINVRNT(cl_lock_invariant(env, lock));
1569 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1570 if (slice->cls_ops->clo_weigh != NULL) {
1571 ounce = slice->cls_ops->clo_weigh(env, slice);
1573 if (pound < ounce) /* over-weight^Wflow */
1579 EXPORT_SYMBOL(cl_lock_weigh);
1582 * Notifies layers that lock description changed.
1584 * The server can grant the client a lock different from the one that was requested
1585 * (e.g., larger in extent). This method is called when the actually granted lock
1586 * description becomes known, to let layers accommodate the changed lock description.
1589 * \see cl_lock_operations::clo_modify()
1591 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1592 const struct cl_lock_descr *desc)
1594 const struct cl_lock_slice *slice;
1595 struct cl_object *obj = lock->cll_descr.cld_obj;
1596 struct cl_object_header *hdr = cl_object_header(obj);
1600 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1601 /* don't allow object to change */
1602 LASSERT(obj == desc->cld_obj);
1603 LINVRNT(cl_lock_is_mutexed(lock));
1604 LINVRNT(cl_lock_invariant(env, lock));
1606 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1607 if (slice->cls_ops->clo_modify != NULL) {
1608 result = slice->cls_ops->clo_modify(env, slice, desc);
1613 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1614 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1616 * Just replace description in place. Nothing more is needed for
1617 * now. If locks were indexed according to their extent and/or mode,
1618 * that index would have to be updated here.
1620 spin_lock(&hdr->coh_lock_guard);
1621 lock->cll_descr = *desc;
1622 spin_unlock(&hdr->coh_lock_guard);
1625 EXPORT_SYMBOL(cl_lock_modify);
1628 * Initializes lock closure with a given origin.
1630 * \see cl_lock_closure
1632 void cl_lock_closure_init(const struct lu_env *env,
1633 struct cl_lock_closure *closure,
1634 struct cl_lock *origin, int wait)
1636 LINVRNT(cl_lock_is_mutexed(origin));
1637 LINVRNT(cl_lock_invariant(env, origin));
1639 CFS_INIT_LIST_HEAD(&closure->clc_list);
1640 closure->clc_origin = origin;
1641 closure->clc_wait = wait;
1642 closure->clc_nr = 0;
1644 EXPORT_SYMBOL(cl_lock_closure_init);
1647 * Builds a closure of \a lock.
1649 * Building of a closure consists of adding initial lock (\a lock) into it,
1650 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1651 * methods might call cl_lock_closure_build() recursively again, adding more
1652 * locks to the closure, etc.
1654 * \see cl_lock_closure
1656 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1657 struct cl_lock_closure *closure)
1659 const struct cl_lock_slice *slice;
1663 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1664 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1666 result = cl_lock_enclosure(env, lock, closure);
1668 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1669 if (slice->cls_ops->clo_closure != NULL) {
1670 result = slice->cls_ops->clo_closure(env, slice,
1678 cl_lock_disclosure(env, closure);
1681 EXPORT_SYMBOL(cl_lock_closure_build);
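/*
 * A minimal sketch of the closure life-cycle, assuming the caller already
 * holds the mutex of the origin lock:
 *
 *	struct cl_lock_closure closure;
 *
 *	cl_lock_closure_init(env, &closure, origin, 1);
 *	rc = cl_lock_closure_build(env, lock, &closure);
 *	if (rc == 0) {
 *		... all enclosed locks are now mutexed ...
 *		cl_lock_disclosure(env, &closure);
 *	}
 *	cl_lock_closure_fini(&closure);
 *
 * On failure cl_lock_closure_build() has already disclosed the closure, so
 * only cl_lock_closure_fini() remains to be called.
 */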
1684 * Adds new lock to a closure.
1686 * Try-locks \a lock and if succeeded, adds it to the closure (never more than
1687 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1688 * until next try-lock is likely to succeed.
1690 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1691 struct cl_lock_closure *closure)
1695 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1696 if (!cl_lock_mutex_try(env, lock)) {
1698 * If lock->cll_inclosure is not empty, lock is already in this closure.
1701 if (cfs_list_empty(&lock->cll_inclosure)) {
1702 cl_lock_get_trust(lock);
1703 lu_ref_add(&lock->cll_reference, "closure", closure);
1704 cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1707 cl_lock_mutex_put(env, lock);
1710 cl_lock_disclosure(env, closure);
1711 if (closure->clc_wait) {
1712 cl_lock_get_trust(lock);
1713 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1714 cl_lock_mutex_put(env, closure->clc_origin);
1716 LASSERT(cl_lock_nr_mutexed(env) == 0);
1717 cl_lock_mutex_get(env, lock);
1718 cl_lock_mutex_put(env, lock);
1720 cl_lock_mutex_get(env, closure->clc_origin);
1721 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1722 cl_lock_put(env, lock);
1724 result = CLO_REPEAT;
1728 EXPORT_SYMBOL(cl_lock_enclosure);
1730 /** Releases mutices of enclosed locks. */
1731 void cl_lock_disclosure(const struct lu_env *env,
1732 struct cl_lock_closure *closure)
1734 struct cl_lock *scan;
1735 struct cl_lock *temp;
1737 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1738 cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1740 cfs_list_del_init(&scan->cll_inclosure);
1741 cl_lock_mutex_put(env, scan);
1742 lu_ref_del(&scan->cll_reference, "closure", closure);
1743 cl_lock_put(env, scan);
1746 LASSERT(closure->clc_nr == 0);
1748 EXPORT_SYMBOL(cl_lock_disclosure);
1750 /** Finalizes a closure. */
1751 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1753 LASSERT(closure->clc_nr == 0);
1754 LASSERT(cfs_list_empty(&closure->clc_list));
1756 EXPORT_SYMBOL(cl_lock_closure_fini);
1759 * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
1760 * destroyed, then destroys the lock. If there are holds on the lock,
1761 * destruction is postponed until all holds are released. This is called when a decision is
1762 * made to destroy the lock in the future. E.g., when a blocking AST is
1763 * received on it, or fatal communication error happens.
1765 * Caller must have a reference on this lock to prevent a situation where
1766 * a deleted lock lingers in memory indefinitely because nobody calls
1767 * cl_lock_put() to finish it.
1769 * \pre atomic_read(&lock->cll_ref) > 0
1770 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1771 * cl_lock_nr_mutexed(env) == 1)
1772 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1773 * held, as deletion of sub-locks might require releasing a top-lock mutex]
1776 * \see cl_lock_operations::clo_delete()
1777 * \see cl_lock::cll_holds
1779 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1781 LINVRNT(cl_lock_is_mutexed(lock));
1782 LINVRNT(cl_lock_invariant(env, lock));
1783 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1784 cl_lock_nr_mutexed(env) == 1));
1787 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1788 if (lock->cll_holds == 0)
1789 cl_lock_delete0(env, lock);
1791 lock->cll_flags |= CLF_DOOMED;
1794 EXPORT_SYMBOL(cl_lock_delete);
1797 * Mark lock as irrecoverably failed, and mark it for destruction. This
1798 * happens when, e.g., the server fails to grant a lock to us, or networking fails.
1801 * \pre atomic_read(&lock->cll_ref) > 0
1803 * \see clo_lock_delete()
1804 * \see cl_lock::cll_holds
1806 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1808 LINVRNT(cl_lock_is_mutexed(lock));
1809 LINVRNT(cl_lock_invariant(env, lock));
1812 if (lock->cll_error == 0 && error != 0) {
1813 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1814 lock->cll_error = error;
1815 cl_lock_signal(env, lock);
1816 cl_lock_cancel(env, lock);
1817 cl_lock_delete(env, lock);
1821 EXPORT_SYMBOL(cl_lock_error);
1824 * Cancels this lock. Notifies layers
1825 * (bottom-to-top) that lock is being cancelled, then destroys the lock. If
1826 * there are holds on the lock, cancellation is postponed until
1827 * all holds are released.
1829 * Cancellation notification is delivered to layers at most once.
1831 * \see cl_lock_operations::clo_cancel()
1832 * \see cl_lock::cll_holds
1834 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1836 LINVRNT(cl_lock_is_mutexed(lock));
1837 LINVRNT(cl_lock_invariant(env, lock));
1840 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1841 if (lock->cll_holds == 0)
1842 cl_lock_cancel0(env, lock);
1844 lock->cll_flags |= CLF_CANCELPEND;
1847 EXPORT_SYMBOL(cl_lock_cancel);
1850 * Finds an existing lock covering given index and optionally different from a
1851 * given \a except lock.
1853 struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
1854 struct cl_object *obj, pgoff_t index,
1855 struct cl_lock *except,
1856 int pending, int canceld)
1858 struct cl_object_header *head;
1859 struct cl_lock *scan;
1860 struct cl_lock *lock;
1861 struct cl_lock_descr *need;
1865 head = cl_object_header(obj);
1866 need = &cl_env_info(env)->clt_descr;
1869 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1871 need->cld_start = need->cld_end = index;
1872 need->cld_enq_flags = 0;
1874 spin_lock(&head->coh_lock_guard);
1875 /* It is fine to match any group lock since there could be only one
1876 * with a unique gid and it conflicts with all other lock modes too */
1877 cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1878 if (scan != except &&
1879 (scan->cll_descr.cld_mode == CLM_GROUP ||
1880 cl_lock_ext_match(&scan->cll_descr, need)) &&
1881 scan->cll_state >= CLS_HELD &&
1882 scan->cll_state < CLS_FREEING &&
1884 * This check is racy as the lock can be canceled right
1885 * after it is done, but this is fine, because page exists
1888 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1889 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1890 /* Don't increase cs_hit here since this
1891 * is just a helper function. */
1892 cl_lock_get_trust(scan);
1897 spin_unlock(&head->coh_lock_guard);
1900 EXPORT_SYMBOL(cl_lock_at_pgoff);
1903 * Calculate the page offset at the layer of @lock.
1904 * At the time of this writing, @page is a top page and @lock is a sub-lock.
1906 static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
1908 struct lu_device_type *dtype;
1909 const struct cl_page_slice *slice;
1911 dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
1912 slice = cl_page_at(page, dtype);
1913 LASSERT(slice != NULL);
1914 return slice->cpl_page->cp_index;
1918 * Check if page @page is covered by an extra lock or discard it.
1920 static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
1921 struct cl_page *page, void *cbdata)
1923 struct cl_thread_info *info = cl_env_info(env);
1924 struct cl_lock *lock = cbdata;
1925 pgoff_t index = pgoff_at_lock(page, lock);
1927 if (index >= info->clt_fn_index) {
1928 struct cl_lock *tmp;
1930 /* refresh non-overlapped index */
1931 tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
1934 /* Cache the first-non-overlapped index so as to skip
1935 * all pages within [index, clt_fn_index). This
1936 * is safe because if tmp lock is canceled, it will
1937 * discard these pages. */
1938 info->clt_fn_index = tmp->cll_descr.cld_end + 1;
1939 if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
1940 info->clt_fn_index = CL_PAGE_EOF;
1941 cl_lock_put(env, tmp);
1942 } else if (cl_page_own(env, io, page) == 0) {
1943 /* discard the page */
1944 cl_page_unmap(env, io, page);
1945 cl_page_discard(env, io, page);
1946 cl_page_disown(env, io, page);
1948 LASSERT(page->cp_state == CPS_FREEING);
1952 info->clt_next_index = index + 1;
1953 return CLP_GANG_OKAY;
1956 static int discard_cb(const struct lu_env *env, struct cl_io *io,
1957 struct cl_page *page, void *cbdata)
1959 struct cl_thread_info *info = cl_env_info(env);
1960 struct cl_lock *lock = cbdata;
1962 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
1963 KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1964 !PageWriteback(cl_page_vmpage(env, page))));
1965 KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1966 !PageDirty(cl_page_vmpage(env, page))));
1968 info->clt_next_index = pgoff_at_lock(page, lock) + 1;
1969 if (cl_page_own(env, io, page) == 0) {
1970 /* discard the page */
1971 cl_page_unmap(env, io, page);
1972 cl_page_discard(env, io, page);
1973 cl_page_disown(env, io, page);
1975 LASSERT(page->cp_state == CPS_FREEING);
1978 return CLP_GANG_OKAY;
1982 * Discards pages protected by the given lock. This function traverses the radix
1983 * tree to find all covering pages and discards them. If a page is covered
1984 * by other locks, it should remain in the cache.
1986 * If error happens on any step, the process continues anyway (the reasoning
1987 * behind this being that lock cancellation cannot be delayed indefinitely).
1989 int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
1991 struct cl_thread_info *info = cl_env_info(env);
1992 struct cl_io *io = &info->clt_io;
1993 struct cl_lock_descr *descr = &lock->cll_descr;
1994 cl_page_gang_cb_t cb;
1998 LINVRNT(cl_lock_invariant(env, lock));
2001 io->ci_obj = cl_object_top(descr->cld_obj);
2002 io->ci_ignore_layout = 1;
2003 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
2007 cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
2008 info->clt_fn_index = info->clt_next_index = descr->cld_start;
2010 res = cl_page_gang_lookup(env, descr->cld_obj, io,
2011 info->clt_next_index, descr->cld_end,
2013 if (info->clt_next_index > descr->cld_end)
2016 if (res == CLP_GANG_RESCHED)
2018 } while (res != CLP_GANG_OKAY);
2020 cl_io_fini(env, io);
2023 EXPORT_SYMBOL(cl_lock_discard_pages);
2026 * Eliminate all locks for a given object.
2028 * Caller has to guarantee that no lock is in active use.
2030 * \param cancel when this is set, cl_locks_prune() cancels locks before destroying them.
2033 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
2035 struct cl_object_header *head;
2036 struct cl_lock *lock;
2039 head = cl_object_header(obj);
2041 * If locks are destroyed without cancellation, all pages must be
2042 * already destroyed (as otherwise they will be left unprotected).
2044 LASSERT(ergo(!cancel,
2045 head->coh_tree.rnode == NULL && head->coh_pages == 0));
2047 spin_lock(&head->coh_lock_guard);
2048 while (!cfs_list_empty(&head->coh_locks)) {
2049 lock = container_of(head->coh_locks.next,
2050 struct cl_lock, cll_linkage);
2051 cl_lock_get_trust(lock);
2052 spin_unlock(&head->coh_lock_guard);
2053 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2056 cl_lock_mutex_get(env, lock);
2057 if (lock->cll_state < CLS_FREEING) {
2058 LASSERT(lock->cll_users <= 1);
2059 if (unlikely(lock->cll_users == 1)) {
2060 struct l_wait_info lwi = { 0 };
2062 cl_lock_mutex_put(env, lock);
2063 l_wait_event(lock->cll_wq,
2064 lock->cll_users == 0,
2070 cl_lock_cancel(env, lock);
2071 cl_lock_delete(env, lock);
2073 cl_lock_mutex_put(env, lock);
2074 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2075 cl_lock_put(env, lock);
2076 spin_lock(&head->coh_lock_guard);
2078 spin_unlock(&head->coh_lock_guard);
2081 EXPORT_SYMBOL(cl_locks_prune);
2083 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2084 const struct cl_io *io,
2085 const struct cl_lock_descr *need,
2086 const char *scope, const void *source)
2088 struct cl_lock *lock;
2093 lock = cl_lock_find(env, io, need);
2096 cl_lock_mutex_get(env, lock);
2097 if (lock->cll_state < CLS_FREEING &&
2098 !(lock->cll_flags & CLF_CANCELLED)) {
2099 cl_lock_hold_mod(env, lock, +1);
2100 lu_ref_add(&lock->cll_holders, scope, source);
2101 lu_ref_add(&lock->cll_reference, scope, source);
2104 cl_lock_mutex_put(env, lock);
2105 cl_lock_put(env, lock);
2111 * Returns a lock matching \a need description with a reference and a hold on it.
2114 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2115 * guarantees that lock is not in the CLS_FREEING state on return.
2117 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2118 const struct cl_lock_descr *need,
2119 const char *scope, const void *source)
2121 struct cl_lock *lock;
2125 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2127 cl_lock_mutex_put(env, lock);
2130 EXPORT_SYMBOL(cl_lock_hold);
2133 * Main high-level entry point of cl_lock interface that finds existing or
2134 * enqueues new lock matching given description.
2136 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2137 const struct cl_lock_descr *need,
2138 const char *scope, const void *source)
2140 struct cl_lock *lock;
2142 __u32 enqflags = need->cld_enq_flags;
2146 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2150 rc = cl_enqueue_locked(env, lock, io, enqflags);
2152 if (cl_lock_fits_into(env, lock, need, io)) {
2153 if (!(enqflags & CEF_AGL)) {
2154 cl_lock_mutex_put(env, lock);
2155 cl_lock_lockdep_acquire(env, lock,
2161 cl_unuse_locked(env, lock);
2163 cl_lock_trace(D_DLMTRACE, env,
2164 rc <= 0 ? "enqueue failed" : "agl succeed", lock);
2165 cl_lock_hold_release(env, lock, scope, source);
2166 cl_lock_mutex_put(env, lock);
2167 lu_ref_del(&lock->cll_reference, scope, source);
2168 cl_lock_put(env, lock);
2170 LASSERT(enqflags & CEF_AGL);
2172 } else if (rc != 0) {
2178 EXPORT_SYMBOL(cl_lock_request);
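/*
 * A minimal caller-side sketch; the "my-scope"/cfs_current() pair is an
 * arbitrary example lu_ref debugging cookie chosen by the caller, and the
 * error handling is abbreviated:
 *
 *	lock = cl_lock_request(env, io, need, "my-scope", cfs_current());
 *	if (lock != NULL && !IS_ERR(lock)) {
 *		rc = cl_wait(env, lock);
 *		... access pages covered by lock->cll_descr ...
 *		cl_unuse(env, lock);
 *		cl_lock_release(env, lock, "my-scope", cfs_current());
 *	}
 */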
2181 * Adds a hold to a known lock.
2183 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2184 const char *scope, const void *source)
2186 LINVRNT(cl_lock_is_mutexed(lock));
2187 LINVRNT(cl_lock_invariant(env, lock));
2188 LASSERT(lock->cll_state != CLS_FREEING);
2191 cl_lock_hold_mod(env, lock, +1);
2193 lu_ref_add(&lock->cll_holders, scope, source);
2194 lu_ref_add(&lock->cll_reference, scope, source);
2197 EXPORT_SYMBOL(cl_lock_hold_add);
2200 * Releases a hold and a reference on a lock, on which caller acquired a
2203 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2204 const char *scope, const void *source)
2206 LINVRNT(cl_lock_invariant(env, lock));
2208 cl_lock_hold_release(env, lock, scope, source);
2209 lu_ref_del(&lock->cll_reference, scope, source);
2210 cl_lock_put(env, lock);
2213 EXPORT_SYMBOL(cl_lock_unhold);
2216 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2218 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2219 const char *scope, const void *source)
2221 LINVRNT(cl_lock_invariant(env, lock));
2223 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2224 cl_lock_mutex_get(env, lock);
2225 cl_lock_hold_release(env, lock, scope, source);
2226 cl_lock_mutex_put(env, lock);
2227 lu_ref_del(&lock->cll_reference, scope, source);
2228 cl_lock_put(env, lock);
2231 EXPORT_SYMBOL(cl_lock_release);
2233 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2235 LINVRNT(cl_lock_is_mutexed(lock));
2236 LINVRNT(cl_lock_invariant(env, lock));
2239 cl_lock_used_mod(env, lock, +1);
2242 EXPORT_SYMBOL(cl_lock_user_add);
2244 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2246 LINVRNT(cl_lock_is_mutexed(lock));
2247 LINVRNT(cl_lock_invariant(env, lock));
2248 LASSERT(lock->cll_users > 0);
2251 cl_lock_used_mod(env, lock, -1);
2252 if (lock->cll_users == 0)
2253 cfs_waitq_broadcast(&lock->cll_wq);
2256 EXPORT_SYMBOL(cl_lock_user_del);
2258 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2260 static const char *names[] = {
2261 [CLM_PHANTOM] = "P",
2266 if (0 <= mode && mode < ARRAY_SIZE(names))
2271 EXPORT_SYMBOL(cl_lock_mode_name);
2274 * Prints human readable representation of a lock description.
2276 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2277 lu_printer_t printer,
2278 const struct cl_lock_descr *descr)
2280 const struct lu_fid *fid;
2282 fid = lu_object_fid(&descr->cld_obj->co_lu);
2283 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2285 EXPORT_SYMBOL(cl_lock_descr_print);
2288 * Prints human readable representation of \a lock to the \a f.
2290 void cl_lock_print(const struct lu_env *env, void *cookie,
2291 lu_printer_t printer, const struct cl_lock *lock)
2293 const struct cl_lock_slice *slice;
2294 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2295 lock, cfs_atomic_read(&lock->cll_ref),
2296 lock->cll_state, lock->cll_error, lock->cll_holds,
2297 lock->cll_users, lock->cll_flags);
2298 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2299 (*printer)(env, cookie, " {\n");
2301 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2302 (*printer)(env, cookie, " %s@%p: ",
2303 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2305 if (slice->cls_ops->clo_print != NULL)
2306 slice->cls_ops->clo_print(env, cookie, printer, slice);
2307 (*printer)(env, cookie, "\n");
2309 (*printer)(env, cookie, "} lock@%p\n", lock);
2311 EXPORT_SYMBOL(cl_lock_print);
2313 int cl_lock_init(void)
2315 return lu_kmem_init(cl_lock_caches);
2318 void cl_lock_fini(void)
2320 lu_kmem_fini(cl_lock_caches);