4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 #include <obd_class.h>
44 #include <obd_support.h>
45 #include <lustre_fid.h>
46 #include <libcfs/list.h>
47 /* lu_time_global_{init,fini}() */
50 #include <cl_object.h>
51 #include "cl_internal.h"
53 /** Lock class of cl_lock::cll_guard */
54 static struct lock_class_key cl_lock_guard_class;
55 static cfs_mem_cache_t *cl_lock_kmem;
57 static struct lu_kmem_descr cl_lock_caches[] = {
59 .ckd_cache = &cl_lock_kmem,
60 .ckd_name = "cl_lock_kmem",
61 .ckd_size = sizeof (struct cl_lock)
68 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
69 #define CS_LOCK_INC(o, item) \
70 cfs_atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
71 #define CS_LOCK_DEC(o, item) \
72 cfs_atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
73 #define CS_LOCKSTATE_INC(o, state) \
74 cfs_atomic_inc(&cl_object_site(o)->cs_locks_state[state])
75 #define CS_LOCKSTATE_DEC(o, state) \
76 cfs_atomic_dec(&cl_object_site(o)->cs_locks_state[state])
78 #define CS_LOCK_INC(o, item)
79 #define CS_LOCK_DEC(o, item)
80 #define CS_LOCKSTATE_INC(o, state)
81 #define CS_LOCKSTATE_DEC(o, state)
85 * Basic lock invariant that is maintained at all times. Caller either has a
86 * reference to \a lock, or somehow assures that \a lock cannot be freed.
88 * \see cl_lock_invariant()
90 static int cl_lock_invariant_trusted(const struct lu_env *env,
91 const struct cl_lock *lock)
93 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
94 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
95 lock->cll_holds >= lock->cll_users &&
96 lock->cll_holds >= 0 &&
97 lock->cll_users >= 0 &&
102 * Stronger lock invariant, checking that caller has a reference on a lock.
104 * \see cl_lock_invariant_trusted()
106 static int cl_lock_invariant(const struct lu_env *env,
107 const struct cl_lock *lock)
111 result = cfs_atomic_read(&lock->cll_ref) > 0 &&
112 cl_lock_invariant_trusted(env, lock);
113 if (!result && env != NULL)
114 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
119 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
121 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
123 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
127 * Returns a set of counters for this lock, depending on a lock nesting.
129 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
130 const struct cl_lock *lock)
132 struct cl_thread_info *info;
133 enum clt_nesting_level nesting;
135 info = cl_env_info(env);
136 nesting = cl_lock_nesting(lock);
137 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
138 return &info->clt_counters[nesting];
141 static void cl_lock_trace0(int level, const struct lu_env *env,
142 const char *prefix, const struct cl_lock *lock,
143 const char *func, const int line)
145 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
146 CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
147 "(%p/%d/%d) at %s():%d\n",
148 prefix, lock, cfs_atomic_read(&lock->cll_ref),
149 lock->cll_guarder, lock->cll_depth,
150 lock->cll_state, lock->cll_error, lock->cll_holds,
151 lock->cll_users, lock->cll_flags,
152 env, h->coh_nesting, cl_lock_nr_mutexed(env),
155 #define cl_lock_trace(level, env, prefix, lock) \
156 cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
158 #define RETIP ((unsigned long)__builtin_return_address(0))
160 #ifdef CONFIG_LOCKDEP
161 static struct lock_class_key cl_lock_key;
163 static void cl_lock_lockdep_init(struct cl_lock *lock)
165 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
168 static void cl_lock_lockdep_acquire(const struct lu_env *env,
169 struct cl_lock *lock, __u32 enqflags)
171 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
172 #ifdef HAVE_LOCK_MAP_ACQUIRE
173 lock_map_acquire(&lock->dep_map);
174 #else /* HAVE_LOCK_MAP_ACQUIRE */
175 lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
176 /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
177 /* check: */ 2, RETIP);
178 #endif /* HAVE_LOCK_MAP_ACQUIRE */
181 static void cl_lock_lockdep_release(const struct lu_env *env,
182 struct cl_lock *lock)
184 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
185 lock_release(&lock->dep_map, 0, RETIP);
188 #else /* !CONFIG_LOCKDEP */
190 static void cl_lock_lockdep_init(struct cl_lock *lock)
192 static void cl_lock_lockdep_acquire(const struct lu_env *env,
193 struct cl_lock *lock, __u32 enqflags)
195 static void cl_lock_lockdep_release(const struct lu_env *env,
196 struct cl_lock *lock)
199 #endif /* !CONFIG_LOCKDEP */
202 * Adds lock slice to the compound lock.
204 * This is called by cl_object_operations::coo_lock_init() methods to add a
205 * per-layer state to the lock. New state is added at the end of
206 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
208 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
210 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
211 struct cl_object *obj,
212 const struct cl_lock_operations *ops)
215 slice->cls_lock = lock;
216 cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
217 slice->cls_obj = obj;
218 slice->cls_ops = ops;
221 EXPORT_SYMBOL(cl_lock_slice_add);
224 * Returns true iff a lock with the mode \a has provides at least the same
225 * guarantees as a lock with the mode \a need.
227 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
229 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
230 need == CLM_PHANTOM || need == CLM_GROUP);
231 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
232 has == CLM_PHANTOM || has == CLM_GROUP);
233 CLASSERT(CLM_PHANTOM < CLM_READ);
234 CLASSERT(CLM_READ < CLM_WRITE);
235 CLASSERT(CLM_WRITE < CLM_GROUP);
237 if (has != CLM_GROUP)
242 EXPORT_SYMBOL(cl_lock_mode_match);
245 * Returns true iff extent portions of lock descriptions match.
247 int cl_lock_ext_match(const struct cl_lock_descr *has,
248 const struct cl_lock_descr *need)
251 has->cld_start <= need->cld_start &&
252 has->cld_end >= need->cld_end &&
253 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
254 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
256 EXPORT_SYMBOL(cl_lock_ext_match);
259 * Returns true iff a lock with the description \a has provides at least the
260 * same guarantees as a lock with the description \a need.
262 int cl_lock_descr_match(const struct cl_lock_descr *has,
263 const struct cl_lock_descr *need)
266 cl_object_same(has->cld_obj, need->cld_obj) &&
267 cl_lock_ext_match(has, need);
269 EXPORT_SYMBOL(cl_lock_descr_match);
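/*
 * Illustrative sketch (not part of the original code): how the matching
 * helpers above compose.  A cached WRITE lock covering the whole object
 * satisfies a READ request on a single page of the same object:
 *
 *      struct cl_lock_descr has  = { .cld_obj   = obj,
 *                                    .cld_mode  = CLM_WRITE,
 *                                    .cld_start = 0,
 *                                    .cld_end   = CL_PAGE_EOF };
 *      struct cl_lock_descr need = { .cld_obj   = obj,
 *                                    .cld_mode  = CLM_READ,
 *                                    .cld_start = 0,
 *                                    .cld_end   = 0 };
 *
 *      LASSERT(cl_lock_mode_match(CLM_WRITE, CLM_READ));
 *      LASSERT(cl_lock_descr_match(&has, &need));
 *
 * Group locks are the exception: a CLM_GROUP lock only matches another
 * CLM_GROUP descriptor with the same cld_gid.
 */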
271 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
273 struct cl_object *obj = lock->cll_descr.cld_obj;
275 LINVRNT(!cl_lock_is_mutexed(lock));
278 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
280 while (!cfs_list_empty(&lock->cll_layers)) {
281 struct cl_lock_slice *slice;
283 slice = cfs_list_entry(lock->cll_layers.next,
284 struct cl_lock_slice, cls_linkage);
285 cfs_list_del_init(lock->cll_layers.next);
286 slice->cls_ops->clo_fini(env, slice);
288 CS_LOCK_DEC(obj, total);
289 CS_LOCKSTATE_DEC(obj, lock->cll_state);
290 lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
291 cl_object_put(env, obj);
292 lu_ref_fini(&lock->cll_reference);
293 lu_ref_fini(&lock->cll_holders);
294 mutex_destroy(&lock->cll_guard);
295 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
300 * Releases a reference on a lock.
302 * When last reference is released, lock is returned to the cache, unless it
303 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
306 * \see cl_object_put(), cl_page_put()
308 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
310 struct cl_object *obj;
311 struct cl_site *site;
313 LINVRNT(cl_lock_invariant(env, lock));
315 obj = lock->cll_descr.cld_obj;
316 LINVRNT(obj != NULL);
317 site = cl_object_site(obj);
319 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
320 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
322 if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
323 if (lock->cll_state == CLS_FREEING) {
324 LASSERT(cfs_list_empty(&lock->cll_linkage));
325 cl_lock_free(env, lock);
327 CS_LOCK_DEC(obj, busy);
331 EXPORT_SYMBOL(cl_lock_put);
334 * Acquires an additional reference to a lock.
336 * This can be called only by caller already possessing a reference to \a
339 * \see cl_object_get(), cl_page_get()
341 void cl_lock_get(struct cl_lock *lock)
343 LINVRNT(cl_lock_invariant(NULL, lock));
344 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
345 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
346 cfs_atomic_inc(&lock->cll_ref);
348 EXPORT_SYMBOL(cl_lock_get);
351 * Acquires a reference to a lock.
353 * This is much like cl_lock_get(), except that this function can be used to
354 * acquire initial reference to the cached lock. Caller has to deal with all
355 * possible races. Use with care!
357 * \see cl_page_get_trust()
359 void cl_lock_get_trust(struct cl_lock *lock)
361 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
362 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
363 if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
364 CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
366 EXPORT_SYMBOL(cl_lock_get_trust);
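/*
 * Illustrative sketch (not part of the original code): how the reference
 * helpers above pair up.  cl_lock_get() needs an existing reference, while
 * cl_lock_get_trust() may take the first reference to a cached lock, but
 * only while the lock is otherwise pinned, e.g. under coh_lock_guard as in
 * cl_lock_lookup():
 *
 *      spin_lock(&head->coh_lock_guard);
 *      cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
 *              if (matches(lock)) {            (hypothetical predicate)
 *                      cl_lock_get_trust(lock);
 *                      break;
 *              }
 *      }
 *      spin_unlock(&head->coh_lock_guard);
 *      ...
 *      cl_lock_put(env, lock);                 (every get has a put)
 */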
369 * Helper function destroying the lock that wasn't completely initialized.
371 * Other threads can acquire references to the top-lock through its
372 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
374 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
376 cl_lock_mutex_get(env, lock);
377 cl_lock_cancel(env, lock);
378 cl_lock_delete(env, lock);
379 cl_lock_mutex_put(env, lock);
380 cl_lock_put(env, lock);
383 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
384 struct cl_object *obj,
385 const struct cl_io *io,
386 const struct cl_lock_descr *descr)
388 struct cl_lock *lock;
389 struct lu_object_header *head;
392 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
394 cfs_atomic_set(&lock->cll_ref, 1);
395 lock->cll_descr = *descr;
396 lock->cll_state = CLS_NEW;
398 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
400 CFS_INIT_LIST_HEAD(&lock->cll_layers);
401 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
402 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
403 lu_ref_init(&lock->cll_reference);
404 lu_ref_init(&lock->cll_holders);
405 mutex_init(&lock->cll_guard);
406 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
407 cfs_waitq_init(&lock->cll_wq);
408 head = obj->co_lu.lo_header;
409 CS_LOCKSTATE_INC(obj, CLS_NEW);
410 CS_LOCK_INC(obj, total);
411 CS_LOCK_INC(obj, create);
412 cl_lock_lockdep_init(lock);
413 cfs_list_for_each_entry(obj, &head->loh_layers,
417 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
419 cl_lock_finish(env, lock);
425 lock = ERR_PTR(-ENOMEM);
430 * Transfer the lock into INTRANSIT state and return the original state.
432 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
433 * \post state: CLS_INTRANSIT
436 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
437 struct cl_lock *lock)
439 enum cl_lock_state state = lock->cll_state;
441 LASSERT(cl_lock_is_mutexed(lock));
442 LASSERT(state != CLS_INTRANSIT);
443 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
444 "Malformed lock state %d.\n", state);
446 cl_lock_state_set(env, lock, CLS_INTRANSIT);
447 lock->cll_intransit_owner = cfs_current();
448 cl_lock_hold_add(env, lock, "intransit", cfs_current());
451 EXPORT_SYMBOL(cl_lock_intransit);
454 * Exit the intransit state and restore the lock state to the original state
456 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
457 enum cl_lock_state state)
459 LASSERT(cl_lock_is_mutexed(lock));
460 LASSERT(lock->cll_state == CLS_INTRANSIT);
461 LASSERT(state != CLS_INTRANSIT);
462 LASSERT(lock->cll_intransit_owner == cfs_current());
464 lock->cll_intransit_owner = NULL;
465 cl_lock_state_set(env, lock, state);
466 cl_lock_unhold(env, lock, "intransit", cfs_current());
468 EXPORT_SYMBOL(cl_lock_extransit);
471  * Checks whether the lock is in the intransit state.
473 int cl_lock_is_intransit(struct cl_lock *lock)
475 LASSERT(cl_lock_is_mutexed(lock));
476 return lock->cll_state == CLS_INTRANSIT &&
477 lock->cll_intransit_owner != cfs_current();
479 EXPORT_SYMBOL(cl_lock_is_intransit);
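/*
 * Illustrative sketch (not part of the original code): the intransit
 * helpers above bracket a blocking state transition, as cl_use_try() and
 * cl_unuse_try() below do:
 *
 *      enum cl_lock_state state;
 *
 *      state = cl_lock_intransit(env, lock);   (adds an "intransit" hold)
 *      ... call layer methods that may block, e.g. ->clo_use() ...
 *      cl_lock_extransit(env, lock, state);    (drops the hold, restores state)
 *
 * While the lock is CLS_INTRANSIT, other threads that observe it have to
 * wait for the transition to finish, see e.g. cl_lock_peek() above.
 */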
481 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
482 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
483 * cover multiple stripes and can trigger cascading timeouts.
485 static int cl_lock_fits_into(const struct lu_env *env,
486 const struct cl_lock *lock,
487 const struct cl_lock_descr *need,
488 const struct cl_io *io)
490 const struct cl_lock_slice *slice;
492 LINVRNT(cl_lock_invariant_trusted(env, lock));
494 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
495 if (slice->cls_ops->clo_fits_into != NULL &&
496 !slice->cls_ops->clo_fits_into(env, slice, need, io))
502 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
503 struct cl_object *obj,
504 const struct cl_io *io,
505 const struct cl_lock_descr *need)
507 struct cl_lock *lock;
508 struct cl_object_header *head;
512 head = cl_object_header(obj);
513 LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
514 CS_LOCK_INC(obj, lookup);
515 cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
518 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
519 lock->cll_state < CLS_FREEING &&
520 lock->cll_error == 0 &&
521 !(lock->cll_flags & CLF_CANCELLED) &&
522 cl_lock_fits_into(env, lock, need, io);
523 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
524 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
527 cl_lock_get_trust(lock);
528 CS_LOCK_INC(obj, hit);
536 * Returns a lock matching description \a need.
538 * This is the main entry point into the cl_lock caching interface. First, a
539 * cache (implemented as a per-object linked list) is consulted. If lock is
540 * found there, it is returned immediately. Otherwise new lock is allocated
541 * and returned. In any case, additional reference to lock is acquired.
543 * \see cl_object_find(), cl_page_find()
545 static struct cl_lock *cl_lock_find(const struct lu_env *env,
546 const struct cl_io *io,
547 const struct cl_lock_descr *need)
549 struct cl_object_header *head;
550 struct cl_object *obj;
551 struct cl_lock *lock;
556 head = cl_object_header(obj);
558 spin_lock(&head->coh_lock_guard);
559 lock = cl_lock_lookup(env, obj, io, need);
560 spin_unlock(&head->coh_lock_guard);
563 lock = cl_lock_alloc(env, obj, io, need);
565 struct cl_lock *ghost;
567 spin_lock(&head->coh_lock_guard);
568 ghost = cl_lock_lookup(env, obj, io, need);
570 cfs_list_add_tail(&lock->cll_linkage,
572 spin_unlock(&head->coh_lock_guard);
573 CS_LOCK_INC(obj, busy);
575 spin_unlock(&head->coh_lock_guard);
577 * Other threads can acquire references to the
578 * top-lock through its sub-locks. Hence, it
579 * cannot be cl_lock_free()-ed immediately.
581 cl_lock_finish(env, lock);
590 * Returns existing lock matching given description. This is similar to
591 * cl_lock_find() except that no new lock is created, and returned lock is
592 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
594 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
595 const struct cl_lock_descr *need,
596 const char *scope, const void *source)
598 struct cl_object_header *head;
599 struct cl_object *obj;
600 struct cl_lock *lock;
603 head = cl_object_header(obj);
606 spin_lock(&head->coh_lock_guard);
607 lock = cl_lock_lookup(env, obj, io, need);
608 spin_unlock(&head->coh_lock_guard);
612 cl_lock_mutex_get(env, lock);
613 if (lock->cll_state == CLS_INTRANSIT)
614 /* Don't care return value. */
615 cl_lock_state_wait(env, lock);
616 if (lock->cll_state == CLS_FREEING) {
617 cl_lock_mutex_put(env, lock);
618 cl_lock_put(env, lock);
621 } while (lock == NULL);
623 cl_lock_hold_add(env, lock, scope, source);
624 cl_lock_user_add(env, lock);
625 if (lock->cll_state == CLS_CACHED)
626 cl_use_try(env, lock, 1);
627 if (lock->cll_state == CLS_HELD) {
628 cl_lock_mutex_put(env, lock);
629 cl_lock_lockdep_acquire(env, lock, 0);
630 cl_lock_put(env, lock);
632 cl_unuse_try(env, lock);
633 cl_lock_unhold(env, lock, scope, source);
634 cl_lock_mutex_put(env, lock);
635 cl_lock_put(env, lock);
641 EXPORT_SYMBOL(cl_lock_peek);
644 * Returns a slice within a lock, corresponding to the given layer in the
649 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
650 const struct lu_device_type *dtype)
652 const struct cl_lock_slice *slice;
654 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
657 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
658 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
663 EXPORT_SYMBOL(cl_lock_at);
665 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
667 struct cl_thread_counters *counters;
669 counters = cl_lock_counters(env, lock);
671 counters->ctc_nr_locks_locked++;
672 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
673 cl_lock_trace(D_TRACE, env, "got mutex", lock);
677 * Locks cl_lock object.
679 * This is used to manipulate cl_lock fields, and to serialize state
680 * transitions in the lock state machine.
682 * \post cl_lock_is_mutexed(lock)
684 * \see cl_lock_mutex_put()
686 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
688 LINVRNT(cl_lock_invariant(env, lock));
690 if (lock->cll_guarder == cfs_current()) {
691 LINVRNT(cl_lock_is_mutexed(lock));
692 LINVRNT(lock->cll_depth > 0);
694 struct cl_object_header *hdr;
695 struct cl_thread_info *info;
698 LINVRNT(lock->cll_guarder != cfs_current());
699 hdr = cl_object_header(lock->cll_descr.cld_obj);
701 * Check that mutices are taken in the bottom-to-top order.
703 info = cl_env_info(env);
704 for (i = 0; i < hdr->coh_nesting; ++i)
705 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
706 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
707 lock->cll_guarder = cfs_current();
708 LINVRNT(lock->cll_depth == 0);
710 cl_lock_mutex_tail(env, lock);
712 EXPORT_SYMBOL(cl_lock_mutex_get);
715 * Try-locks cl_lock object.
717 * \retval 0 \a lock was successfully locked
719 * \retval -EBUSY \a lock cannot be locked right now
721 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
723 * \see cl_lock_mutex_get()
725 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
729 LINVRNT(cl_lock_invariant_trusted(env, lock));
733 if (lock->cll_guarder == cfs_current()) {
734 LINVRNT(lock->cll_depth > 0);
735 cl_lock_mutex_tail(env, lock);
736 } else if (mutex_trylock(&lock->cll_guard)) {
737 LINVRNT(lock->cll_depth == 0);
738 lock->cll_guarder = cfs_current();
739 cl_lock_mutex_tail(env, lock);
744 EXPORT_SYMBOL(cl_lock_mutex_try);
747  * Unlocks cl_lock object.
749 * \pre cl_lock_is_mutexed(lock)
751 * \see cl_lock_mutex_get()
753 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
755 struct cl_thread_counters *counters;
757 LINVRNT(cl_lock_invariant(env, lock));
758 LINVRNT(cl_lock_is_mutexed(lock));
759 LINVRNT(lock->cll_guarder == cfs_current());
760 LINVRNT(lock->cll_depth > 0);
762 counters = cl_lock_counters(env, lock);
763 LINVRNT(counters->ctc_nr_locks_locked > 0);
765 cl_lock_trace(D_TRACE, env, "put mutex", lock);
766 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
767 counters->ctc_nr_locks_locked--;
768 if (--lock->cll_depth == 0) {
769 lock->cll_guarder = NULL;
770 mutex_unlock(&lock->cll_guard);
773 EXPORT_SYMBOL(cl_lock_mutex_put);
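/*
 * Illustrative sketch (not part of the original code): cll_guard is
 * recursive for its owner, so nested acquisition within one thread is
 * legal and only the outermost put really releases the mutex:
 *
 *      cl_lock_mutex_get(env, lock);            (cll_depth == 1)
 *      cl_lock_mutex_get(env, lock);            (cll_depth == 2, no deadlock)
 *      LASSERT(cl_lock_is_mutexed(lock));
 *      cl_lock_mutex_put(env, lock);            (cll_depth back to 1)
 *      cl_lock_mutex_put(env, lock);            (mutex actually released)
 *
 * Across different locks, cl_lock_mutex_get() enforces bottom-to-top
 * ordering: a sub-lock mutex may be held while taking a top-lock mutex,
 * but not the other way around; going top-to-bottom, only the non-blocking
 * cl_lock_mutex_try() (used by the closure machinery below) avoids the
 * ordering assertion.
 */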
776 * Returns true iff lock's mutex is owned by the current thread.
778 int cl_lock_is_mutexed(struct cl_lock *lock)
780 return lock->cll_guarder == cfs_current();
782 EXPORT_SYMBOL(cl_lock_is_mutexed);
785 * Returns number of cl_lock mutices held by the current thread (environment).
787 int cl_lock_nr_mutexed(const struct lu_env *env)
789 struct cl_thread_info *info;
794 * NOTE: if summation across all nesting levels (currently 2) proves
795 * too expensive, a summary counter can be added to
796 * struct cl_thread_info.
798 info = cl_env_info(env);
799 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
800 locked += info->clt_counters[i].ctc_nr_locks_locked;
803 EXPORT_SYMBOL(cl_lock_nr_mutexed);
805 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
807 LINVRNT(cl_lock_is_mutexed(lock));
808 LINVRNT(cl_lock_invariant(env, lock));
810 if (!(lock->cll_flags & CLF_CANCELLED)) {
811 const struct cl_lock_slice *slice;
813 lock->cll_flags |= CLF_CANCELLED;
814 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
816 if (slice->cls_ops->clo_cancel != NULL)
817 slice->cls_ops->clo_cancel(env, slice);
823 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
825 struct cl_object_header *head;
826 const struct cl_lock_slice *slice;
828 LINVRNT(cl_lock_is_mutexed(lock));
829 LINVRNT(cl_lock_invariant(env, lock));
832 if (lock->cll_state < CLS_FREEING) {
833 LASSERT(lock->cll_state != CLS_INTRANSIT);
834 cl_lock_state_set(env, lock, CLS_FREEING);
836 head = cl_object_header(lock->cll_descr.cld_obj);
838 spin_lock(&head->coh_lock_guard);
839 cfs_list_del_init(&lock->cll_linkage);
840 spin_unlock(&head->coh_lock_guard);
843 * From now on, no new references to this lock can be acquired
844 * by cl_lock_lookup().
846 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
848 if (slice->cls_ops->clo_delete != NULL)
849 slice->cls_ops->clo_delete(env, slice);
852 * From now on, no new references to this lock can be acquired
853 * by layer-specific means (like a pointer from struct
854 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
857 * Lock will be finally freed in cl_lock_put() when last of
858 * existing references goes away.
865 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
866 * top-lock (nesting == 0) accounts for this modification in the per-thread
867 * debugging counters. Sub-lock holds can be released by a thread different
868 * from one that acquired it.
870 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
873 struct cl_thread_counters *counters;
874 enum clt_nesting_level nesting;
876 lock->cll_holds += delta;
877 nesting = cl_lock_nesting(lock);
878 if (nesting == CNL_TOP) {
879 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
880 counters->ctc_nr_held += delta;
881 LASSERT(counters->ctc_nr_held >= 0);
886 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
887 * cl_lock_hold_mod() for the explanation of the debugging code.
889 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
892 struct cl_thread_counters *counters;
893 enum clt_nesting_level nesting;
895 lock->cll_users += delta;
896 nesting = cl_lock_nesting(lock);
897 if (nesting == CNL_TOP) {
898 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
899 counters->ctc_nr_used += delta;
900 LASSERT(counters->ctc_nr_used >= 0);
904 void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
905 const char *scope, const void *source)
907 LINVRNT(cl_lock_is_mutexed(lock));
908 LINVRNT(cl_lock_invariant(env, lock));
909 LASSERT(lock->cll_holds > 0);
912 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
913 lu_ref_del(&lock->cll_holders, scope, source);
914 cl_lock_hold_mod(env, lock, -1);
915 if (lock->cll_holds == 0) {
916 CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
917 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
918 lock->cll_descr.cld_mode == CLM_GROUP ||
919 lock->cll_state != CLS_CACHED)
921                          * If the lock is still a phantom or group lock when the user is
922                          * done with it, destroy the lock.
924 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
925 if (lock->cll_flags & CLF_CANCELPEND) {
926 lock->cll_flags &= ~CLF_CANCELPEND;
927 cl_lock_cancel0(env, lock);
929 if (lock->cll_flags & CLF_DOOMED) {
930 /* no longer doomed: it's dead... Jim. */
931 lock->cll_flags &= ~CLF_DOOMED;
932 cl_lock_delete0(env, lock);
937 EXPORT_SYMBOL(cl_lock_hold_release);
940 * Waits until lock state is changed.
942 * This function is called with cl_lock mutex locked, atomically releases
943 * mutex and goes to sleep, waiting for a lock state change (signaled by
944 * cl_lock_signal()), and re-acquires the mutex before return.
946 * This function is used to wait until lock state machine makes some progress
947 * and to emulate synchronous operations on top of asynchronous lock
950 * \retval -EINTR wait was interrupted
952 * \retval 0 wait wasn't interrupted
954 * \pre cl_lock_is_mutexed(lock)
956 * \see cl_lock_signal()
958 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
960 cfs_waitlink_t waiter;
961 cfs_sigset_t blocked;
965 LINVRNT(cl_lock_is_mutexed(lock));
966 LINVRNT(cl_lock_invariant(env, lock));
967 LASSERT(lock->cll_depth == 1);
968 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
970 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
971 result = lock->cll_error;
973 /* To avoid being interrupted by the 'non-fatal' signals
974 * (SIGCHLD, for instance), we'd block them temporarily.
976 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
978 cfs_waitlink_init(&waiter);
979 cfs_waitq_add(&lock->cll_wq, &waiter);
980 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
981 cl_lock_mutex_put(env, lock);
983 LASSERT(cl_lock_nr_mutexed(env) == 0);
986 if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
987 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
988 if (!cfs_signal_pending())
992 cl_lock_mutex_get(env, lock);
993 cfs_set_current_state(CFS_TASK_RUNNING);
994 cfs_waitq_del(&lock->cll_wq, &waiter);
996 /* Restore old blocked signals */
997 cfs_restore_sigs(blocked);
1001 EXPORT_SYMBOL(cl_lock_state_wait);
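/*
 * Illustrative sketch (not part of the original code): how the "try"
 * functions below are typically driven together with cl_lock_state_wait().
 * The caller holds the lock mutex, retries the operation and sleeps
 * whenever CLO_WAIT is returned:
 *
 *      do {
 *              result = cl_wait_try(env, lock);
 *              if (result == CLO_WAIT) {
 *                      result = cl_lock_state_wait(env, lock);
 *                      if (result == 0)
 *                              continue;
 *              }
 *              break;
 *      } while (1);
 *
 * cl_enqueue_locked() and cl_wait() below implement essentially this pattern.
 */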
1003 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
1004 enum cl_lock_state state)
1006 const struct cl_lock_slice *slice;
1009 LINVRNT(cl_lock_is_mutexed(lock));
1010 LINVRNT(cl_lock_invariant(env, lock));
1012 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
1013 if (slice->cls_ops->clo_state != NULL)
1014 slice->cls_ops->clo_state(env, slice, state);
1015 cfs_waitq_broadcast(&lock->cll_wq);
1020 * Notifies waiters that lock state changed.
1022 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1023 * layers about state change by calling cl_lock_operations::clo_state()
1026 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1029 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1030 cl_lock_state_signal(env, lock, lock->cll_state);
1033 EXPORT_SYMBOL(cl_lock_signal);
1036 * Changes lock state.
1038  * This function is invoked to notify layers that lock state changed, possibly
1039 * as a result of an asynchronous event such as call-back reception.
1041 * \post lock->cll_state == state
1043 * \see cl_lock_operations::clo_state()
1045 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1046 enum cl_lock_state state)
1049 LASSERT(lock->cll_state <= state ||
1050 (lock->cll_state == CLS_CACHED &&
1051 (state == CLS_HELD || /* lock found in cache */
1052 state == CLS_NEW || /* sub-lock canceled */
1053 state == CLS_INTRANSIT)) ||
1054 /* lock is in transit state */
1055 lock->cll_state == CLS_INTRANSIT);
1057 if (lock->cll_state != state) {
1058 CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
1059 CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
1061 cl_lock_state_signal(env, lock, state);
1062 lock->cll_state = state;
1066 EXPORT_SYMBOL(cl_lock_state_set);
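/*
 * Summary of the transitions allowed by the assertion above (added for
 * clarity, not part of the original code): the state normally only moves
 * forward (CLS_NEW -> CLS_QUEUING -> CLS_ENQUEUED -> CLS_HELD -> ...),
 * with two exceptions: a CLS_CACHED lock may go back to CLS_HELD (found in
 * the cache), CLS_NEW (a sub-lock was canceled) or CLS_INTRANSIT, and a
 * CLS_INTRANSIT lock may be restored to any state by cl_lock_extransit().
 */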
1068 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1070 const struct cl_lock_slice *slice;
1076 LINVRNT(cl_lock_is_mutexed(lock));
1077 LINVRNT(cl_lock_invariant(env, lock));
1078 LASSERT(lock->cll_state == CLS_INTRANSIT);
1081 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1083 if (slice->cls_ops->clo_unuse != NULL) {
1084 result = slice->cls_ops->clo_unuse(env, slice);
1089 LASSERT(result != -ENOSYS);
1090 } while (result == CLO_REPEAT);
1096 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1097 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1098  * If @atomic is 1, the lock must be unused on failure to restore it and keep
1099  * the use process atomic.
1101 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1103 const struct cl_lock_slice *slice;
1105 enum cl_lock_state state;
1108 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1110 LASSERT(lock->cll_state == CLS_CACHED);
1111 if (lock->cll_error)
1112 RETURN(lock->cll_error);
1115 state = cl_lock_intransit(env, lock);
1116 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1117 if (slice->cls_ops->clo_use != NULL) {
1118 result = slice->cls_ops->clo_use(env, slice);
1123 LASSERT(result != -ENOSYS);
1125 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1131 if (result == -ESTALE) {
1133                          * -ESTALE means a sublock is being cancelled
1134                          * at this time; set the lock state back to
1135                          * CLS_NEW here and ask the caller to repeat.
1138 result = CLO_REPEAT;
1141 /* @atomic means back-off-on-failure. */
1144 rc = cl_unuse_try_internal(env, lock);
1145 /* Vet the results. */
1146 if (rc < 0 && result > 0)
1151 cl_lock_extransit(env, lock, state);
1154 EXPORT_SYMBOL(cl_use_try);
1157 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1160 static int cl_enqueue_kick(const struct lu_env *env,
1161 struct cl_lock *lock,
1162 struct cl_io *io, __u32 flags)
1165 const struct cl_lock_slice *slice;
1169 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1170 if (slice->cls_ops->clo_enqueue != NULL) {
1171 result = slice->cls_ops->clo_enqueue(env,
1177 LASSERT(result != -ENOSYS);
1182 * Tries to enqueue a lock.
1184 * This function is called repeatedly by cl_enqueue() until either lock is
1185 * enqueued, or error occurs. This function does not block waiting for
1186 * networking communication to complete.
1188 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1189 * lock->cll_state == CLS_HELD)
1191 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1192 * \see cl_lock_state::CLS_ENQUEUED
1194 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1195 struct cl_io *io, __u32 flags)
1200 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1202 LINVRNT(cl_lock_is_mutexed(lock));
1204 result = lock->cll_error;
1208 switch (lock->cll_state) {
1210 cl_lock_state_set(env, lock, CLS_QUEUING);
1214 result = cl_enqueue_kick(env, lock, io, flags);
1215 /* For AGL case, the cl_lock::cll_state may
1216 * become CLS_HELD already. */
1217 if (result == 0 && lock->cll_state == CLS_QUEUING)
1218 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1221 LASSERT(cl_lock_is_intransit(lock));
1225 /* yank lock from the cache. */
1226 result = cl_use_try(env, lock, 0);
1235 * impossible, only held locks with increased
1236 * ->cll_holds can be enqueued, and they cannot be
1241 } while (result == CLO_REPEAT);
1244 EXPORT_SYMBOL(cl_enqueue_try);
1247 * Cancel the conflicting lock found during previous enqueue.
1249 * \retval 0 conflicting lock has been canceled.
1250 * \retval -ve error code.
1252 int cl_lock_enqueue_wait(const struct lu_env *env,
1253 struct cl_lock *lock,
1256 struct cl_lock *conflict;
1260 LASSERT(cl_lock_is_mutexed(lock));
1261 LASSERT(lock->cll_state == CLS_QUEUING);
1262 LASSERT(lock->cll_conflict != NULL);
1264 conflict = lock->cll_conflict;
1265 lock->cll_conflict = NULL;
1267 cl_lock_mutex_put(env, lock);
1268 LASSERT(cl_lock_nr_mutexed(env) == 0);
1270 cl_lock_mutex_get(env, conflict);
1271 cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
1272 cl_lock_cancel(env, conflict);
1273 cl_lock_delete(env, conflict);
1275 while (conflict->cll_state != CLS_FREEING) {
1276 rc = cl_lock_state_wait(env, conflict);
1280 cl_lock_mutex_put(env, conflict);
1281 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1282 cl_lock_put(env, conflict);
1285 cl_lock_mutex_get(env, lock);
1290 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1292 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1293 struct cl_io *io, __u32 enqflags)
1299 LINVRNT(cl_lock_is_mutexed(lock));
1300 LINVRNT(cl_lock_invariant(env, lock));
1301 LASSERT(lock->cll_holds > 0);
1303 cl_lock_user_add(env, lock);
1305 result = cl_enqueue_try(env, lock, io, enqflags);
1306 if (result == CLO_WAIT) {
1307 if (lock->cll_conflict != NULL)
1308 result = cl_lock_enqueue_wait(env, lock, 1);
1310 result = cl_lock_state_wait(env, lock);
1317 cl_unuse_try(env, lock);
1318 LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1319 lock->cll_state == CLS_ENQUEUED ||
1320 lock->cll_state == CLS_HELD));
1327 * \pre current thread or io owns a hold on lock.
1329 * \post ergo(result == 0, lock->users increased)
1330 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1331 * lock->cll_state == CLS_HELD)
1333 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1334 struct cl_io *io, __u32 enqflags)
1340 cl_lock_lockdep_acquire(env, lock, enqflags);
1341 cl_lock_mutex_get(env, lock);
1342 result = cl_enqueue_locked(env, lock, io, enqflags);
1343 cl_lock_mutex_put(env, lock);
1345 cl_lock_lockdep_release(env, lock);
1346 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1347 lock->cll_state == CLS_HELD));
1350 EXPORT_SYMBOL(cl_enqueue);
1353 * Tries to unlock a lock.
1355 * This function is called to release underlying resource:
1356 * 1. for top lock, the resource is sublocks it held;
1357 * 2. for sublock, the resource is the reference to dlmlock.
1359 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1361 * \see cl_unuse() cl_lock_operations::clo_unuse()
1362 * \see cl_lock_state::CLS_CACHED
1364 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1367 enum cl_lock_state state = CLS_NEW;
1370 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1372 if (lock->cll_users > 1) {
1373 cl_lock_user_del(env, lock);
1377         /* Only a lock in CLS_HELD or CLS_ENQUEUED state can hold
1378          * underlying resources. */
1379 if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
1380 cl_lock_user_del(env, lock);
1385          * New lock users (->cll_users) do not prevent unlocking
1386          * from proceeding. From this point, the lock eventually reaches
1387 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1390 state = cl_lock_intransit(env, lock);
1392 result = cl_unuse_try_internal(env, lock);
1393 LASSERT(lock->cll_state == CLS_INTRANSIT);
1394 LASSERT(result != CLO_WAIT);
1395 cl_lock_user_del(env, lock);
1396 if (result == 0 || result == -ESTALE) {
1398 * Return lock back to the cache. This is the only
1399 * place where lock is moved into CLS_CACHED state.
1401 * If one of ->clo_unuse() methods returned -ESTALE, lock
1402 * cannot be placed into cache and has to be
1403 * re-initialized. This happens e.g., when a sub-lock was
1404 * canceled while unlocking was in progress.
1406 if (state == CLS_HELD && result == 0)
1410 cl_lock_extransit(env, lock, state);
1413                  * Hide the -ESTALE error.
1414                  * Suppose the lock is a glimpse lock with multiple
1415                  * stripes, one of its sublocks returned -ENAVAIL,
1416                  * and the other sublocks are matched write locks. In this
1417                  * case, we can't set this lock to error, because otherwise
1418                  * some of its sublocks may not be canceled, and some dirty
1419                  * pages would never be written to OSTs. -jay
1423 CERROR("result = %d, this is unlikely!\n", result);
1425 cl_lock_extransit(env, lock, state);
1427 RETURN(result ?: lock->cll_error);
1429 EXPORT_SYMBOL(cl_unuse_try);
1431 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1436 result = cl_unuse_try(env, lock);
1438 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1446 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1449 cl_lock_mutex_get(env, lock);
1450 cl_unuse_locked(env, lock);
1451 cl_lock_mutex_put(env, lock);
1452 cl_lock_lockdep_release(env, lock);
1455 EXPORT_SYMBOL(cl_unuse);
1458 * Tries to wait for a lock.
1460 * This function is called repeatedly by cl_wait() until either lock is
1461 * granted, or error occurs. This function does not block waiting for network
1462 * communication to complete.
1464 * \see cl_wait() cl_lock_operations::clo_wait()
1465 * \see cl_lock_state::CLS_HELD
1467 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1469 const struct cl_lock_slice *slice;
1473 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1475 LINVRNT(cl_lock_is_mutexed(lock));
1476 LINVRNT(cl_lock_invariant(env, lock));
1477 LASSERTF(lock->cll_state == CLS_QUEUING ||
1478 lock->cll_state == CLS_ENQUEUED ||
1479 lock->cll_state == CLS_HELD ||
1480 lock->cll_state == CLS_INTRANSIT,
1481 "lock state: %d\n", lock->cll_state);
1482 LASSERT(lock->cll_users > 0);
1483 LASSERT(lock->cll_holds > 0);
1485 result = lock->cll_error;
1489 if (cl_lock_is_intransit(lock)) {
1494 if (lock->cll_state == CLS_HELD)
1499 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1500 if (slice->cls_ops->clo_wait != NULL) {
1501 result = slice->cls_ops->clo_wait(env, slice);
1506 LASSERT(result != -ENOSYS);
1508 LASSERT(lock->cll_state != CLS_INTRANSIT);
1509 cl_lock_state_set(env, lock, CLS_HELD);
1511 } while (result == CLO_REPEAT);
1514 EXPORT_SYMBOL(cl_wait_try);
1517 * Waits until enqueued lock is granted.
1519 * \pre current thread or io owns a hold on the lock
1520 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1521 * lock->cll_state == CLS_HELD)
1523 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1525 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1530 cl_lock_mutex_get(env, lock);
1532 LINVRNT(cl_lock_invariant(env, lock));
1533 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1534 "Wrong state %d \n", lock->cll_state);
1535 LASSERT(lock->cll_holds > 0);
1538 result = cl_wait_try(env, lock);
1539 if (result == CLO_WAIT) {
1540 result = cl_lock_state_wait(env, lock);
1547 cl_unuse_try(env, lock);
1548 cl_lock_lockdep_release(env, lock);
1550 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1551 cl_lock_mutex_put(env, lock);
1552 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1555 EXPORT_SYMBOL(cl_wait);
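/*
 * Illustrative sketch (not part of the original code): cl_enqueue() and
 * cl_wait() are normally used back to back on a lock on which the caller
 * already owns a hold (see cl_lock_hold()/cl_lock_request() below):
 *
 *      rc = cl_enqueue(env, lock, io, enqflags);
 *      if (rc == 0)
 *              rc = cl_wait(env, lock);        (CLS_HELD on success)
 *      if (rc == 0) {
 *              ... do I/O under the lock ...
 *              cl_unuse(env, lock);            (drop the use when done)
 *      }
 *
 * Failure paths inside cl_enqueue()/cl_wait() drop the use themselves, so
 * the caller only has to release its hold afterwards.
 */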
1558 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1561 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1563 const struct cl_lock_slice *slice;
1564 unsigned long pound;
1565 unsigned long ounce;
1568 LINVRNT(cl_lock_is_mutexed(lock));
1569 LINVRNT(cl_lock_invariant(env, lock));
1572 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1573 if (slice->cls_ops->clo_weigh != NULL) {
1574 ounce = slice->cls_ops->clo_weigh(env, slice);
1576 if (pound < ounce) /* over-weight^Wflow */
1582 EXPORT_SYMBOL(cl_lock_weigh);
1585 * Notifies layers that lock description changed.
1587  * The server can grant the client a lock different from the one that was
1588  * requested (e.g., larger in extent). This method is called when the actually
1589  * granted lock description becomes known, to let layers accommodate the change.
1592 * \see cl_lock_operations::clo_modify()
1594 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1595 const struct cl_lock_descr *desc)
1597 const struct cl_lock_slice *slice;
1598 struct cl_object *obj = lock->cll_descr.cld_obj;
1599 struct cl_object_header *hdr = cl_object_header(obj);
1603 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1604 /* don't allow object to change */
1605 LASSERT(obj == desc->cld_obj);
1606 LINVRNT(cl_lock_is_mutexed(lock));
1607 LINVRNT(cl_lock_invariant(env, lock));
1609 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1610 if (slice->cls_ops->clo_modify != NULL) {
1611 result = slice->cls_ops->clo_modify(env, slice, desc);
1616 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1617 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1619 * Just replace description in place. Nothing more is needed for
1620 * now. If locks were indexed according to their extent and/or mode,
1621 * that index would have to be updated here.
1623 spin_lock(&hdr->coh_lock_guard);
1624 lock->cll_descr = *desc;
1625 spin_unlock(&hdr->coh_lock_guard);
1628 EXPORT_SYMBOL(cl_lock_modify);
1631 * Initializes lock closure with a given origin.
1633 * \see cl_lock_closure
1635 void cl_lock_closure_init(const struct lu_env *env,
1636 struct cl_lock_closure *closure,
1637 struct cl_lock *origin, int wait)
1639 LINVRNT(cl_lock_is_mutexed(origin));
1640 LINVRNT(cl_lock_invariant(env, origin));
1642 CFS_INIT_LIST_HEAD(&closure->clc_list);
1643 closure->clc_origin = origin;
1644 closure->clc_wait = wait;
1645 closure->clc_nr = 0;
1647 EXPORT_SYMBOL(cl_lock_closure_init);
1650 * Builds a closure of \a lock.
1652 * Building of a closure consists of adding initial lock (\a lock) into it,
1653 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1654 * methods might call cl_lock_closure_build() recursively again, adding more
1655 * locks to the closure, etc.
1657 * \see cl_lock_closure
1659 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1660 struct cl_lock_closure *closure)
1662 const struct cl_lock_slice *slice;
1666 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1667 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1669 result = cl_lock_enclosure(env, lock, closure);
1671 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1672 if (slice->cls_ops->clo_closure != NULL) {
1673 result = slice->cls_ops->clo_closure(env, slice,
1681 cl_lock_disclosure(env, closure);
1684 EXPORT_SYMBOL(cl_lock_closure_build);
1687  * Adds a new lock to a closure.
1689  * Try-locks \a lock and, if successful, adds it to the closure (never more than
1690  * once). If the try-lock fails, returns CLO_REPEAT, after optionally waiting
1691  * until the next try-lock is likely to succeed.
1693 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1694 struct cl_lock_closure *closure)
1698 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1699 if (!cl_lock_mutex_try(env, lock)) {
1701 * If lock->cll_inclosure is not empty, lock is already in
1704 if (cfs_list_empty(&lock->cll_inclosure)) {
1705 cl_lock_get_trust(lock);
1706 lu_ref_add(&lock->cll_reference, "closure", closure);
1707 cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1710 cl_lock_mutex_put(env, lock);
1713 cl_lock_disclosure(env, closure);
1714 if (closure->clc_wait) {
1715 cl_lock_get_trust(lock);
1716 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1717 cl_lock_mutex_put(env, closure->clc_origin);
1719 LASSERT(cl_lock_nr_mutexed(env) == 0);
1720 cl_lock_mutex_get(env, lock);
1721 cl_lock_mutex_put(env, lock);
1723 cl_lock_mutex_get(env, closure->clc_origin);
1724 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1725 cl_lock_put(env, lock);
1727 result = CLO_REPEAT;
1731 EXPORT_SYMBOL(cl_lock_enclosure);
1733 /** Releases mutices of enclosed locks. */
1734 void cl_lock_disclosure(const struct lu_env *env,
1735 struct cl_lock_closure *closure)
1737 struct cl_lock *scan;
1738 struct cl_lock *temp;
1740 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1741 cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1743 cfs_list_del_init(&scan->cll_inclosure);
1744 cl_lock_mutex_put(env, scan);
1745 lu_ref_del(&scan->cll_reference, "closure", closure);
1746 cl_lock_put(env, scan);
1749 LASSERT(closure->clc_nr == 0);
1751 EXPORT_SYMBOL(cl_lock_disclosure);
1753 /** Finalizes a closure. */
1754 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1756 LASSERT(closure->clc_nr == 0);
1757 LASSERT(cfs_list_empty(&closure->clc_list));
1759 EXPORT_SYMBOL(cl_lock_closure_fini);
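/*
 * Illustrative sketch (not part of the original code): typical closure
 * usage by a layer that must hold the mutexes of several related locks at
 * once, for instance a top-lock ("parent", already mutexed by the caller)
 * and one of its sub-locks ("child"):
 *
 *      struct cl_lock_closure closure;
 *      int rc;
 *
 *      cl_lock_closure_init(env, &closure, parent, 1);     (1: wait if busy)
 *      rc = cl_lock_closure_build(env, child, &closure);
 *      if (rc == 0) {
 *              ... all locks on closure.clc_list are mutexed here ...
 *              cl_lock_disclosure(env, &closure);
 *      }
 *      cl_lock_closure_fini(&closure);
 *
 * On CLO_REPEAT the origin mutex was dropped and re-taken inside
 * cl_lock_enclosure(), so callers normally propagate CLO_REPEAT upwards and
 * retry the whole operation.  "parent" and "child" are hypothetical lock
 * pointers used only for this sketch.
 */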
1762  * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1763  * destroyed, then destroys it. If there are holds on the lock, destruction is
1764  * postponed until all holds are released. This is called when a decision is
1765  * made to destroy the lock in the future, e.g., when a blocking AST is
1766  * received on it, or a fatal communication error happens.
1768 * Caller must have a reference on this lock to prevent a situation, when
1769 * deleted lock lingers in memory for indefinite time, because nobody calls
1770 * cl_lock_put() to finish it.
1772 * \pre atomic_read(&lock->cll_ref) > 0
1773 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1774 * cl_lock_nr_mutexed(env) == 1)
1775 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1776 * held, as deletion of sub-locks might require releasing a top-lock
1779 * \see cl_lock_operations::clo_delete()
1780 * \see cl_lock::cll_holds
1782 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1784 LINVRNT(cl_lock_is_mutexed(lock));
1785 LINVRNT(cl_lock_invariant(env, lock));
1786 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1787 cl_lock_nr_mutexed(env) == 1));
1790 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1791 if (lock->cll_holds == 0)
1792 cl_lock_delete0(env, lock);
1794 lock->cll_flags |= CLF_DOOMED;
1797 EXPORT_SYMBOL(cl_lock_delete);
1800 * Mark lock as irrecoverably failed, and mark it for destruction. This
1801 * happens when, e.g., server fails to grant a lock to us, or networking
1804 * \pre atomic_read(&lock->cll_ref) > 0
1806 * \see clo_lock_delete()
1807 * \see cl_lock::cll_holds
1809 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1811 LINVRNT(cl_lock_is_mutexed(lock));
1812 LINVRNT(cl_lock_invariant(env, lock));
1815 if (lock->cll_error == 0 && error != 0) {
1816 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1817 lock->cll_error = error;
1818 cl_lock_signal(env, lock);
1819 cl_lock_cancel(env, lock);
1820 cl_lock_delete(env, lock);
1824 EXPORT_SYMBOL(cl_lock_error);
1827  * Cancels this lock. Notifies layers
1828  * (bottom-to-top) that the lock is being cancelled, then destroys the lock. If
1829  * there are holds on the lock, cancellation is postponed until
1830  * all holds are released.
1832 * Cancellation notification is delivered to layers at most once.
1834 * \see cl_lock_operations::clo_cancel()
1835 * \see cl_lock::cll_holds
1837 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1839 LINVRNT(cl_lock_is_mutexed(lock));
1840 LINVRNT(cl_lock_invariant(env, lock));
1843 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1844 if (lock->cll_holds == 0)
1845 cl_lock_cancel0(env, lock);
1847 lock->cll_flags |= CLF_CANCELPEND;
1850 EXPORT_SYMBOL(cl_lock_cancel);
1853 * Finds an existing lock covering given index and optionally different from a
1854 * given \a except lock.
1856 struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
1857 struct cl_object *obj, pgoff_t index,
1858 struct cl_lock *except,
1859 int pending, int canceld)
1861 struct cl_object_header *head;
1862 struct cl_lock *scan;
1863 struct cl_lock *lock;
1864 struct cl_lock_descr *need;
1868 head = cl_object_header(obj);
1869 need = &cl_env_info(env)->clt_descr;
1872 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1874 need->cld_start = need->cld_end = index;
1875 need->cld_enq_flags = 0;
1877 spin_lock(&head->coh_lock_guard);
1878 /* It is fine to match any group lock since there could be only one
1879          * with a unique gid and it conflicts with all other lock modes too */
1880 cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1881 if (scan != except &&
1882 (scan->cll_descr.cld_mode == CLM_GROUP ||
1883 cl_lock_ext_match(&scan->cll_descr, need)) &&
1884 scan->cll_state >= CLS_HELD &&
1885 scan->cll_state < CLS_FREEING &&
1887 * This check is racy as the lock can be canceled right
1888 * after it is done, but this is fine, because page exists
1891 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1892 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1893 /* Don't increase cs_hit here since this
1894 * is just a helper function. */
1895 cl_lock_get_trust(scan);
1900 spin_unlock(&head->coh_lock_guard);
1903 EXPORT_SYMBOL(cl_lock_at_pgoff);
1906 * Calculate the page offset at the layer of @lock.
1907 * At the time of this writing, @page is top page and @lock is sub lock.
1909 static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
1911 struct lu_device_type *dtype;
1912 const struct cl_page_slice *slice;
1914 dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
1915 slice = cl_page_at(page, dtype);
1916 LASSERT(slice != NULL);
1917 return slice->cpl_page->cp_index;
1921  * Check if page @page is covered by another lock; if not, discard it.
1923 static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
1924 struct cl_page *page, void *cbdata)
1926 struct cl_thread_info *info = cl_env_info(env);
1927 struct cl_lock *lock = cbdata;
1928 pgoff_t index = pgoff_at_lock(page, lock);
1930 if (index >= info->clt_fn_index) {
1931 struct cl_lock *tmp;
1933 /* refresh non-overlapped index */
1934 tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
1937 /* Cache the first-non-overlapped index so as to skip
1938 * all pages within [index, clt_fn_index). This
1939 * is safe because if tmp lock is canceled, it will
1940 * discard these pages. */
1941 info->clt_fn_index = tmp->cll_descr.cld_end + 1;
1942 if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
1943 info->clt_fn_index = CL_PAGE_EOF;
1944 cl_lock_put(env, tmp);
1945 } else if (cl_page_own(env, io, page) == 0) {
1946 /* discard the page */
1947 cl_page_unmap(env, io, page);
1948 cl_page_discard(env, io, page);
1949 cl_page_disown(env, io, page);
1951 LASSERT(page->cp_state == CPS_FREEING);
1955 info->clt_next_index = index + 1;
1956 return CLP_GANG_OKAY;
1959 static int discard_cb(const struct lu_env *env, struct cl_io *io,
1960 struct cl_page *page, void *cbdata)
1962 struct cl_thread_info *info = cl_env_info(env);
1963 struct cl_lock *lock = cbdata;
1965 LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
1966 KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1967 !PageWriteback(cl_page_vmpage(env, page))));
1968 KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
1969 !PageDirty(cl_page_vmpage(env, page))));
1971 info->clt_next_index = pgoff_at_lock(page, lock) + 1;
1972 if (cl_page_own(env, io, page) == 0) {
1973 /* discard the page */
1974 cl_page_unmap(env, io, page);
1975 cl_page_discard(env, io, page);
1976 cl_page_disown(env, io, page);
1978 LASSERT(page->cp_state == CPS_FREEING);
1981 return CLP_GANG_OKAY;
1985  * Discards pages protected by the given lock. This function traverses the
1986  * radix tree to find all covering pages and discards them. If a page is
1987  * covered by another lock, it should remain in cache.
1989  * If an error happens on any step, the process continues anyway (the reasoning
1990  * behind this being that lock cancellation cannot be delayed indefinitely).
1992 int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
1994 struct cl_thread_info *info = cl_env_info(env);
1995 struct cl_io *io = &info->clt_io;
1996 struct cl_lock_descr *descr = &lock->cll_descr;
1997 cl_page_gang_cb_t cb;
2001 LINVRNT(cl_lock_invariant(env, lock));
2004 io->ci_obj = cl_object_top(descr->cld_obj);
2005 io->ci_ignore_layout = 1;
2006 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
2010 cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
2011 info->clt_fn_index = info->clt_next_index = descr->cld_start;
2013 res = cl_page_gang_lookup(env, descr->cld_obj, io,
2014 info->clt_next_index, descr->cld_end,
2016 if (info->clt_next_index > descr->cld_end)
2019 if (res == CLP_GANG_RESCHED)
2021 } while (res != CLP_GANG_OKAY);
2023 cl_io_fini(env, io);
2026 EXPORT_SYMBOL(cl_lock_discard_pages);
2029 * Eliminate all locks for a given object.
2031 * Caller has to guarantee that no lock is in active use.
2033 * \param cancel when this is set, cl_locks_prune() cancels locks before
2036 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
2038 struct cl_object_header *head;
2039 struct cl_lock *lock;
2042 head = cl_object_header(obj);
2044 * If locks are destroyed without cancellation, all pages must be
2045 * already destroyed (as otherwise they will be left unprotected).
2047 LASSERT(ergo(!cancel,
2048 head->coh_tree.rnode == NULL && head->coh_pages == 0));
2050 spin_lock(&head->coh_lock_guard);
2051 while (!cfs_list_empty(&head->coh_locks)) {
2052 lock = container_of(head->coh_locks.next,
2053 struct cl_lock, cll_linkage);
2054 cl_lock_get_trust(lock);
2055 spin_unlock(&head->coh_lock_guard);
2056 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2059 cl_lock_mutex_get(env, lock);
2060 if (lock->cll_state < CLS_FREEING) {
2061 LASSERT(lock->cll_users <= 1);
2062 if (unlikely(lock->cll_users == 1)) {
2063 struct l_wait_info lwi = { 0 };
2065 cl_lock_mutex_put(env, lock);
2066 l_wait_event(lock->cll_wq,
2067 lock->cll_users == 0,
2073 cl_lock_cancel(env, lock);
2074 cl_lock_delete(env, lock);
2076 cl_lock_mutex_put(env, lock);
2077 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2078 cl_lock_put(env, lock);
2079 spin_lock(&head->coh_lock_guard);
2081 spin_unlock(&head->coh_lock_guard);
2084 EXPORT_SYMBOL(cl_locks_prune);
2086 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2087 const struct cl_io *io,
2088 const struct cl_lock_descr *need,
2089 const char *scope, const void *source)
2091 struct cl_lock *lock;
2096 lock = cl_lock_find(env, io, need);
2099 cl_lock_mutex_get(env, lock);
2100 if (lock->cll_state < CLS_FREEING &&
2101 !(lock->cll_flags & CLF_CANCELLED)) {
2102 cl_lock_hold_mod(env, lock, +1);
2103 lu_ref_add(&lock->cll_holders, scope, source);
2104 lu_ref_add(&lock->cll_reference, scope, source);
2107 cl_lock_mutex_put(env, lock);
2108 cl_lock_put(env, lock);
2114 * Returns a lock matching \a need description with a reference and a hold on
2117 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2118 * guarantees that lock is not in the CLS_FREEING state on return.
2120 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2121 const struct cl_lock_descr *need,
2122 const char *scope, const void *source)
2124 struct cl_lock *lock;
2128 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2130 cl_lock_mutex_put(env, lock);
2133 EXPORT_SYMBOL(cl_lock_hold);
2136 * Main high-level entry point of cl_lock interface that finds existing or
2137 * enqueues new lock matching given description.
2139 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2140 const struct cl_lock_descr *need,
2141 const char *scope, const void *source)
2143 struct cl_lock *lock;
2145 __u32 enqflags = need->cld_enq_flags;
2149 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2153 rc = cl_enqueue_locked(env, lock, io, enqflags);
2155 if (cl_lock_fits_into(env, lock, need, io)) {
2156 if (!(enqflags & CEF_AGL)) {
2157 cl_lock_mutex_put(env, lock);
2158 cl_lock_lockdep_acquire(env, lock,
2164 cl_unuse_locked(env, lock);
2166 cl_lock_trace(D_DLMTRACE, env,
2167 rc <= 0 ? "enqueue failed" : "agl succeed", lock);
2168 cl_lock_hold_release(env, lock, scope, source);
2169 cl_lock_mutex_put(env, lock);
2170 lu_ref_del(&lock->cll_reference, scope, source);
2171 cl_lock_put(env, lock);
2173 LASSERT(enqflags & CEF_AGL);
2175 } else if (rc != 0) {
2181 EXPORT_SYMBOL(cl_lock_request);
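/*
 * Illustrative sketch (not part of the original code): the usual life-cycle
 * driven by cl_lock_request() callers (compare the lockset handling in
 * cl_io.c):
 *
 *      lock = cl_lock_request(env, io, need, "my-scope", my_source);
 *      if (!IS_ERR(lock) && lock != NULL) {
 *              rc = cl_wait(env, lock);        (wait until CLS_HELD)
 *              if (rc == 0) {
 *                      ... perform I/O covered by the lock ...
 *                      cl_unuse(env, lock);    (lock goes back to the cache)
 *              }
 *              cl_lock_release(env, lock, "my-scope", my_source);
 *      }
 *
 * "my-scope" and my_source are arbitrary lu_ref debugging labels chosen by
 * the caller; the same pair must be used for request and release.
 */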
2184 * Adds a hold to a known lock.
2186 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2187 const char *scope, const void *source)
2189 LINVRNT(cl_lock_is_mutexed(lock));
2190 LINVRNT(cl_lock_invariant(env, lock));
2191 LASSERT(lock->cll_state != CLS_FREEING);
2194 cl_lock_hold_mod(env, lock, +1);
2196 lu_ref_add(&lock->cll_holders, scope, source);
2197 lu_ref_add(&lock->cll_reference, scope, source);
2200 EXPORT_SYMBOL(cl_lock_hold_add);
2203 * Releases a hold and a reference on a lock, on which caller acquired a
2206 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2207 const char *scope, const void *source)
2209 LINVRNT(cl_lock_invariant(env, lock));
2211 cl_lock_hold_release(env, lock, scope, source);
2212 lu_ref_del(&lock->cll_reference, scope, source);
2213 cl_lock_put(env, lock);
2216 EXPORT_SYMBOL(cl_lock_unhold);
2219 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2221 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2222 const char *scope, const void *source)
2224 LINVRNT(cl_lock_invariant(env, lock));
2226 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2227 cl_lock_mutex_get(env, lock);
2228 cl_lock_hold_release(env, lock, scope, source);
2229 cl_lock_mutex_put(env, lock);
2230 lu_ref_del(&lock->cll_reference, scope, source);
2231 cl_lock_put(env, lock);
2234 EXPORT_SYMBOL(cl_lock_release);
2236 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2238 LINVRNT(cl_lock_is_mutexed(lock));
2239 LINVRNT(cl_lock_invariant(env, lock));
2242 cl_lock_used_mod(env, lock, +1);
2245 EXPORT_SYMBOL(cl_lock_user_add);
2247 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2249 LINVRNT(cl_lock_is_mutexed(lock));
2250 LINVRNT(cl_lock_invariant(env, lock));
2251 LASSERT(lock->cll_users > 0);
2254 cl_lock_used_mod(env, lock, -1);
2255 if (lock->cll_users == 0)
2256 cfs_waitq_broadcast(&lock->cll_wq);
2259 EXPORT_SYMBOL(cl_lock_user_del);
2261 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2263 static const char *names[] = {
2264 [CLM_PHANTOM] = "P",
2269 if (0 <= mode && mode < ARRAY_SIZE(names))
2274 EXPORT_SYMBOL(cl_lock_mode_name);
2277 * Prints human readable representation of a lock description.
2279 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2280 lu_printer_t printer,
2281 const struct cl_lock_descr *descr)
2283 const struct lu_fid *fid;
2285 fid = lu_object_fid(&descr->cld_obj->co_lu);
2286 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2288 EXPORT_SYMBOL(cl_lock_descr_print);
2291 * Prints human readable representation of \a lock to the \a f.
2293 void cl_lock_print(const struct lu_env *env, void *cookie,
2294 lu_printer_t printer, const struct cl_lock *lock)
2296 const struct cl_lock_slice *slice;
2297 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2298 lock, cfs_atomic_read(&lock->cll_ref),
2299 lock->cll_state, lock->cll_error, lock->cll_holds,
2300 lock->cll_users, lock->cll_flags);
2301 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2302 (*printer)(env, cookie, " {\n");
2304 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2305 (*printer)(env, cookie, " %s@%p: ",
2306 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2308 if (slice->cls_ops->clo_print != NULL)
2309 slice->cls_ops->clo_print(env, cookie, printer, slice);
2310 (*printer)(env, cookie, "\n");
2312 (*printer)(env, cookie, "} lock@%p\n", lock);
2314 EXPORT_SYMBOL(cl_lock_print);
2316 int cl_lock_init(void)
2318 return lu_kmem_init(cl_lock_caches);
2321 void cl_lock_fini(void)
2323 lu_kmem_fini(cl_lock_caches);
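/*
 * Note (not part of the original code): cl_lock_init() and cl_lock_fini()
 * are expected to be called from the cl_object global setup/teardown path
 * (cl_global_init()/cl_global_fini() in cl_object.c), pairing
 * lu_kmem_init() with lu_kmem_fini() for the cl_lock slab cache declared
 * at the top of this file.
 */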