4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 #include <obd_class.h>
44 #include <obd_support.h>
45 #include <lustre_fid.h>
46 #include <libcfs/list.h>
47 /* lu_time_global_{init,fini}() */
48 #include <lu_time.h>
50 #include <cl_object.h>
51 #include "cl_internal.h"
53 /** Lock class of cl_lock::cll_guard */
54 static cfs_lock_class_key_t cl_lock_guard_class;
55 static cfs_mem_cache_t *cl_lock_kmem;
57 static struct lu_kmem_descr cl_lock_caches[] = {
58 {
59 .ckd_cache = &cl_lock_kmem,
60 .ckd_name = "cl_lock_kmem",
61 .ckd_size = sizeof (struct cl_lock)
62 },
63 {
64 .ckd_cache = NULL
65 }
66 };
69 * Basic lock invariant that is maintained at all times. Caller either has a
70 * reference to \a lock, or somehow ensures that \a lock cannot be freed.
72 * \see cl_lock_invariant()
74 static int cl_lock_invariant_trusted(const struct lu_env *env,
75 const struct cl_lock *lock)
77 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
78 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
79 lock->cll_holds >= lock->cll_users &&
80 lock->cll_holds >= 0 &&
81 lock->cll_users >= 0 &&
86 * Stronger lock invariant, checking that caller has a reference on a lock.
88 * \see cl_lock_invariant_trusted()
90 static int cl_lock_invariant(const struct lu_env *env,
91 const struct cl_lock *lock)
95 result = cfs_atomic_read(&lock->cll_ref) > 0 &&
96 cl_lock_invariant_trusted(env, lock);
97 if (!result && env != NULL)
98 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
103 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
105 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
107 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
111 * Returns a set of counters for this lock, depending on its nesting level.
113 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
114 const struct cl_lock *lock)
116 struct cl_thread_info *info;
117 enum clt_nesting_level nesting;
119 info = cl_env_info(env);
120 nesting = cl_lock_nesting(lock);
121 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
122 return &info->clt_counters[nesting];
125 static void cl_lock_trace0(int level, const struct lu_env *env,
126 const char *prefix, const struct cl_lock *lock,
127 const char *func, const int line)
129 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
130 CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
131 "(%p/%d/%d) at %s():%d\n",
132 prefix, lock, cfs_atomic_read(&lock->cll_ref),
133 lock->cll_guarder, lock->cll_depth,
134 lock->cll_state, lock->cll_error, lock->cll_holds,
135 lock->cll_users, lock->cll_flags,
136 env, h->coh_nesting, cl_lock_nr_mutexed(env),
139 #define cl_lock_trace(level, env, prefix, lock) \
140 cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
142 #define RETIP ((unsigned long)__builtin_return_address(0))
144 #ifdef CONFIG_LOCKDEP
145 static cfs_lock_class_key_t cl_lock_key;
147 static void cl_lock_lockdep_init(struct cl_lock *lock)
149 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
152 static void cl_lock_lockdep_acquire(const struct lu_env *env,
153 struct cl_lock *lock, __u32 enqflags)
155 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
156 #ifdef HAVE_LOCK_MAP_ACQUIRE
157 lock_map_acquire(&lock->dep_map);
158 #else /* HAVE_LOCK_MAP_ACQUIRE */
159 lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
160 /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
161 /* check: */ 2, RETIP);
162 #endif /* HAVE_LOCK_MAP_ACQUIRE */
165 static void cl_lock_lockdep_release(const struct lu_env *env,
166 struct cl_lock *lock)
168 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
169 lock_release(&lock->dep_map, 0, RETIP);
172 #else /* !CONFIG_LOCKDEP */
174 static void cl_lock_lockdep_init(struct cl_lock *lock)
176 static void cl_lock_lockdep_acquire(const struct lu_env *env,
177 struct cl_lock *lock, __u32 enqflags)
179 static void cl_lock_lockdep_release(const struct lu_env *env,
180 struct cl_lock *lock)
183 #endif /* !CONFIG_LOCKDEP */
186 * Adds lock slice to the compound lock.
188 * This is called by cl_object_operations::coo_lock_init() methods to add a
189 * per-layer state to the lock. New state is added at the end of
190 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
192 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
194 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
195 struct cl_object *obj,
196 const struct cl_lock_operations *ops)
199 slice->cls_lock = lock;
200 cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
201 slice->cls_obj = obj;
202 slice->cls_ops = ops;
205 EXPORT_SYMBOL(cl_lock_slice_add);
208 * Returns true iff a lock with the mode \a has provides at least the same
209 * guarantees as a lock with the mode \a need.
211 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
213 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
214 need == CLM_PHANTOM || need == CLM_GROUP);
215 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
216 has == CLM_PHANTOM || has == CLM_GROUP);
217 CLASSERT(CLM_PHANTOM < CLM_READ);
218 CLASSERT(CLM_READ < CLM_WRITE);
219 CLASSERT(CLM_WRITE < CLM_GROUP);
221 if (has != CLM_GROUP)
222 return need <= has;
223 else
224 return need == has;
226 EXPORT_SYMBOL(cl_lock_mode_match);
229 * Returns true iff extent portions of lock descriptions match.
231 int cl_lock_ext_match(const struct cl_lock_descr *has,
232 const struct cl_lock_descr *need)
235 has->cld_start <= need->cld_start &&
236 has->cld_end >= need->cld_end &&
237 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
238 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
240 EXPORT_SYMBOL(cl_lock_ext_match);
243 * Returns true iff a lock with the description \a has provides at least the
244 * same guarantees as a lock with the description \a need.
246 int cl_lock_descr_match(const struct cl_lock_descr *has,
247 const struct cl_lock_descr *need)
250 cl_object_same(has->cld_obj, need->cld_obj) &&
251 cl_lock_ext_match(has, need);
253 EXPORT_SYMBOL(cl_lock_descr_match);
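/**
* Matching sketch for the three helpers above (illustrative only; \a obj
* stands for an assumed, already referenced cl_object). Because
* CLM_PHANTOM < CLM_READ < CLM_WRITE, a cached [0, 99] write lock satisfies
* a request for a [10, 20] read lock on the same object, but not vice versa:
*
* \code
*	struct cl_lock_descr has = {
*		.cld_obj   = obj,
*		.cld_mode  = CLM_WRITE,
*		.cld_start = 0,
*		.cld_end   = 99
*	};
*	struct cl_lock_descr need = {
*		.cld_obj   = obj,
*		.cld_mode  = CLM_READ,
*		.cld_start = 10,
*		.cld_end   = 20
*	};
*
*	LASSERT(cl_lock_mode_match(has.cld_mode, need.cld_mode));
*	LASSERT(cl_lock_descr_match(&has, &need));
*	LASSERT(!cl_lock_descr_match(&need, &has));
* \endcode
*/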
255 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
257 struct cl_object *obj = lock->cll_descr.cld_obj;
259 LINVRNT(!cl_lock_is_mutexed(lock));
262 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
264 while (!cfs_list_empty(&lock->cll_layers)) {
265 struct cl_lock_slice *slice;
267 slice = cfs_list_entry(lock->cll_layers.next,
268 struct cl_lock_slice, cls_linkage);
269 cfs_list_del_init(lock->cll_layers.next);
270 slice->cls_ops->clo_fini(env, slice);
272 cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
273 cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
274 lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
275 cl_object_put(env, obj);
276 lu_ref_fini(&lock->cll_reference);
277 lu_ref_fini(&lock->cll_holders);
278 cfs_mutex_destroy(&lock->cll_guard);
279 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
284 * Releases a reference on a lock.
286 * When last reference is released, lock is returned to the cache, unless it
287 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
288 * immediately.
290 * \see cl_object_put(), cl_page_put()
292 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
294 struct cl_object *obj;
295 struct cl_site *site;
297 LINVRNT(cl_lock_invariant(env, lock));
299 obj = lock->cll_descr.cld_obj;
300 LINVRNT(obj != NULL);
301 site = cl_object_site(obj);
303 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
304 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
306 if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
307 if (lock->cll_state == CLS_FREEING) {
308 LASSERT(cfs_list_empty(&lock->cll_linkage));
309 cl_lock_free(env, lock);
311 cfs_atomic_dec(&site->cs_locks.cs_busy);
315 EXPORT_SYMBOL(cl_lock_put);
318 * Acquires an additional reference to a lock.
320 * This can be called only by a caller already possessing a reference to \a
321 * lock.
323 * \see cl_object_get(), cl_page_get()
325 void cl_lock_get(struct cl_lock *lock)
327 LINVRNT(cl_lock_invariant(NULL, lock));
328 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
329 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
330 cfs_atomic_inc(&lock->cll_ref);
332 EXPORT_SYMBOL(cl_lock_get);
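/**
* Reference pairing sketch (illustrative; assumes the caller already owns a
* reference to \a lock, as cl_lock_get() requires; use_lock_somehow() is a
* hypothetical consumer of the extra reference):
*
* \code
*	cl_lock_get(lock);
*	use_lock_somehow(env, lock);
*	cl_lock_put(env, lock);
* \endcode
*/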
335 * Acquires a reference to a lock.
337 * This is much like cl_lock_get(), except that this function can be used to
338 * acquire initial reference to the cached lock. Caller has to deal with all
339 * possible races. Use with care!
341 * \see cl_page_get_trust()
343 void cl_lock_get_trust(struct cl_lock *lock)
345 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
347 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
348 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
349 if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
350 cfs_atomic_inc(&site->cs_locks.cs_busy);
352 EXPORT_SYMBOL(cl_lock_get_trust);
355 * Helper function destroying the lock that wasn't completely initialized.
357 * Other threads can acquire references to the top-lock through its
358 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
360 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
362 cl_lock_mutex_get(env, lock);
363 cl_lock_cancel(env, lock);
364 cl_lock_delete(env, lock);
365 cl_lock_mutex_put(env, lock);
366 cl_lock_put(env, lock);
369 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
370 struct cl_object *obj,
371 const struct cl_io *io,
372 const struct cl_lock_descr *descr)
374 struct cl_lock *lock;
375 struct lu_object_header *head;
376 struct cl_site *site = cl_object_site(obj);
379 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
381 cfs_atomic_set(&lock->cll_ref, 1);
382 lock->cll_descr = *descr;
383 lock->cll_state = CLS_NEW;
385 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
386 "cl_lock", lock);
387 CFS_INIT_LIST_HEAD(&lock->cll_layers);
388 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
389 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
390 lu_ref_init(&lock->cll_reference);
391 lu_ref_init(&lock->cll_holders);
392 cfs_mutex_init(&lock->cll_guard);
393 cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
394 cfs_waitq_init(&lock->cll_wq);
395 head = obj->co_lu.lo_header;
396 cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
397 cfs_atomic_inc(&site->cs_locks.cs_total);
398 cfs_atomic_inc(&site->cs_locks.cs_created);
399 cl_lock_lockdep_init(lock);
400 cfs_list_for_each_entry(obj, &head->loh_layers,
401 co_lu.lo_linkage) {
402 int err;
404 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
406 cl_lock_finish(env, lock);
412 lock = ERR_PTR(-ENOMEM);
417 * Transfers the lock into the INTRANSIT state and returns the original state.
419 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
420 * \post state: CLS_INTRANSIT
423 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
424 struct cl_lock *lock)
426 enum cl_lock_state state = lock->cll_state;
428 LASSERT(cl_lock_is_mutexed(lock));
429 LASSERT(state != CLS_INTRANSIT);
430 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
431 "Malformed lock state %d.\n", state);
433 cl_lock_state_set(env, lock, CLS_INTRANSIT);
434 lock->cll_intransit_owner = cfs_current();
435 cl_lock_hold_add(env, lock, "intransit", cfs_current());
438 EXPORT_SYMBOL(cl_lock_intransit);
441 * Exits the INTRANSIT state and restores the lock to the given original state.
443 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
444 enum cl_lock_state state)
446 LASSERT(cl_lock_is_mutexed(lock));
447 LASSERT(lock->cll_state == CLS_INTRANSIT);
448 LASSERT(state != CLS_INTRANSIT);
449 LASSERT(lock->cll_intransit_owner == cfs_current());
451 lock->cll_intransit_owner = NULL;
452 cl_lock_state_set(env, lock, state);
453 cl_lock_unhold(env, lock, "intransit", cfs_current());
455 EXPORT_SYMBOL(cl_lock_extransit);
458 * Checks whether the lock is in the INTRANSIT state and owned in transit by another thread.
460 int cl_lock_is_intransit(struct cl_lock *lock)
462 LASSERT(cl_lock_is_mutexed(lock));
463 return lock->cll_state == CLS_INTRANSIT &&
464 lock->cll_intransit_owner != cfs_current();
466 EXPORT_SYMBOL(cl_lock_is_intransit);
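/**
* INTRANSIT protocol sketch (illustrative; cl_use_try() and cl_unuse_try()
* below are the real users). The lock mutex must be held to enter and to
* leave the state, but may be dropped in between while the owning thread
* performs blocking work; do_blocking_work() is hypothetical:
*
* \code
*	enum cl_lock_state state;
*
*	cl_lock_mutex_get(env, lock);
*	state = cl_lock_intransit(env, lock);
*	cl_lock_mutex_put(env, lock);
*
*	do_blocking_work(env, lock);
*
*	cl_lock_mutex_get(env, lock);
*	cl_lock_extransit(env, lock, state);
*	cl_lock_mutex_put(env, lock);
* \endcode
*/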
468 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
469 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
470 * cover multiple stripes and can trigger cascading timeouts.
472 static int cl_lock_fits_into(const struct lu_env *env,
473 const struct cl_lock *lock,
474 const struct cl_lock_descr *need,
475 const struct cl_io *io)
477 const struct cl_lock_slice *slice;
479 LINVRNT(cl_lock_invariant_trusted(env, lock));
481 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
482 if (slice->cls_ops->clo_fits_into != NULL &&
483 !slice->cls_ops->clo_fits_into(env, slice, need, io))
489 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
490 struct cl_object *obj,
491 const struct cl_io *io,
492 const struct cl_lock_descr *need)
494 struct cl_lock *lock;
495 struct cl_object_header *head;
496 struct cl_site *site;
500 head = cl_object_header(obj);
501 site = cl_object_site(obj);
502 LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
503 cfs_atomic_inc(&site->cs_locks.cs_lookup);
504 cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
507 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
508 lock->cll_state < CLS_FREEING &&
509 lock->cll_error == 0 &&
510 !(lock->cll_flags & CLF_CANCELLED) &&
511 cl_lock_fits_into(env, lock, need, io);
512 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
513 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
516 cl_lock_get_trust(lock);
517 cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
525 * Returns a lock matching description \a need.
527 * This is the main entry point into the cl_lock caching interface. First, a
528 * cache (implemented as a per-object linked list) is consulted. If lock is
529 * found there, it is returned immediately. Otherwise new lock is allocated
530 * and returned. In any case, additional reference to lock is acquired.
532 * \see cl_object_find(), cl_page_find()
534 static struct cl_lock *cl_lock_find(const struct lu_env *env,
535 const struct cl_io *io,
536 const struct cl_lock_descr *need)
538 struct cl_object_header *head;
539 struct cl_object *obj;
540 struct cl_lock *lock;
541 struct cl_site *site;
546 head = cl_object_header(obj);
547 site = cl_object_site(obj);
549 cfs_spin_lock(&head->coh_lock_guard);
550 lock = cl_lock_lookup(env, obj, io, need);
551 cfs_spin_unlock(&head->coh_lock_guard);
554 lock = cl_lock_alloc(env, obj, io, need);
556 struct cl_lock *ghost;
558 cfs_spin_lock(&head->coh_lock_guard);
559 ghost = cl_lock_lookup(env, obj, io, need);
561 cfs_list_add_tail(&lock->cll_linkage,
563 cfs_spin_unlock(&head->coh_lock_guard);
564 cfs_atomic_inc(&site->cs_locks.cs_busy);
566 cfs_spin_unlock(&head->coh_lock_guard);
568 * Other threads can acquire references to the
569 * top-lock through its sub-locks. Hence, it
570 * cannot be cl_lock_free()-ed immediately.
572 cl_lock_finish(env, lock);
581 * Returns existing lock matching given description. This is similar to
582 * cl_lock_find() except that no new lock is created, and returned lock is
583 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
585 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
586 const struct cl_lock_descr *need,
587 const char *scope, const void *source)
589 struct cl_object_header *head;
590 struct cl_object *obj;
591 struct cl_lock *lock;
594 head = cl_object_header(obj);
596 cfs_spin_lock(&head->coh_lock_guard);
597 lock = cl_lock_lookup(env, obj, io, need);
598 cfs_spin_unlock(&head->coh_lock_guard);
603 cl_lock_mutex_get(env, lock);
604 if (lock->cll_state == CLS_INTRANSIT)
605 cl_lock_state_wait(env, lock); /* Don't care return value. */
606 if (lock->cll_state == CLS_CACHED) {
608 result = cl_use_try(env, lock, 1);
610 cl_lock_error(env, lock, result);
612 if (lock->cll_state == CLS_HELD) {
613 cl_lock_hold_add(env, lock, scope, source);
614 cl_lock_user_add(env, lock);
615 cl_lock_mutex_put(env, lock);
616 cl_lock_lockdep_acquire(env, lock, 0);
617 cl_lock_put(env, lock);
619 cl_lock_mutex_put(env, lock);
620 cl_lock_put(env, lock);
626 EXPORT_SYMBOL(cl_lock_peek);
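/**
* Peek usage sketch (illustrative; \a env, \a io and \a descr are assumed to
* be prepared by the caller, read_under_lock() is hypothetical, and the
* give-back sequence shown is one plausible pairing, the same one used with
* cl_lock_request() below):
*
* \code
*	struct cl_lock *lock;
*
*	lock = cl_lock_peek(env, io, descr, "peek-example", cfs_current());
*	if (lock != NULL) {
*		read_under_lock(env, lock);
*		cl_unuse(env, lock);
*		cl_lock_release(env, lock, "peek-example", cfs_current());
*	}
* \endcode
*/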
629 * Returns a slice within a lock, corresponding to the given layer in the
630 * device stack.
632 * \see cl_page_at()
634 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
635 const struct lu_device_type *dtype)
637 const struct cl_lock_slice *slice;
639 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
642 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
643 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
648 EXPORT_SYMBOL(cl_lock_at);
650 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
652 struct cl_thread_counters *counters;
654 counters = cl_lock_counters(env, lock);
656 counters->ctc_nr_locks_locked++;
657 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
658 cl_lock_trace(D_TRACE, env, "got mutex", lock);
662 * Locks cl_lock object.
664 * This is used to manipulate cl_lock fields, and to serialize state
665 * transitions in the lock state machine.
667 * \post cl_lock_is_mutexed(lock)
669 * \see cl_lock_mutex_put()
671 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
673 LINVRNT(cl_lock_invariant(env, lock));
675 if (lock->cll_guarder == cfs_current()) {
676 LINVRNT(cl_lock_is_mutexed(lock));
677 LINVRNT(lock->cll_depth > 0);
679 struct cl_object_header *hdr;
680 struct cl_thread_info *info;
683 LINVRNT(lock->cll_guarder != cfs_current());
684 hdr = cl_object_header(lock->cll_descr.cld_obj);
686 * Check that mutices are taken in the bottom-to-top order.
688 info = cl_env_info(env);
689 for (i = 0; i < hdr->coh_nesting; ++i)
690 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
691 cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
692 lock->cll_guarder = cfs_current();
693 LINVRNT(lock->cll_depth == 0);
695 cl_lock_mutex_tail(env, lock);
697 EXPORT_SYMBOL(cl_lock_mutex_get);
700 * Try-locks cl_lock object.
702 * \retval 0 \a lock was successfully locked
704 * \retval -EBUSY \a lock cannot be locked right now
706 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
708 * \see cl_lock_mutex_get()
710 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
714 LINVRNT(cl_lock_invariant_trusted(env, lock));
718 if (lock->cll_guarder == cfs_current()) {
719 LINVRNT(lock->cll_depth > 0);
720 cl_lock_mutex_tail(env, lock);
721 } else if (cfs_mutex_trylock(&lock->cll_guard)) {
722 LINVRNT(lock->cll_depth == 0);
723 lock->cll_guarder = cfs_current();
724 cl_lock_mutex_tail(env, lock);
729 EXPORT_SYMBOL(cl_lock_mutex_try);
732 * Unlocks cl_lock object.
734 * \pre cl_lock_is_mutexed(lock)
736 * \see cl_lock_mutex_get()
738 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
740 struct cl_thread_counters *counters;
742 LINVRNT(cl_lock_invariant(env, lock));
743 LINVRNT(cl_lock_is_mutexed(lock));
744 LINVRNT(lock->cll_guarder == cfs_current());
745 LINVRNT(lock->cll_depth > 0);
747 counters = cl_lock_counters(env, lock);
748 LINVRNT(counters->ctc_nr_locks_locked > 0);
750 cl_lock_trace(D_TRACE, env, "put mutex", lock);
751 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
752 counters->ctc_nr_locks_locked--;
753 if (--lock->cll_depth == 0) {
754 lock->cll_guarder = NULL;
755 cfs_mutex_unlock(&lock->cll_guard);
758 EXPORT_SYMBOL(cl_lock_mutex_put);
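/**
* Mutex pairing sketch (illustrative). The mutex is recursive for the owning
* thread: after the second cl_lock_mutex_get() below cll_depth is 2, and
* cll_guard is actually released only by the second cl_lock_mutex_put():
*
* \code
*	cl_lock_mutex_get(env, lock);
*	cl_lock_mutex_get(env, lock);
*	LASSERT(cl_lock_is_mutexed(lock));
*	cl_lock_mutex_put(env, lock);
*	cl_lock_mutex_put(env, lock);
* \endcode
*/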
761 * Returns true iff lock's mutex is owned by the current thread.
763 int cl_lock_is_mutexed(struct cl_lock *lock)
765 return lock->cll_guarder == cfs_current();
767 EXPORT_SYMBOL(cl_lock_is_mutexed);
770 * Returns number of cl_lock mutices held by the current thread (environment).
772 int cl_lock_nr_mutexed(const struct lu_env *env)
774 struct cl_thread_info *info;
779 * NOTE: if summation across all nesting levels (currently 2) proves
780 * too expensive, a summary counter can be added to
781 * struct cl_thread_info.
783 info = cl_env_info(env);
784 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
785 locked += info->clt_counters[i].ctc_nr_locks_locked;
788 EXPORT_SYMBOL(cl_lock_nr_mutexed);
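/**
* Typical use of cl_lock_nr_mutexed() (a fragment mirroring the assertions
* in cl_lock_state_wait() and cl_lock_enqueue_wait() in this file): a thread
* must not block while holding any cl_lock mutex:
*
* \code
*	LASSERT(cl_lock_nr_mutexed(env) == 0);
*	cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
* \endcode
*/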
790 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
792 LINVRNT(cl_lock_is_mutexed(lock));
793 LINVRNT(cl_lock_invariant(env, lock));
795 if (!(lock->cll_flags & CLF_CANCELLED)) {
796 const struct cl_lock_slice *slice;
798 lock->cll_flags |= CLF_CANCELLED;
799 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
801 if (slice->cls_ops->clo_cancel != NULL)
802 slice->cls_ops->clo_cancel(env, slice);
808 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
810 struct cl_object_header *head;
811 const struct cl_lock_slice *slice;
813 LINVRNT(cl_lock_is_mutexed(lock));
814 LINVRNT(cl_lock_invariant(env, lock));
817 if (lock->cll_state < CLS_FREEING) {
818 LASSERT(lock->cll_state != CLS_INTRANSIT);
819 cl_lock_state_set(env, lock, CLS_FREEING);
821 head = cl_object_header(lock->cll_descr.cld_obj);
823 cfs_spin_lock(&head->coh_lock_guard);
824 cfs_list_del_init(&lock->cll_linkage);
826 cfs_spin_unlock(&head->coh_lock_guard);
828 * From now on, no new references to this lock can be acquired
829 * by cl_lock_lookup().
831 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
833 if (slice->cls_ops->clo_delete != NULL)
834 slice->cls_ops->clo_delete(env, slice);
837 * From now on, no new references to this lock can be acquired
838 * by layer-specific means (like a pointer from struct
839 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
842 * Lock will be finally freed in cl_lock_put() when last of
843 * existing references goes away.
850 * Modifies cl_lock::cll_holds counter for a given lock. Also, for a
851 * top-lock (nesting == 0) accounts for this modification in the per-thread
852 * debugging counters. Sub-lock holds can be released by a thread different
853 * from the one that acquired it.
855 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
858 struct cl_thread_counters *counters;
859 enum clt_nesting_level nesting;
861 lock->cll_holds += delta;
862 nesting = cl_lock_nesting(lock);
863 if (nesting == CNL_TOP) {
864 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
865 counters->ctc_nr_held += delta;
866 LASSERT(counters->ctc_nr_held >= 0);
871 * Modifies cl_lock::cll_users counter for a given lock. See
872 * cl_lock_hold_mod() for the explanation of the debugging code.
874 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
877 struct cl_thread_counters *counters;
878 enum clt_nesting_level nesting;
880 lock->cll_users += delta;
881 nesting = cl_lock_nesting(lock);
882 if (nesting == CNL_TOP) {
883 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
884 counters->ctc_nr_used += delta;
885 LASSERT(counters->ctc_nr_used >= 0);
889 static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
890 const char *scope, const void *source)
892 LINVRNT(cl_lock_is_mutexed(lock));
893 LINVRNT(cl_lock_invariant(env, lock));
894 LASSERT(lock->cll_holds > 0);
897 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
898 lu_ref_del(&lock->cll_holders, scope, source);
899 cl_lock_hold_mod(env, lock, -1);
900 if (lock->cll_holds == 0) {
901 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
902 lock->cll_descr.cld_mode == CLM_GROUP)
904 * If the lock is still a phantom or group lock when the
905 * user is done with it, destroy the lock.
907 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
908 if (lock->cll_flags & CLF_CANCELPEND) {
909 lock->cll_flags &= ~CLF_CANCELPEND;
910 cl_lock_cancel0(env, lock);
912 if (lock->cll_flags & CLF_DOOMED) {
913 /* no longer doomed: it's dead... Jim. */
914 lock->cll_flags &= ~CLF_DOOMED;
915 cl_lock_delete0(env, lock);
922 * Waits until lock state is changed.
924 * This function is called with cl_lock mutex locked, atomically releases
925 * mutex and goes to sleep, waiting for a lock state change (signaled by
926 * cl_lock_signal()), and re-acquires the mutex before return.
928 * This function is used to wait until lock state machine makes some progress
929 * and to emulate synchronous operations on top of the asynchronous lock
930 * machine.
932 * \retval -EINTR wait was interrupted
934 * \retval 0 wait wasn't interrupted
936 * \pre cl_lock_is_mutexed(lock)
938 * \see cl_lock_signal()
940 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
942 cfs_waitlink_t waiter;
943 cfs_sigset_t blocked;
947 LINVRNT(cl_lock_is_mutexed(lock));
948 LINVRNT(cl_lock_invariant(env, lock));
949 LASSERT(lock->cll_depth == 1);
950 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
952 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
953 result = lock->cll_error;
955 /* To avoid being interrupted by the 'non-fatal' signals
956 * (SIGCHLD, for instance), we block them temporarily.
958 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
960 cfs_waitlink_init(&waiter);
961 cfs_waitq_add(&lock->cll_wq, &waiter);
962 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
963 cl_lock_mutex_put(env, lock);
965 LASSERT(cl_lock_nr_mutexed(env) == 0);
966 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
968 cl_lock_mutex_get(env, lock);
969 cfs_set_current_state(CFS_TASK_RUNNING);
970 cfs_waitq_del(&lock->cll_wq, &waiter);
971 result = cfs_signal_pending() ? -EINTR : 0;
973 /* Restore old blocked signals */
974 cfs_restore_sigs(blocked);
978 EXPORT_SYMBOL(cl_lock_state_wait);
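/**
* Canonical wait-loop sketch (this is the shape of cl_wait() and
* cl_enqueue_locked() below; try_step() is a hypothetical operation that
* returns CLO_WAIT while progress is not yet possible):
*
* \code
*	cl_lock_mutex_get(env, lock);
*	do {
*		result = try_step(env, lock);
*		if (result == CLO_WAIT) {
*			result = cl_lock_state_wait(env, lock);
*			if (result == 0)
*				continue;
*		}
*		break;
*	} while (1);
*	cl_lock_mutex_put(env, lock);
* \endcode
*/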
980 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
981 enum cl_lock_state state)
983 const struct cl_lock_slice *slice;
986 LINVRNT(cl_lock_is_mutexed(lock));
987 LINVRNT(cl_lock_invariant(env, lock));
989 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
990 if (slice->cls_ops->clo_state != NULL)
991 slice->cls_ops->clo_state(env, slice, state);
992 cfs_waitq_broadcast(&lock->cll_wq);
997 * Notifies waiters that lock state changed.
999 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1000 * layers about state change by calling cl_lock_operations::clo_state()
1001 * top-to-bottom.
1003 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1006 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1007 cl_lock_state_signal(env, lock, lock->cll_state);
1010 EXPORT_SYMBOL(cl_lock_signal);
1013 * Changes lock state.
1015 * This function is invoked to notify layers that lock state changed, possibly
1016 * as a result of an asynchronous event such as call-back reception.
1018 * \post lock->cll_state == state
1020 * \see cl_lock_operations::clo_state()
1022 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1023 enum cl_lock_state state)
1025 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
1028 LASSERT(lock->cll_state <= state ||
1029 (lock->cll_state == CLS_CACHED &&
1030 (state == CLS_HELD || /* lock found in cache */
1031 state == CLS_NEW || /* sub-lock canceled */
1032 state == CLS_INTRANSIT)) ||
1033 /* lock is in transit state */
1034 lock->cll_state == CLS_INTRANSIT);
1036 if (lock->cll_state != state) {
1037 cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
1038 cfs_atomic_inc(&site->cs_locks_state[state]);
1040 cl_lock_state_signal(env, lock, state);
1041 lock->cll_state = state;
1045 EXPORT_SYMBOL(cl_lock_state_set);
1047 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1049 const struct cl_lock_slice *slice;
1055 LINVRNT(cl_lock_is_mutexed(lock));
1056 LINVRNT(cl_lock_invariant(env, lock));
1057 LASSERT(lock->cll_state == CLS_INTRANSIT);
1060 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1062 if (slice->cls_ops->clo_unuse != NULL) {
1063 result = slice->cls_ops->clo_unuse(env, slice);
1068 LASSERT(result != -ENOSYS);
1069 } while (result == CLO_REPEAT);
1075 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1076 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1077 * If @atomic is 1, the lock must be unused on failure to roll it back,
1078 * keeping the whole use process atomic.
1080 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1082 const struct cl_lock_slice *slice;
1084 enum cl_lock_state state;
1087 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1089 LASSERT(lock->cll_state == CLS_CACHED);
1090 if (lock->cll_error)
1091 RETURN(lock->cll_error);
1094 state = cl_lock_intransit(env, lock);
1095 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1096 if (slice->cls_ops->clo_use != NULL) {
1097 result = slice->cls_ops->clo_use(env, slice);
1102 LASSERT(result != -ENOSYS);
1104 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1110 if (result == -ESTALE) {
1112 * -ESTALE means a sublock is being cancelled
1113 * at this moment; set the lock state to NEW
1114 * here and ask the caller to repeat.
1117 result = CLO_REPEAT;
1120 /* @atomic means back-off-on-failure. */
1123 rc = cl_unuse_try_internal(env, lock);
1124 /* Vet the results. */
1125 if (rc < 0 && result > 0)
1130 cl_lock_extransit(env, lock, state);
1133 EXPORT_SYMBOL(cl_use_try);
1136 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1137 * top-to-bottom.
1139 static int cl_enqueue_kick(const struct lu_env *env,
1140 struct cl_lock *lock,
1141 struct cl_io *io, __u32 flags)
1144 const struct cl_lock_slice *slice;
1148 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1149 if (slice->cls_ops->clo_enqueue != NULL) {
1150 result = slice->cls_ops->clo_enqueue(env,
1156 LASSERT(result != -ENOSYS);
1161 * Tries to enqueue a lock.
1163 * This function is called repeatedly by cl_enqueue() until either lock is
1164 * enqueued, or error occurs. This function does not block waiting for
1165 * networking communication to complete.
1167 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1168 * lock->cll_state == CLS_HELD)
1170 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1171 * \see cl_lock_state::CLS_ENQUEUED
1173 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1174 struct cl_io *io, __u32 flags)
1179 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1183 LINVRNT(cl_lock_is_mutexed(lock));
1185 if (lock->cll_error != 0)
1187 switch (lock->cll_state) {
1189 cl_lock_state_set(env, lock, CLS_QUEUING);
1193 result = cl_enqueue_kick(env, lock, io, flags);
1195 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1198 LASSERT(cl_lock_is_intransit(lock));
1202 /* yank lock from the cache. */
1203 result = cl_use_try(env, lock, 0);
1212 * impossible, only held locks with increased
1213 * ->cll_holds can be enqueued, and they cannot be
1214 * freed.
1216 LBUG();
1218 } while (result == CLO_REPEAT);
1220 cl_lock_error(env, lock, result);
1221 RETURN(result ?: lock->cll_error);
1223 EXPORT_SYMBOL(cl_enqueue_try);
1226 * Cancel the conflicting lock found during previous enqueue.
1228 * \retval 0 conflicting lock has been canceled.
1229 * \retval -ve error code.
1231 int cl_lock_enqueue_wait(const struct lu_env *env,
1232 struct cl_lock *lock,
1233 int keep_mutex)
1235 struct cl_lock *conflict;
1239 LASSERT(cl_lock_is_mutexed(lock));
1240 LASSERT(lock->cll_state == CLS_QUEUING);
1241 LASSERT(lock->cll_conflict != NULL);
1243 conflict = lock->cll_conflict;
1244 lock->cll_conflict = NULL;
1246 cl_lock_mutex_put(env, lock);
1247 LASSERT(cl_lock_nr_mutexed(env) == 0);
1249 cl_lock_mutex_get(env, conflict);
1250 cl_lock_cancel(env, conflict);
1251 cl_lock_delete(env, conflict);
1253 while (conflict->cll_state != CLS_FREEING) {
1254 rc = cl_lock_state_wait(env, conflict);
1258 cl_lock_mutex_put(env, conflict);
1259 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1260 cl_lock_put(env, conflict);
1262 if (keep_mutex)
1263 cl_lock_mutex_get(env, lock);
1268 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1270 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1271 struct cl_io *io, __u32 enqflags)
1277 LINVRNT(cl_lock_is_mutexed(lock));
1278 LINVRNT(cl_lock_invariant(env, lock));
1279 LASSERT(lock->cll_holds > 0);
1281 cl_lock_user_add(env, lock);
1283 result = cl_enqueue_try(env, lock, io, enqflags);
1284 if (result == CLO_WAIT) {
1285 if (lock->cll_conflict != NULL)
1286 result = cl_lock_enqueue_wait(env, lock, 1);
1288 result = cl_lock_state_wait(env, lock);
1295 cl_lock_user_del(env, lock);
1296 cl_lock_error(env, lock, result);
1298 LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1299 lock->cll_state == CLS_ENQUEUED ||
1300 lock->cll_state == CLS_HELD));
1305 * Enqueues a lock.
1307 * \pre current thread or io owns a hold on lock.
1309 * \post ergo(result == 0, lock->users increased)
1310 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1311 * lock->cll_state == CLS_HELD)
1313 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1314 struct cl_io *io, __u32 enqflags)
1320 cl_lock_lockdep_acquire(env, lock, enqflags);
1321 cl_lock_mutex_get(env, lock);
1322 result = cl_enqueue_locked(env, lock, io, enqflags);
1323 cl_lock_mutex_put(env, lock);
1325 cl_lock_lockdep_release(env, lock);
1326 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1327 lock->cll_state == CLS_HELD));
1330 EXPORT_SYMBOL(cl_enqueue);
1333 * Tries to unlock a lock.
1335 * This function is called repeatedly by cl_unuse() until either lock is
1336 * unlocked, or error occurs.
1337 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1339 * \pre lock->cll_state == CLS_HELD
1341 * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
1343 * \see cl_unuse() cl_lock_operations::clo_unuse()
1344 * \see cl_lock_state::CLS_CACHED
1346 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1349 enum cl_lock_state state = CLS_NEW;
1352 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1354 LASSERT(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED);
1355 if (lock->cll_users > 1) {
1356 cl_lock_user_del(env, lock);
1361 * New lock users (->cll_users) are not protecting unlocking
1362 * from proceeding. From this point, lock eventually reaches
1363 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1364 * CLS_FREEING.
1366 state = cl_lock_intransit(env, lock);
1368 result = cl_unuse_try_internal(env, lock);
1369 LASSERT(lock->cll_state == CLS_INTRANSIT);
1370 LASSERT(result != CLO_WAIT);
1371 cl_lock_user_del(env, lock);
1372 if (result == 0 || result == -ESTALE) {
1374 * Return lock back to the cache. This is the only
1375 * place where lock is moved into CLS_CACHED state.
1377 * If one of ->clo_unuse() methods returned -ESTALE, lock
1378 * cannot be placed into cache and has to be
1379 * re-initialized. This happens e.g., when a sub-lock was
1380 * canceled while unlocking was in progress.
1382 if (state == CLS_HELD && result == 0)
1383 state = CLS_CACHED;
1384 else
1385 state = CLS_NEW;
1386 cl_lock_extransit(env, lock, state);
1389 * Hide -ESTALE error.
1390 * If the lock is a glimpse lock with multiple stripes,
1391 * one of its sublocks may have returned -ENAVAIL while
1392 * the other sublocks matched write locks. In this case
1393 * we can't set this lock to error, because otherwise some
1394 * of its sublocks may not be cancelled, and some dirty
1395 * pages would never be written to the OSTs. -jay
1399 CERROR("result = %d, this is unlikely!\n", result);
1400 cl_lock_extransit(env, lock, state);
1403 result = result ?: lock->cll_error;
1405 cl_lock_error(env, lock, result);
1408 EXPORT_SYMBOL(cl_unuse_try);
1410 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1415 result = cl_unuse_try(env, lock);
1417 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1425 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1428 cl_lock_mutex_get(env, lock);
1429 cl_unuse_locked(env, lock);
1430 cl_lock_mutex_put(env, lock);
1431 cl_lock_lockdep_release(env, lock);
1434 EXPORT_SYMBOL(cl_unuse);
1437 * Tries to wait for a lock.
1439 * This function is called repeatedly by cl_wait() until either lock is
1440 * granted, or error occurs. This function does not block waiting for network
1441 * communication to complete.
1443 * \see cl_wait() cl_lock_operations::clo_wait()
1444 * \see cl_lock_state::CLS_HELD
1446 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1448 const struct cl_lock_slice *slice;
1452 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1454 LINVRNT(cl_lock_is_mutexed(lock));
1455 LINVRNT(cl_lock_invariant(env, lock));
1456 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1457 lock->cll_state == CLS_HELD ||
1458 lock->cll_state == CLS_INTRANSIT);
1459 LASSERT(lock->cll_users > 0);
1460 LASSERT(lock->cll_holds > 0);
1463 if (lock->cll_error != 0)
1466 if (cl_lock_is_intransit(lock)) {
1471 if (lock->cll_state == CLS_HELD)
1476 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1477 if (slice->cls_ops->clo_wait != NULL) {
1478 result = slice->cls_ops->clo_wait(env, slice);
1483 LASSERT(result != -ENOSYS);
1485 LASSERT(lock->cll_state != CLS_INTRANSIT);
1486 cl_lock_state_set(env, lock, CLS_HELD);
1488 } while (result == CLO_REPEAT);
1489 RETURN(result ?: lock->cll_error);
1491 EXPORT_SYMBOL(cl_wait_try);
1494 * Waits until enqueued lock is granted.
1496 * \pre current thread or io owns a hold on the lock
1497 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1498 * lock->cll_state == CLS_HELD)
1500 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1502 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1507 cl_lock_mutex_get(env, lock);
1509 LINVRNT(cl_lock_invariant(env, lock));
1510 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1511 "Wrong state %d \n", lock->cll_state);
1512 LASSERT(lock->cll_holds > 0);
1515 result = cl_wait_try(env, lock);
1516 if (result == CLO_WAIT) {
1517 result = cl_lock_state_wait(env, lock);
1524 cl_lock_user_del(env, lock);
1525 cl_lock_error(env, lock, result);
1526 cl_lock_lockdep_release(env, lock);
1528 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1529 cl_lock_mutex_put(env, lock);
1530 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1533 EXPORT_SYMBOL(cl_wait);
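/**
* Two-phase usage sketch (illustrative; \a env, \a io and \a descr are
* assumed to be prepared by the caller): take a hold, enqueue
* asynchronously, then block until the lock is granted. On success the lock
* is in CLS_HELD and is eventually given back with cl_unuse() and
* cl_lock_release(), as in the sketch after cl_lock_request() below:
*
* \code
*	struct cl_lock *lock;
*	int rc;
*
*	lock = cl_lock_hold(env, io, descr, "wait-example", cfs_current());
*	if (!IS_ERR(lock)) {
*		rc = cl_enqueue(env, lock, io, 0);
*		if (rc == 0)
*			rc = cl_wait(env, lock);
*	}
* \endcode
*/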
1536 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1537 * weight.
1539 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1541 const struct cl_lock_slice *slice;
1542 unsigned long pound;
1543 unsigned long ounce;
1546 LINVRNT(cl_lock_is_mutexed(lock));
1547 LINVRNT(cl_lock_invariant(env, lock));
1550 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1551 if (slice->cls_ops->clo_weigh != NULL) {
1552 ounce = slice->cls_ops->clo_weigh(env, slice);
1554 if (pound < ounce) /* over-weight^Wflow */
1560 EXPORT_SYMBOL(cl_lock_weigh);
1563 * Notifies layers that lock description changed.
1565 * The server can grant the client a lock different from the one that was
1566 * requested (e.g., larger in extent). This method is called when the actually
1567 * granted lock description becomes known, to let layers accommodate the
1568 * changed lock description.
1570 * \see cl_lock_operations::clo_modify()
1572 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1573 const struct cl_lock_descr *desc)
1575 const struct cl_lock_slice *slice;
1576 struct cl_object *obj = lock->cll_descr.cld_obj;
1577 struct cl_object_header *hdr = cl_object_header(obj);
1581 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1582 /* don't allow object to change */
1583 LASSERT(obj == desc->cld_obj);
1584 LINVRNT(cl_lock_is_mutexed(lock));
1585 LINVRNT(cl_lock_invariant(env, lock));
1587 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1588 if (slice->cls_ops->clo_modify != NULL) {
1589 result = slice->cls_ops->clo_modify(env, slice, desc);
1594 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1595 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1597 * Just replace description in place. Nothing more is needed for
1598 * now. If locks were indexed according to their extent and/or mode,
1599 * that index would have to be updated here.
1601 cfs_spin_lock(&hdr->coh_lock_guard);
1602 lock->cll_descr = *desc;
1603 cfs_spin_unlock(&hdr->coh_lock_guard);
1606 EXPORT_SYMBOL(cl_lock_modify);
1609 * Initializes lock closure with a given origin.
1611 * \see cl_lock_closure
1613 void cl_lock_closure_init(const struct lu_env *env,
1614 struct cl_lock_closure *closure,
1615 struct cl_lock *origin, int wait)
1617 LINVRNT(cl_lock_is_mutexed(origin));
1618 LINVRNT(cl_lock_invariant(env, origin));
1620 CFS_INIT_LIST_HEAD(&closure->clc_list);
1621 closure->clc_origin = origin;
1622 closure->clc_wait = wait;
1623 closure->clc_nr = 0;
1625 EXPORT_SYMBOL(cl_lock_closure_init);
1628 * Builds a closure of \a lock.
1630 * Building of a closure consists of adding initial lock (\a lock) into it,
1631 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1632 * methods might call cl_lock_closure_build() recursively again, adding more
1633 * locks to the closure, etc.
1635 * \see cl_lock_closure
1637 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1638 struct cl_lock_closure *closure)
1640 const struct cl_lock_slice *slice;
1644 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1645 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1647 result = cl_lock_enclosure(env, lock, closure);
1649 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1650 if (slice->cls_ops->clo_closure != NULL) {
1651 result = slice->cls_ops->clo_closure(env, slice,
1659 cl_lock_disclosure(env, closure);
1662 EXPORT_SYMBOL(cl_lock_closure_build);
1665 * Adds new lock to a closure.
1667 * Try-locks \a lock and if succeeded, adds it to the closure (never more than
1668 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1669 * until next try-lock is likely to succeed.
1671 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1672 struct cl_lock_closure *closure)
1676 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1677 if (!cl_lock_mutex_try(env, lock)) {
1679 * If lock->cll_inclosure is not empty, lock is already in
1680 * this closure.
1682 if (cfs_list_empty(&lock->cll_inclosure)) {
1683 cl_lock_get_trust(lock);
1684 lu_ref_add(&lock->cll_reference, "closure", closure);
1685 cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1688 cl_lock_mutex_put(env, lock);
1691 cl_lock_disclosure(env, closure);
1692 if (closure->clc_wait) {
1693 cl_lock_get_trust(lock);
1694 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1695 cl_lock_mutex_put(env, closure->clc_origin);
1697 LASSERT(cl_lock_nr_mutexed(env) == 0);
1698 cl_lock_mutex_get(env, lock);
1699 cl_lock_mutex_put(env, lock);
1701 cl_lock_mutex_get(env, closure->clc_origin);
1702 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1703 cl_lock_put(env, lock);
1705 result = CLO_REPEAT;
1709 EXPORT_SYMBOL(cl_lock_enclosure);
1711 /** Releases mutices of enclosed locks. */
1712 void cl_lock_disclosure(const struct lu_env *env,
1713 struct cl_lock_closure *closure)
1715 struct cl_lock *scan;
1716 struct cl_lock *temp;
1718 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1719 cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1721 cfs_list_del_init(&scan->cll_inclosure);
1722 cl_lock_mutex_put(env, scan);
1723 lu_ref_del(&scan->cll_reference, "closure", closure);
1724 cl_lock_put(env, scan);
1727 LASSERT(closure->clc_nr == 0);
1729 EXPORT_SYMBOL(cl_lock_disclosure);
1731 /** Finalizes a closure. */
1732 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1734 LASSERT(closure->clc_nr == 0);
1735 LASSERT(cfs_list_empty(&closure->clc_list));
1737 EXPORT_SYMBOL(cl_lock_closure_fini);
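/**
* Closure lifecycle sketch (illustrative; \a origin is assumed to be mutexed
* by the caller, \a lock is a related lock to be pulled into the closure,
* and work_on_enclosed_locks() is hypothetical). On failure
* cl_lock_closure_build() discloses the closure itself:
*
* \code
*	struct cl_lock_closure closure;
*	int rc;
*
*	cl_lock_closure_init(env, &closure, origin, 1);
*	rc = cl_lock_closure_build(env, lock, &closure);
*	if (rc == 0) {
*		work_on_enclosed_locks(env, &closure);
*		cl_lock_disclosure(env, &closure);
*	}
*	cl_lock_closure_fini(&closure);
* \endcode
*/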
1740 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1741 * destroyed, then destroys it. If there are holds on the lock, destruction is
1742 * postponed until all holds are released. This is called when a decision is
1743 * made to destroy the lock in the future. E.g., when a blocking AST is
1744 * received on it, or fatal communication error happens.
1746 * Caller must have a reference on this lock to prevent a situation when
1747 * deleted lock lingers in memory for indefinite time, because nobody calls
1748 * cl_lock_put() to finish it.
1750 * \pre atomic_read(&lock->cll_ref) > 0
1751 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1752 * cl_lock_nr_mutexed(env) == 1)
1753 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1754 * held, as deletion of sub-locks might require releasing a top-lock
1755 * mutex].
1757 * \see cl_lock_operations::clo_delete()
1758 * \see cl_lock::cll_holds
1760 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1762 LINVRNT(cl_lock_is_mutexed(lock));
1763 LINVRNT(cl_lock_invariant(env, lock));
1764 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1765 cl_lock_nr_mutexed(env) == 1));
1768 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1769 if (lock->cll_holds == 0)
1770 cl_lock_delete0(env, lock);
1772 lock->cll_flags |= CLF_DOOMED;
1775 EXPORT_SYMBOL(cl_lock_delete);
1778 * Mark lock as irrecoverably failed, and mark it for destruction. This
1779 * happens when, e.g., server fails to grant a lock to us, or networking
1780 * time-out happens.
1782 * \pre atomic_read(&lock->cll_ref) > 0
1784 * \see clo_lock_delete()
1785 * \see cl_lock::cll_holds
1787 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1789 LINVRNT(cl_lock_is_mutexed(lock));
1790 LINVRNT(cl_lock_invariant(env, lock));
1793 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1794 if (lock->cll_error == 0 && error != 0) {
1795 lock->cll_error = error;
1796 cl_lock_signal(env, lock);
1797 cl_lock_cancel(env, lock);
1798 cl_lock_delete(env, lock);
1802 EXPORT_SYMBOL(cl_lock_error);
1805 * Cancels this lock. Notifies layers
1806 * (bottom-to-top) that the lock is being cancelled, then destroys it. If
1807 * there are holds on the lock, cancellation is postponed until
1808 * all holds are released.
1810 * Cancellation notification is delivered to layers at most once.
1812 * \see cl_lock_operations::clo_cancel()
1813 * \see cl_lock::cll_holds
1815 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1817 LINVRNT(cl_lock_is_mutexed(lock));
1818 LINVRNT(cl_lock_invariant(env, lock));
1821 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1822 if (lock->cll_holds == 0)
1823 cl_lock_cancel0(env, lock);
1825 lock->cll_flags |= CLF_CANCELPEND;
1828 EXPORT_SYMBOL(cl_lock_cancel);
1831 * Finds an existing lock covering given page and optionally different from a
1832 * given \a except lock.
1834 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
1835 struct cl_page *page, struct cl_lock *except,
1836 int pending, int canceld)
1838 struct cl_object_header *head;
1839 struct cl_lock *scan;
1840 struct cl_lock *lock;
1841 struct cl_lock_descr *need;
1845 head = cl_object_header(obj);
1846 need = &cl_env_info(env)->clt_descr;
1849 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1850 * not PHANTOM */
1851 need->cld_start = need->cld_end = page->cp_index;
1852 need->cld_enq_flags = 0;
1854 cfs_spin_lock(&head->coh_lock_guard);
1855 /* It is fine to match any group lock since there could be only one
1856 * with a unique gid and it conflicts with all other lock modes too */
1857 cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1858 if (scan != except &&
1859 (scan->cll_descr.cld_mode == CLM_GROUP ||
1860 cl_lock_ext_match(&scan->cll_descr, need)) &&
1861 scan->cll_state >= CLS_HELD &&
1862 scan->cll_state < CLS_FREEING &&
1864 * This check is racy as the lock can be canceled right
1865 * after it is done, but this is fine, because page exists
1866 * anyway.
1868 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1869 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1870 /* Don't increase cs_hit here since this
1871 * is just a helper function. */
1872 cl_lock_get_trust(scan);
1877 cfs_spin_unlock(&head->coh_lock_guard);
1880 EXPORT_SYMBOL(cl_lock_at_page);
1883 * Calculate the page offset at the layer of @lock.
1884 * At the time of this writing, @page is a top page and @lock is a sub-lock.
1886 static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
1888 struct lu_device_type *dtype;
1889 const struct cl_page_slice *slice;
1891 dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
1892 slice = cl_page_at(page, dtype);
1893 LASSERT(slice != NULL);
1894 return slice->cpl_page->cp_index;
1898 * Check whether page @page is covered by an extra lock; if not, discard it.
1900 static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
1901 struct cl_page *page, void *cbdata)
1903 struct cl_thread_info *info = cl_env_info(env);
1904 struct cl_lock *lock = cbdata;
1905 pgoff_t index = pgoff_at_lock(page, lock);
1907 if (index >= info->clt_fn_index) {
1908 struct cl_lock *tmp;
1910 /* refresh non-overlapped index */
1911 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, page, lock,
1914 /* Cache the first-non-overlapped index so as to skip
1915 * all pages within [index, clt_fn_index). This
1916 * is safe because if tmp lock is canceled, it will
1917 * discard these pages. */
1918 info->clt_fn_index = tmp->cll_descr.cld_end + 1;
1919 if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
1920 info->clt_fn_index = CL_PAGE_EOF;
1921 cl_lock_put(env, tmp);
1922 } else if (cl_page_own(env, io, page) == 0) {
1923 /* discard the page */
1924 cl_page_unmap(env, io, page);
1925 cl_page_discard(env, io, page);
1926 cl_page_disown(env, io, page);
1928 LASSERT(page->cp_state == CPS_FREEING);
1932 info->clt_next_index = index + 1;
1933 return CLP_GANG_OKAY;
1936 static int pageout_cb(const struct lu_env *env, struct cl_io *io,
1937 struct cl_page *page, void *cbdata)
1939 struct cl_thread_info *info = cl_env_info(env);
1940 struct cl_page_list *queue = &info->clt_queue.c2_qin;
1941 struct cl_lock *lock = cbdata;
1942 typeof(cl_page_own) *page_own;
1943 int rc = CLP_GANG_OKAY;
1945 page_own = queue->pl_nr ? cl_page_own_try : cl_page_own;
1946 if (page_own(env, io, page) == 0) {
1947 cl_page_list_add(queue, page);
1948 info->clt_next_index = pgoff_at_lock(page, lock) + 1;
1949 } else if (page->cp_state != CPS_FREEING) {
1950 /* cl_page_own() won't fail unless
1951 * the page is being freed. */
1952 LASSERT(queue->pl_nr != 0);
1953 rc = CLP_GANG_AGAIN;
1960 * Invalidate pages protected by the given lock, sending them out to the
1961 * server first, if necessary.
1963 * This function does the following:
1965 * - collects a list of pages to be invalidated,
1967 * - unmaps them from the user virtual memory,
1969 * - sends dirty pages to the server,
1971 * - waits for transfer completion,
1973 * - discards pages, and throws them out of memory.
1975 * If \a discard is set, pages are discarded without sending them to the
1976 * server.
1978 * If error happens on any step, the process continues anyway (the reasoning
1979 * behind this being that lock cancellation cannot be delayed indefinitely).
1981 int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
1982 int discard)
1984 struct cl_thread_info *info = cl_env_info(env);
1985 struct cl_io *io = &info->clt_io;
1986 struct cl_2queue *queue = &info->clt_queue;
1987 struct cl_lock_descr *descr = &lock->cll_descr;
1988 cl_page_gang_cb_t cb;
1993 LINVRNT(cl_lock_invariant(env, lock));
1996 io->ci_obj = cl_object_top(descr->cld_obj);
1997 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
2001 cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : pageout_cb;
2002 info->clt_fn_index = info->clt_next_index = descr->cld_start;
2004 cl_2queue_init(queue);
2005 res = cl_page_gang_lookup(env, descr->cld_obj, io,
2006 info->clt_next_index, descr->cld_end,
2008 page_count = queue->c2_qin.pl_nr;
2009 if (page_count > 0) {
2010 /* must be writeback case */
2011 LASSERTF(descr->cld_mode >= CLM_WRITE, "lock mode %s\n",
2012 cl_lock_mode_name(descr->cld_mode));
2014 result = cl_page_list_unmap(env, io, &queue->c2_qin);
2016 long timeout = 600; /* 10 minutes. */
2017 /* for debug purpose, if this request can't be
2018 * finished in 10 minutes, we hope it can
2019 * notify us.
2021 result = cl_io_submit_sync(env, io, CRT_WRITE,
2025 CWARN("Writing %lu pages error: %d\n",
2026 page_count, result);
2028 cl_2queue_discard(env, io, queue);
2029 cl_2queue_disown(env, io, queue);
2030 cl_2queue_fini(env, queue);
2033 if (info->clt_next_index > descr->cld_end)
2036 if (res == CLP_GANG_RESCHED)
2038 } while (res != CLP_GANG_OKAY);
2040 cl_io_fini(env, io);
2043 EXPORT_SYMBOL(cl_lock_page_out);
2046 * Eliminate all locks for a given object.
2048 * Caller has to guarantee that no lock is in active use.
2050 * \param cancel when this is set, cl_locks_prune() cancels locks before
2051 * destroying them.
2053 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
2055 struct cl_object_header *head;
2056 struct cl_lock *lock;
2059 head = cl_object_header(obj);
2061 * If locks are destroyed without cancellation, all pages must be
2062 * already destroyed (as otherwise they will be left unprotected).
2064 LASSERT(ergo(!cancel,
2065 head->coh_tree.rnode == NULL && head->coh_pages == 0));
2067 cfs_spin_lock(&head->coh_lock_guard);
2068 while (!cfs_list_empty(&head->coh_locks)) {
2069 lock = container_of(head->coh_locks.next,
2070 struct cl_lock, cll_linkage);
2071 cl_lock_get_trust(lock);
2072 cfs_spin_unlock(&head->coh_lock_guard);
2073 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2076 cl_lock_mutex_get(env, lock);
2077 if (lock->cll_state < CLS_FREEING) {
2078 LASSERT(lock->cll_holds == 0);
2079 LASSERT(lock->cll_users <= 1);
2080 if (unlikely(lock->cll_users == 1)) {
2081 struct l_wait_info lwi = { 0 };
2083 cl_lock_mutex_put(env, lock);
2084 l_wait_event(lock->cll_wq,
2085 lock->cll_users == 0,
2091 cl_lock_cancel(env, lock);
2092 cl_lock_delete(env, lock);
2094 cl_lock_mutex_put(env, lock);
2095 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2096 cl_lock_put(env, lock);
2097 cfs_spin_lock(&head->coh_lock_guard);
2099 cfs_spin_unlock(&head->coh_lock_guard);
2102 EXPORT_SYMBOL(cl_locks_prune);
2104 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2105 const struct cl_io *io,
2106 const struct cl_lock_descr *need,
2107 const char *scope, const void *source)
2109 struct cl_lock *lock;
2114 lock = cl_lock_find(env, io, need);
2117 cl_lock_mutex_get(env, lock);
2118 if (lock->cll_state < CLS_FREEING &&
2119 !(lock->cll_flags & CLF_CANCELLED)) {
2120 cl_lock_hold_mod(env, lock, +1);
2121 lu_ref_add(&lock->cll_holders, scope, source);
2122 lu_ref_add(&lock->cll_reference, scope, source);
2125 cl_lock_mutex_put(env, lock);
2126 cl_lock_put(env, lock);
2132 * Returns a lock matching \a need description with a reference and a hold on
2133 * it.
2135 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2136 * guarantees that lock is not in the CLS_FREEING state on return.
2138 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2139 const struct cl_lock_descr *need,
2140 const char *scope, const void *source)
2142 struct cl_lock *lock;
2146 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2148 cl_lock_mutex_put(env, lock);
2151 EXPORT_SYMBOL(cl_lock_hold);
2154 * Main high-level entry point of cl_lock interface that finds existing or
2155 * enqueues new lock matching given description.
2157 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2158 const struct cl_lock_descr *need,
2159 const char *scope, const void *source)
2161 struct cl_lock *lock;
2163 __u32 enqflags = need->cld_enq_flags;
2167 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2171 rc = cl_enqueue_locked(env, lock, io, enqflags);
2173 if (cl_lock_fits_into(env, lock, need, io)) {
2174 if (!(enqflags & CEF_AGL)) {
2175 cl_lock_mutex_put(env, lock);
2176 cl_lock_lockdep_acquire(env, lock,
2182 cl_unuse_locked(env, lock);
2184 cl_lock_trace(D_DLMTRACE, env,
2185 rc <= 0 ? "enqueue failed" : "agl succeed", lock);
2186 cl_lock_hold_release(env, lock, scope, source);
2187 cl_lock_mutex_put(env, lock);
2188 lu_ref_del(&lock->cll_reference, scope, source);
2189 cl_lock_put(env, lock);
2191 LASSERT(enqflags & CEF_AGL);
2193 } else if (rc != 0) {
2199 EXPORT_SYMBOL(cl_lock_request);
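/**
* Request/release pairing sketch (illustrative, modeled on glimpse-style
* users; \a descr and \a io are assumed to be prepared by the caller and
* do_io_under_lock() is hypothetical):
*
* \code
*	struct cl_lock *lock;
*
*	lock = cl_lock_request(env, io, descr, "req-example", cfs_current());
*	if (!IS_ERR(lock)) {
*		do_io_under_lock(env, io, lock);
*		cl_unuse(env, lock);
*		cl_lock_release(env, lock, "req-example", cfs_current());
*	}
* \endcode
*/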
2202 * Adds a hold to a known lock.
2204 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2205 const char *scope, const void *source)
2207 LINVRNT(cl_lock_is_mutexed(lock));
2208 LINVRNT(cl_lock_invariant(env, lock));
2209 LASSERT(lock->cll_state != CLS_FREEING);
2212 cl_lock_hold_mod(env, lock, +1);
2214 lu_ref_add(&lock->cll_holders, scope, source);
2215 lu_ref_add(&lock->cll_reference, scope, source);
2218 EXPORT_SYMBOL(cl_lock_hold_add);
2221 * Releases a hold and a reference on a lock, on which caller acquired a
2222 * hold.
2224 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2225 const char *scope, const void *source)
2227 LINVRNT(cl_lock_invariant(env, lock));
2229 cl_lock_hold_release(env, lock, scope, source);
2230 lu_ref_del(&lock->cll_reference, scope, source);
2231 cl_lock_put(env, lock);
2234 EXPORT_SYMBOL(cl_lock_unhold);
2237 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2239 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2240 const char *scope, const void *source)
2242 LINVRNT(cl_lock_invariant(env, lock));
2244 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2245 cl_lock_mutex_get(env, lock);
2246 cl_lock_hold_release(env, lock, scope, source);
2247 cl_lock_mutex_put(env, lock);
2248 lu_ref_del(&lock->cll_reference, scope, source);
2249 cl_lock_put(env, lock);
2252 EXPORT_SYMBOL(cl_lock_release);
2254 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2256 LINVRNT(cl_lock_is_mutexed(lock));
2257 LINVRNT(cl_lock_invariant(env, lock));
2260 cl_lock_used_mod(env, lock, +1);
2263 EXPORT_SYMBOL(cl_lock_user_add);
2265 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2267 LINVRNT(cl_lock_is_mutexed(lock));
2268 LINVRNT(cl_lock_invariant(env, lock));
2269 LASSERT(lock->cll_users > 0);
2272 cl_lock_used_mod(env, lock, -1);
2273 if (lock->cll_users == 0)
2274 cfs_waitq_broadcast(&lock->cll_wq);
2277 EXPORT_SYMBOL(cl_lock_user_del);
2279 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2281 static const char *names[] = {
2282 [CLM_PHANTOM] = "P",
2283 [CLM_READ]    = "R",
2284 [CLM_WRITE]   = "W",
2285 [CLM_GROUP]   = "G"
2286 };
2287 if (0 <= mode && mode < ARRAY_SIZE(names))
2288 return names[mode];
2289 else
2290 return "U";
2292 EXPORT_SYMBOL(cl_lock_mode_name);
2295 * Prints a human-readable representation of a lock description.
2297 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2298 lu_printer_t printer,
2299 const struct cl_lock_descr *descr)
2301 const struct lu_fid *fid;
2303 fid = lu_object_fid(&descr->cld_obj->co_lu);
2304 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2306 EXPORT_SYMBOL(cl_lock_descr_print);
2309 * Prints a human-readable representation of \a lock through \a printer.
2311 void cl_lock_print(const struct lu_env *env, void *cookie,
2312 lu_printer_t printer, const struct cl_lock *lock)
2314 const struct cl_lock_slice *slice;
2315 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2316 lock, cfs_atomic_read(&lock->cll_ref),
2317 lock->cll_state, lock->cll_error, lock->cll_holds,
2318 lock->cll_users, lock->cll_flags);
2319 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2320 (*printer)(env, cookie, " {\n");
2322 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2323 (*printer)(env, cookie, " %s@%p: ",
2324 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2326 if (slice->cls_ops->clo_print != NULL)
2327 slice->cls_ops->clo_print(env, cookie, printer, slice);
2328 (*printer)(env, cookie, "\n");
2330 (*printer)(env, cookie, "} lock@%p\n", lock);
2332 EXPORT_SYMBOL(cl_lock_print);
2334 int cl_lock_init(void)
2336 return lu_kmem_init(cl_lock_caches);
2339 void cl_lock_fini(void)
2341 lu_kmem_fini(cl_lock_caches);