4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 # define EXPORT_SYMTAB
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_fid.h>
49 #include <libcfs/list.h>
50 /* lu_time_global_{init,fini}() */
53 #include <cl_object.h>
54 #include "cl_internal.h"
56 /** Lock class of cl_lock::cll_guard */
57 static cfs_lock_class_key_t cl_lock_guard_class;
58 static cfs_mem_cache_t *cl_lock_kmem;
60 static struct lu_kmem_descr cl_lock_caches[] = {
62 .ckd_cache = &cl_lock_kmem,
63 .ckd_name = "cl_lock_kmem",
64 .ckd_size = sizeof (struct cl_lock)
72 * Basic lock invariant that is maintained at all times. Caller either has a
73 * reference to \a lock, or somehow assures that \a lock cannot be freed.
75 * \see cl_lock_invariant()
77 static int cl_lock_invariant_trusted(const struct lu_env *env,
78 const struct cl_lock *lock)
80 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
81 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
82 lock->cll_holds >= lock->cll_users &&
83 lock->cll_holds >= 0 &&
84 lock->cll_users >= 0 &&
89 * Stronger lock invariant, checking that caller has a reference on a lock.
91 * \see cl_lock_invariant_trusted()
93 static int cl_lock_invariant(const struct lu_env *env,
94 const struct cl_lock *lock)
98 result = cfs_atomic_read(&lock->cll_ref) > 0 &&
99 cl_lock_invariant_trusted(env, lock);
100 if (!result && env != NULL)
101 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
106 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
108 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
110 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
114 * Returns a set of counters for this lock, depending on a lock nesting.
116 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
117 const struct cl_lock *lock)
119 struct cl_thread_info *info;
120 enum clt_nesting_level nesting;
122 info = cl_env_info(env);
123 nesting = cl_lock_nesting(lock);
124 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
125 return &info->clt_counters[nesting];
128 static void cl_lock_trace0(int level, const struct lu_env *env,
129 const char *prefix, const struct cl_lock *lock,
130 const char *func, const int line)
132 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
133 CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
134 "(%p/%d/%d) at %s():%d\n",
135 prefix, lock, cfs_atomic_read(&lock->cll_ref),
136 lock->cll_guarder, lock->cll_depth,
137 lock->cll_state, lock->cll_error, lock->cll_holds,
138 lock->cll_users, lock->cll_flags,
139 env, h->coh_nesting, cl_lock_nr_mutexed(env),
142 #define cl_lock_trace(level, env, prefix, lock) \
143 cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
145 #define RETIP ((unsigned long)__builtin_return_address(0))
147 #ifdef CONFIG_LOCKDEP
148 static cfs_lock_class_key_t cl_lock_key;
150 static void cl_lock_lockdep_init(struct cl_lock *lock)
152 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
155 static void cl_lock_lockdep_acquire(const struct lu_env *env,
156 struct cl_lock *lock, __u32 enqflags)
158 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
159 #ifdef HAVE_LOCK_MAP_ACQUIRE
160 lock_map_acquire(&lock->dep_map);
161 #else /* HAVE_LOCK_MAP_ACQUIRE */
162 lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
163 /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
164 /* check: */ 2, RETIP);
165 #endif /* HAVE_LOCK_MAP_ACQUIRE */
168 static void cl_lock_lockdep_release(const struct lu_env *env,
169 struct cl_lock *lock)
171 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
172 lock_release(&lock->dep_map, 0, RETIP);
175 #else /* !CONFIG_LOCKDEP */
177 static void cl_lock_lockdep_init(struct cl_lock *lock)
179 static void cl_lock_lockdep_acquire(const struct lu_env *env,
180 struct cl_lock *lock, __u32 enqflags)
182 static void cl_lock_lockdep_release(const struct lu_env *env,
183 struct cl_lock *lock)
186 #endif /* !CONFIG_LOCKDEP */
189 * Adds lock slice to the compound lock.
191 * This is called by cl_object_operations::coo_lock_init() methods to add a
192 * per-layer state to the lock. New state is added at the end of
193 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
195 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
197 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
198 struct cl_object *obj,
199 const struct cl_lock_operations *ops)
202 slice->cls_lock = lock;
203 cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
204 slice->cls_obj = obj;
205 slice->cls_ops = ops;
208 EXPORT_SYMBOL(cl_lock_slice_add);
211 * Returns true iff a lock with the mode \a has provides at least the same
212 * guarantees as a lock with the mode \a need.
214 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
216 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
217 need == CLM_PHANTOM || need == CLM_GROUP);
218 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
219 has == CLM_PHANTOM || has == CLM_GROUP);
220 CLASSERT(CLM_PHANTOM < CLM_READ);
221 CLASSERT(CLM_READ < CLM_WRITE);
222 CLASSERT(CLM_WRITE < CLM_GROUP);
224 if (has != CLM_GROUP)
229 EXPORT_SYMBOL(cl_lock_mode_match);
232 * Returns true iff extent portions of lock descriptions match.
234 int cl_lock_ext_match(const struct cl_lock_descr *has,
235 const struct cl_lock_descr *need)
238 has->cld_start <= need->cld_start &&
239 has->cld_end >= need->cld_end &&
240 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
241 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
243 EXPORT_SYMBOL(cl_lock_ext_match);
246 * Returns true iff a lock with the description \a has provides at least the
247 * same guarantees as a lock with the description \a need.
249 int cl_lock_descr_match(const struct cl_lock_descr *has,
250 const struct cl_lock_descr *need)
253 cl_object_same(has->cld_obj, need->cld_obj) &&
254 cl_lock_ext_match(has, need);
256 EXPORT_SYMBOL(cl_lock_descr_match);
258 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
260 struct cl_object *obj = lock->cll_descr.cld_obj;
262 LINVRNT(!cl_lock_is_mutexed(lock));
265 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
267 while (!cfs_list_empty(&lock->cll_layers)) {
268 struct cl_lock_slice *slice;
270 slice = cfs_list_entry(lock->cll_layers.next,
271 struct cl_lock_slice, cls_linkage);
272 cfs_list_del_init(lock->cll_layers.next);
273 slice->cls_ops->clo_fini(env, slice);
275 cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
276 cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
277 lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
278 cl_object_put(env, obj);
279 lu_ref_fini(&lock->cll_reference);
280 lu_ref_fini(&lock->cll_holders);
281 cfs_mutex_destroy(&lock->cll_guard);
282 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
287 * Releases a reference on a lock.
289 * When last reference is released, lock is returned to the cache, unless it
290 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
293 * \see cl_object_put(), cl_page_put()
295 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
297 struct cl_object *obj;
298 struct cl_site *site;
300 LINVRNT(cl_lock_invariant(env, lock));
302 obj = lock->cll_descr.cld_obj;
303 LINVRNT(obj != NULL);
304 site = cl_object_site(obj);
306 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
307 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
309 if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
310 if (lock->cll_state == CLS_FREEING) {
311 LASSERT(cfs_list_empty(&lock->cll_linkage));
312 cl_lock_free(env, lock);
314 cfs_atomic_dec(&site->cs_locks.cs_busy);
318 EXPORT_SYMBOL(cl_lock_put);
321 * Acquires an additional reference to a lock.
323 * This can be called only by caller already possessing a reference to \a lock.
326 * \see cl_object_get(), cl_page_get()
328 void cl_lock_get(struct cl_lock *lock)
330 LINVRNT(cl_lock_invariant(NULL, lock));
331 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
332 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
333 cfs_atomic_inc(&lock->cll_ref);
335 EXPORT_SYMBOL(cl_lock_get);
338 * Acquires a reference to a lock.
340 * This is much like cl_lock_get(), except that this function can be used to
341 * acquire initial reference to the cached lock. Caller has to deal with all
342 * possible races. Use with care!
344 * \see cl_page_get_trust()
346 void cl_lock_get_trust(struct cl_lock *lock)
348 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
350 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
351 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
352 if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
353 cfs_atomic_inc(&site->cs_locks.cs_busy);
355 EXPORT_SYMBOL(cl_lock_get_trust);
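/*
 * Illustrative sketch (not part of the interface): cl_lock_get_trust() is
 * meant for places like cl_lock_lookup() below, where the only thing keeping
 * the lock alive is its presence on the per-object list, scanned under
 * cl_object_header::coh_lock_guard. The variables head and need are assumed
 * to be set up by the caller.
 *
 * \code
 *      cfs_spin_lock(&head->coh_lock_guard);
 *      cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
 *              if (cl_lock_ext_match(&lock->cll_descr, need)) {
 *                      cl_lock_get_trust(lock);
 *                      break;
 *              }
 *      }
 *      cfs_spin_unlock(&head->coh_lock_guard);
 * \endcode
 */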
358 * Helper function destroying the lock that wasn't completely initialized.
360 * Other threads can acquire references to the top-lock through its
361 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
363 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
365 cl_lock_mutex_get(env, lock);
366 cl_lock_cancel(env, lock);
367 cl_lock_delete(env, lock);
368 cl_lock_mutex_put(env, lock);
369 cl_lock_put(env, lock);
372 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
373 struct cl_object *obj,
374 const struct cl_io *io,
375 const struct cl_lock_descr *descr)
377 struct cl_lock *lock;
378 struct lu_object_header *head;
379 struct cl_site *site = cl_object_site(obj);
382 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
384 cfs_atomic_set(&lock->cll_ref, 1);
385 lock->cll_descr = *descr;
386 lock->cll_state = CLS_NEW;
388 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
390 CFS_INIT_LIST_HEAD(&lock->cll_layers);
391 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
392 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
393 lu_ref_init(&lock->cll_reference);
394 lu_ref_init(&lock->cll_holders);
395 cfs_mutex_init(&lock->cll_guard);
396 cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
397 cfs_waitq_init(&lock->cll_wq);
398 head = obj->co_lu.lo_header;
399 cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
400 cfs_atomic_inc(&site->cs_locks.cs_total);
401 cfs_atomic_inc(&site->cs_locks.cs_created);
402 cl_lock_lockdep_init(lock);
403 cfs_list_for_each_entry(obj, &head->loh_layers,
407 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
409 cl_lock_finish(env, lock);
415 lock = ERR_PTR(-ENOMEM);
420 * Transfer the lock into INTRANSIT state and return the original state.
422 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
423 * \post state: CLS_INTRANSIT
426 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
427 struct cl_lock *lock)
429 enum cl_lock_state state = lock->cll_state;
431 LASSERT(cl_lock_is_mutexed(lock));
432 LASSERT(state != CLS_INTRANSIT);
433 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
434 "Malformed lock state %d.\n", state);
436 cl_lock_state_set(env, lock, CLS_INTRANSIT);
437 lock->cll_intransit_owner = cfs_current();
438 cl_lock_hold_add(env, lock, "intransit", cfs_current());
441 EXPORT_SYMBOL(cl_lock_intransit);
444 * Exit the intransit state and restore the lock state to the original state
446 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
447 enum cl_lock_state state)
449 LASSERT(cl_lock_is_mutexed(lock));
450 LASSERT(lock->cll_state == CLS_INTRANSIT);
451 LASSERT(state != CLS_INTRANSIT);
452 LASSERT(lock->cll_intransit_owner == cfs_current());
454 lock->cll_intransit_owner = NULL;
455 cl_lock_state_set(env, lock, state);
456 cl_lock_unhold(env, lock, "intransit", cfs_current());
458 EXPORT_SYMBOL(cl_lock_extransit);
461 * Checks whether the lock is in the INTRANSIT state (held in transit by another thread).
463 int cl_lock_is_intransit(struct cl_lock *lock)
465 LASSERT(cl_lock_is_mutexed(lock));
466 return lock->cll_state == CLS_INTRANSIT &&
467 lock->cll_intransit_owner != cfs_current();
469 EXPORT_SYMBOL(cl_lock_is_intransit);
471 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
472 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
473 * cover multiple stripes and can trigger cascading timeouts.
475 static int cl_lock_fits_into(const struct lu_env *env,
476 const struct cl_lock *lock,
477 const struct cl_lock_descr *need,
478 const struct cl_io *io)
480 const struct cl_lock_slice *slice;
482 LINVRNT(cl_lock_invariant_trusted(env, lock));
484 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
485 if (slice->cls_ops->clo_fits_into != NULL &&
486 !slice->cls_ops->clo_fits_into(env, slice, need, io))
492 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
493 struct cl_object *obj,
494 const struct cl_io *io,
495 const struct cl_lock_descr *need)
497 struct cl_lock *lock;
498 struct cl_object_header *head;
499 struct cl_site *site;
503 head = cl_object_header(obj);
504 site = cl_object_site(obj);
505 LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
506 cfs_atomic_inc(&site->cs_locks.cs_lookup);
507 cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
510 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
511 lock->cll_state < CLS_FREEING &&
512 lock->cll_error == 0 &&
513 !(lock->cll_flags & CLF_CANCELLED) &&
514 cl_lock_fits_into(env, lock, need, io);
515 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
516 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
519 cl_lock_get_trust(lock);
520 cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
528 * Returns a lock matching description \a need.
530 * This is the main entry point into the cl_lock caching interface. First, a
531 * cache (implemented as a per-object linked list) is consulted. If lock is
532 * found there, it is returned immediately. Otherwise new lock is allocated
533 * and returned. In any case, additional reference to lock is acquired.
535 * \see cl_object_find(), cl_page_find()
537 static struct cl_lock *cl_lock_find(const struct lu_env *env,
538 const struct cl_io *io,
539 const struct cl_lock_descr *need)
541 struct cl_object_header *head;
542 struct cl_object *obj;
543 struct cl_lock *lock;
544 struct cl_site *site;
549 head = cl_object_header(obj);
550 site = cl_object_site(obj);
552 cfs_spin_lock(&head->coh_lock_guard);
553 lock = cl_lock_lookup(env, obj, io, need);
554 cfs_spin_unlock(&head->coh_lock_guard);
557 lock = cl_lock_alloc(env, obj, io, need);
559 struct cl_lock *ghost;
561 cfs_spin_lock(&head->coh_lock_guard);
562 ghost = cl_lock_lookup(env, obj, io, need);
564 cfs_list_add_tail(&lock->cll_linkage,
566 cfs_spin_unlock(&head->coh_lock_guard);
567 cfs_atomic_inc(&site->cs_locks.cs_busy);
569 cfs_spin_unlock(&head->coh_lock_guard);
571 * Other threads can acquire references to the
572 * top-lock through its sub-locks. Hence, it
573 * cannot be cl_lock_free()-ed immediately.
575 cl_lock_finish(env, lock);
584 * Returns existing lock matching given description. This is similar to
585 * cl_lock_find() except that no new lock is created, and returned lock is
586 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
588 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
589 const struct cl_lock_descr *need,
590 const char *scope, const void *source)
592 struct cl_object_header *head;
593 struct cl_object *obj;
594 struct cl_lock *lock;
598 head = cl_object_header(obj);
600 cfs_spin_lock(&head->coh_lock_guard);
601 lock = cl_lock_lookup(env, obj, io, need);
602 cfs_spin_unlock(&head->coh_lock_guard);
607 cl_lock_mutex_get(env, lock);
608 if (lock->cll_state == CLS_INTRANSIT)
609 cl_lock_state_wait(env, lock); /* Don't care return value. */
610 if (lock->cll_state == CLS_CACHED) {
612 result = cl_use_try(env, lock, 1);
614 cl_lock_error(env, lock, result);
616 ok = lock->cll_state == CLS_HELD;
618 cl_lock_hold_add(env, lock, scope, source);
619 cl_lock_user_add(env, lock);
620 cl_lock_put(env, lock);
622 cl_lock_mutex_put(env, lock);
624 cl_lock_put(env, lock);
630 EXPORT_SYMBOL(cl_lock_peek);
633 * Returns a slice within a lock, corresponding to the given layer in the device stack.
638 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
639 const struct lu_device_type *dtype)
641 const struct cl_lock_slice *slice;
643 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
646 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
647 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
652 EXPORT_SYMBOL(cl_lock_at);
654 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
656 struct cl_thread_counters *counters;
658 counters = cl_lock_counters(env, lock);
660 counters->ctc_nr_locks_locked++;
661 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
662 cl_lock_trace(D_TRACE, env, "got mutex", lock);
666 * Locks cl_lock object.
668 * This is used to manipulate cl_lock fields, and to serialize state
669 * transitions in the lock state machine.
671 * \post cl_lock_is_mutexed(lock)
673 * \see cl_lock_mutex_put()
675 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
677 LINVRNT(cl_lock_invariant(env, lock));
679 if (lock->cll_guarder == cfs_current()) {
680 LINVRNT(cl_lock_is_mutexed(lock));
681 LINVRNT(lock->cll_depth > 0);
683 struct cl_object_header *hdr;
684 struct cl_thread_info *info;
687 LINVRNT(lock->cll_guarder != cfs_current());
688 hdr = cl_object_header(lock->cll_descr.cld_obj);
690 * Check that mutices are taken in the bottom-to-top order.
692 info = cl_env_info(env);
693 for (i = 0; i < hdr->coh_nesting; ++i)
694 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
695 cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
696 lock->cll_guarder = cfs_current();
697 LINVRNT(lock->cll_depth == 0);
699 cl_lock_mutex_tail(env, lock);
701 EXPORT_SYMBOL(cl_lock_mutex_get);
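/*
 * Minimal usage sketch (illustrative only, assuming the caller already owns a
 * reference on \a lock): cl_lock fields and state transitions are only
 * touched with the mutex held, and cl_lock_mutex_get()/cl_lock_mutex_put()
 * are always paired.
 *
 * \code
 *      cl_lock_mutex_get(env, lock);
 *      if (lock->cll_state < CLS_FREEING)
 *              cl_lock_trace(D_TRACE, env, "inspect lock", lock);
 *      cl_lock_mutex_put(env, lock);
 * \endcode
 */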
704 * Try-locks cl_lock object.
706 * \retval 0 \a lock was successfully locked
708 * \retval -EBUSY \a lock cannot be locked right now
710 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
712 * \see cl_lock_mutex_get()
714 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
718 LINVRNT(cl_lock_invariant_trusted(env, lock));
722 if (lock->cll_guarder == cfs_current()) {
723 LINVRNT(lock->cll_depth > 0);
724 cl_lock_mutex_tail(env, lock);
725 } else if (cfs_mutex_trylock(&lock->cll_guard)) {
726 LINVRNT(lock->cll_depth == 0);
727 lock->cll_guarder = cfs_current();
728 cl_lock_mutex_tail(env, lock);
733 EXPORT_SYMBOL(cl_lock_mutex_try);
736 * Unlocks cl_lock object.
738 * \pre cl_lock_is_mutexed(lock)
740 * \see cl_lock_mutex_get()
742 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
744 struct cl_thread_counters *counters;
746 LINVRNT(cl_lock_invariant(env, lock));
747 LINVRNT(cl_lock_is_mutexed(lock));
748 LINVRNT(lock->cll_guarder == cfs_current());
749 LINVRNT(lock->cll_depth > 0);
751 counters = cl_lock_counters(env, lock);
752 LINVRNT(counters->ctc_nr_locks_locked > 0);
754 cl_lock_trace(D_TRACE, env, "put mutex", lock);
755 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
756 counters->ctc_nr_locks_locked--;
757 if (--lock->cll_depth == 0) {
758 lock->cll_guarder = NULL;
759 cfs_mutex_unlock(&lock->cll_guard);
762 EXPORT_SYMBOL(cl_lock_mutex_put);
765 * Returns true iff lock's mutex is owned by the current thread.
767 int cl_lock_is_mutexed(struct cl_lock *lock)
769 return lock->cll_guarder == cfs_current();
771 EXPORT_SYMBOL(cl_lock_is_mutexed);
774 * Returns number of cl_lock mutices held by the current thread (environment).
776 int cl_lock_nr_mutexed(const struct lu_env *env)
778 struct cl_thread_info *info;
783 * NOTE: if summation across all nesting levels (currently 2) proves
784 * too expensive, a summary counter can be added to
785 * struct cl_thread_info.
787 info = cl_env_info(env);
788 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
789 locked += info->clt_counters[i].ctc_nr_locks_locked;
792 EXPORT_SYMBOL(cl_lock_nr_mutexed);
794 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
796 LINVRNT(cl_lock_is_mutexed(lock));
797 LINVRNT(cl_lock_invariant(env, lock));
799 if (!(lock->cll_flags & CLF_CANCELLED)) {
800 const struct cl_lock_slice *slice;
802 lock->cll_flags |= CLF_CANCELLED;
803 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
805 if (slice->cls_ops->clo_cancel != NULL)
806 slice->cls_ops->clo_cancel(env, slice);
812 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
814 struct cl_object_header *head;
815 const struct cl_lock_slice *slice;
817 LINVRNT(cl_lock_is_mutexed(lock));
818 LINVRNT(cl_lock_invariant(env, lock));
821 if (lock->cll_state < CLS_FREEING) {
822 LASSERT(lock->cll_state != CLS_INTRANSIT);
823 cl_lock_state_set(env, lock, CLS_FREEING);
825 head = cl_object_header(lock->cll_descr.cld_obj);
827 cfs_spin_lock(&head->coh_lock_guard);
828 cfs_list_del_init(&lock->cll_linkage);
830 cfs_spin_unlock(&head->coh_lock_guard);
832 * From now on, no new references to this lock can be acquired
833 * by cl_lock_lookup().
835 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
837 if (slice->cls_ops->clo_delete != NULL)
838 slice->cls_ops->clo_delete(env, slice);
841 * From now on, no new references to this lock can be acquired
842 * by layer-specific means (like a pointer from struct
843 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
846 * Lock will be finally freed in cl_lock_put() when last of
847 * existing references goes away.
854 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
855 * top-lock (nesting == 0) accounts for this modification in the per-thread
856 * debugging counters. Sub-lock holds can be released by a thread different
857 * from one that acquired it.
859 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
862 struct cl_thread_counters *counters;
863 enum clt_nesting_level nesting;
865 lock->cll_holds += delta;
866 nesting = cl_lock_nesting(lock);
867 if (nesting == CNL_TOP) {
868 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
869 counters->ctc_nr_held += delta;
870 LASSERT(counters->ctc_nr_held >= 0);
875 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
876 * cl_lock_hold_mod() for the explanation of the debugging code.
878 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
881 struct cl_thread_counters *counters;
882 enum clt_nesting_level nesting;
884 lock->cll_users += delta;
885 nesting = cl_lock_nesting(lock);
886 if (nesting == CNL_TOP) {
887 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
888 counters->ctc_nr_used += delta;
889 LASSERT(counters->ctc_nr_used >= 0);
893 static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
894 const char *scope, const void *source)
896 LINVRNT(cl_lock_is_mutexed(lock));
897 LINVRNT(cl_lock_invariant(env, lock));
898 LASSERT(lock->cll_holds > 0);
901 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
902 lu_ref_del(&lock->cll_holders, scope, source);
903 cl_lock_hold_mod(env, lock, -1);
904 if (lock->cll_holds == 0) {
905 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
906 lock->cll_descr.cld_mode == CLM_GROUP)
908 * If lock is still phantom or grouplock when user is
909 * done with it---destroy the lock.
911 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
912 if (lock->cll_flags & CLF_CANCELPEND) {
913 lock->cll_flags &= ~CLF_CANCELPEND;
914 cl_lock_cancel0(env, lock);
916 if (lock->cll_flags & CLF_DOOMED) {
917 /* no longer doomed: it's dead... Jim. */
918 lock->cll_flags &= ~CLF_DOOMED;
919 cl_lock_delete0(env, lock);
926 * Waits until lock state is changed.
928 * This function is called with cl_lock mutex locked, atomically releases
929 * mutex and goes to sleep, waiting for a lock state change (signaled by
930 * cl_lock_signal()), and re-acquires the mutex before return.
932 * This function is used to wait until lock state machine makes some progress
933 * and to emulate synchronous operations on top of the asynchronous lock state machine.
936 * \retval -EINTR wait was interrupted
938 * \retval 0 wait wasn't interrupted
940 * \pre cl_lock_is_mutexed(lock)
942 * \see cl_lock_signal()
944 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
946 cfs_waitlink_t waiter;
947 cfs_sigset_t blocked;
951 LINVRNT(cl_lock_is_mutexed(lock));
952 LINVRNT(cl_lock_invariant(env, lock));
953 LASSERT(lock->cll_depth == 1);
954 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
956 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
957 result = lock->cll_error;
959 /* To avoid being interrupted by the 'non-fatal' signals
960 * (SIGCHLD, for instance), we'd block them temporarily.
962 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
964 cfs_waitlink_init(&waiter);
965 cfs_waitq_add(&lock->cll_wq, &waiter);
966 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
967 cl_lock_mutex_put(env, lock);
969 LASSERT(cl_lock_nr_mutexed(env) == 0);
970 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
972 cl_lock_mutex_get(env, lock);
973 cfs_set_current_state(CFS_TASK_RUNNING);
974 cfs_waitq_del(&lock->cll_wq, &waiter);
975 result = cfs_signal_pending() ? -EINTR : 0;
977 /* Restore old blocked signals */
978 cfs_restore_sigs(blocked);
982 EXPORT_SYMBOL(cl_lock_state_wait);
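/*
 * Typical waiting pattern (a sketch, mirroring cl_wait() and
 * cl_lock_enqueue_wait() below): the caller owns the lock mutex, re-checks
 * its condition in a loop, and calls cl_lock_state_wait(), which atomically
 * drops the mutex, sleeps until cl_lock_signal() is called, and re-acquires
 * the mutex before returning.
 *
 * \code
 *      cl_lock_mutex_get(env, lock);
 *      while (lock->cll_state != CLS_HELD) {
 *              result = cl_lock_state_wait(env, lock);
 *              if (result != 0)
 *                      break; /* interrupted by a fatal signal */
 *      }
 *      cl_lock_mutex_put(env, lock);
 * \endcode
 */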
984 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
985 enum cl_lock_state state)
987 const struct cl_lock_slice *slice;
990 LINVRNT(cl_lock_is_mutexed(lock));
991 LINVRNT(cl_lock_invariant(env, lock));
993 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
994 if (slice->cls_ops->clo_state != NULL)
995 slice->cls_ops->clo_state(env, slice, state);
996 cfs_waitq_broadcast(&lock->cll_wq);
1001 * Notifies waiters that lock state changed.
1003 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1004 * layers about state change by calling cl_lock_operations::clo_state()
1007 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1010 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1011 cl_lock_state_signal(env, lock, lock->cll_state);
1014 EXPORT_SYMBOL(cl_lock_signal);
1017 * Changes lock state.
1019 * This function is invoked to notify layers that lock state changed, possibly
1020 * as a result of an asynchronous event such as call-back reception.
1022 * \post lock->cll_state == state
1024 * \see cl_lock_operations::clo_state()
1026 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1027 enum cl_lock_state state)
1029 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
1032 LASSERT(lock->cll_state <= state ||
1033 (lock->cll_state == CLS_CACHED &&
1034 (state == CLS_HELD || /* lock found in cache */
1035 state == CLS_NEW || /* sub-lock canceled */
1036 state == CLS_INTRANSIT)) ||
1037 /* lock is in transit state */
1038 lock->cll_state == CLS_INTRANSIT);
1040 if (lock->cll_state != state) {
1041 cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
1042 cfs_atomic_inc(&site->cs_locks_state[state]);
1044 cl_lock_state_signal(env, lock, state);
1045 lock->cll_state = state;
1049 EXPORT_SYMBOL(cl_lock_state_set);
1051 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1053 const struct cl_lock_slice *slice;
1059 LINVRNT(cl_lock_is_mutexed(lock));
1060 LINVRNT(cl_lock_invariant(env, lock));
1061 LASSERT(lock->cll_state == CLS_INTRANSIT);
1064 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1066 if (slice->cls_ops->clo_unuse != NULL) {
1067 result = slice->cls_ops->clo_unuse(env, slice);
1072 LASSERT(result != -ENOSYS);
1073 } while (result == CLO_REPEAT);
1079 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1080 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1081 * If \a atomic is set and a layer fails part-way, the lock must be unused
1082 * again so that the whole use operation remains atomic.
1084 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1086 const struct cl_lock_slice *slice;
1088 enum cl_lock_state state;
1091 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1093 LASSERT(lock->cll_state == CLS_CACHED);
1094 if (lock->cll_error)
1095 RETURN(lock->cll_error);
1098 state = cl_lock_intransit(env, lock);
1099 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1100 if (slice->cls_ops->clo_use != NULL) {
1101 result = slice->cls_ops->clo_use(env, slice);
1106 LASSERT(result != -ENOSYS);
1108 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1114 if (result == -ESTALE) {
1116 * -ESTALE means a sublock is being cancelled at this
1117 * moment; reset the lock state to CLS_NEW here and ask
1118 * the caller to repeat.
1121 result = CLO_REPEAT;
1124 /* @atomic means back-off-on-failure. */
1127 rc = cl_unuse_try_internal(env, lock);
1128 /* Vet the results. */
1129 if (rc < 0 && result > 0)
1134 cl_lock_extransit(env, lock, state);
1137 EXPORT_SYMBOL(cl_use_try);
1140 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1143 static int cl_enqueue_kick(const struct lu_env *env,
1144 struct cl_lock *lock,
1145 struct cl_io *io, __u32 flags)
1148 const struct cl_lock_slice *slice;
1152 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1153 if (slice->cls_ops->clo_enqueue != NULL) {
1154 result = slice->cls_ops->clo_enqueue(env,
1160 LASSERT(result != -ENOSYS);
1165 * Tries to enqueue a lock.
1167 * This function is called repeatedly by cl_enqueue() until either lock is
1168 * enqueued, or error occurs. This function does not block waiting for
1169 * networking communication to complete.
1171 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1172 * lock->cll_state == CLS_HELD)
1174 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1175 * \see cl_lock_state::CLS_ENQUEUED
1177 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1178 struct cl_io *io, __u32 flags)
1183 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1187 LINVRNT(cl_lock_is_mutexed(lock));
1189 if (lock->cll_error != 0)
1191 switch (lock->cll_state) {
1193 cl_lock_state_set(env, lock, CLS_QUEUING);
1197 result = cl_enqueue_kick(env, lock, io, flags);
1199 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1202 LASSERT(cl_lock_is_intransit(lock));
1206 /* yank lock from the cache. */
1207 result = cl_use_try(env, lock, 0);
1216 * impossible, only held locks with increased
1217 * ->cll_holds can be enqueued, and they cannot be
1222 } while (result == CLO_REPEAT);
1224 cl_lock_error(env, lock, result);
1225 RETURN(result ?: lock->cll_error);
1227 EXPORT_SYMBOL(cl_enqueue_try);
1230 * Cancel the conflicting lock found during previous enqueue.
1232 * \retval 0 conflicting lock has been canceled.
1233 * \retval -ve error code.
1235 int cl_lock_enqueue_wait(const struct lu_env *env,
1236 struct cl_lock *lock,
1239 struct cl_lock *conflict;
1243 LASSERT(cl_lock_is_mutexed(lock));
1244 LASSERT(lock->cll_state == CLS_QUEUING);
1245 LASSERT(lock->cll_conflict != NULL);
1247 conflict = lock->cll_conflict;
1248 lock->cll_conflict = NULL;
1250 cl_lock_mutex_put(env, lock);
1251 LASSERT(cl_lock_nr_mutexed(env) == 0);
1253 cl_lock_mutex_get(env, conflict);
1254 cl_lock_cancel(env, conflict);
1255 cl_lock_delete(env, conflict);
1257 while (conflict->cll_state != CLS_FREEING) {
1258 rc = cl_lock_state_wait(env, conflict);
1262 cl_lock_mutex_put(env, conflict);
1263 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1264 cl_lock_put(env, conflict);
1267 cl_lock_mutex_get(env, lock);
1272 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1274 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1275 struct cl_io *io, __u32 enqflags)
1281 LINVRNT(cl_lock_is_mutexed(lock));
1282 LINVRNT(cl_lock_invariant(env, lock));
1283 LASSERT(lock->cll_holds > 0);
1285 cl_lock_user_add(env, lock);
1287 result = cl_enqueue_try(env, lock, io, enqflags);
1288 if (result == CLO_WAIT) {
1289 if (lock->cll_conflict != NULL)
1290 result = cl_lock_enqueue_wait(env, lock, 1);
1292 result = cl_lock_state_wait(env, lock);
1299 cl_lock_user_del(env, lock);
1300 cl_lock_error(env, lock, result);
1302 LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1303 lock->cll_state == CLS_ENQUEUED ||
1304 lock->cll_state == CLS_HELD));
1311 * \pre current thread or io owns a hold on lock.
1313 * \post ergo(result == 0, lock->users increased)
1314 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1315 * lock->cll_state == CLS_HELD)
1317 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1318 struct cl_io *io, __u32 enqflags)
1324 cl_lock_lockdep_acquire(env, lock, enqflags);
1325 cl_lock_mutex_get(env, lock);
1326 result = cl_enqueue_locked(env, lock, io, enqflags);
1327 cl_lock_mutex_put(env, lock);
1329 cl_lock_lockdep_release(env, lock);
1330 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1331 lock->cll_state == CLS_HELD));
1334 EXPORT_SYMBOL(cl_enqueue);
1337 * Tries to unlock a lock.
1339 * This function is called by cl_unuse(); it either unlocks the lock or
1340 * fails with an error.
1341 * cl_unuse_try() is a one-shot operation, so it must NOT return CLO_WAIT.
1343 * \pre lock->cll_state == CLS_HELD
1345 * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
1347 * \see cl_unuse() cl_lock_operations::clo_unuse()
1348 * \see cl_lock_state::CLS_CACHED
1350 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1353 enum cl_lock_state state = CLS_NEW;
1356 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1358 LASSERT(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED);
1359 if (lock->cll_users > 1) {
1360 cl_lock_user_del(env, lock);
1365 * New lock users (->cll_users) are not protecting unlocking
1366 * from proceeding. From this point, lock eventually reaches
1367 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1370 state = cl_lock_intransit(env, lock);
1372 result = cl_unuse_try_internal(env, lock);
1373 LASSERT(lock->cll_state == CLS_INTRANSIT);
1374 LASSERT(result != CLO_WAIT);
1375 cl_lock_user_del(env, lock);
1376 if (result == 0 || result == -ESTALE) {
1378 * Return lock back to the cache. This is the only
1379 * place where lock is moved into CLS_CACHED state.
1381 * If one of ->clo_unuse() methods returned -ESTALE, lock
1382 * cannot be placed into cache and has to be
1383 * re-initialized. This happens e.g., when a sub-lock was
1384 * canceled while unlocking was in progress.
1386 if (state == CLS_HELD && result == 0)
1390 cl_lock_extransit(env, lock, state);
1393 * Hide the -ESTALE error.
1394 * Suppose the lock is a glimpse lock spanning multiple
1395 * stripes, one of its sublocks returned -ENAVAIL while the
1396 * others matched existing write locks. In this case we
1397 * cannot mark the lock as failed, because otherwise some of
1398 * its sublocks would not be cancelled, and dirty pages
1399 * covered by them would never be written to the OSTs. -jay
1403 CERROR("result = %d, this is unlikely!\n", result);
1404 cl_lock_extransit(env, lock, state);
1407 result = result ?: lock->cll_error;
1409 cl_lock_error(env, lock, result);
1412 EXPORT_SYMBOL(cl_unuse_try);
1414 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1419 result = cl_unuse_try(env, lock);
1421 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1429 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1432 cl_lock_mutex_get(env, lock);
1433 cl_unuse_locked(env, lock);
1434 cl_lock_mutex_put(env, lock);
1435 cl_lock_lockdep_release(env, lock);
1438 EXPORT_SYMBOL(cl_unuse);
1441 * Tries to wait for a lock.
1443 * This function is called repeatedly by cl_wait() until either lock is
1444 * granted, or error occurs. This function does not block waiting for network
1445 * communication to complete.
1447 * \see cl_wait() cl_lock_operations::clo_wait()
1448 * \see cl_lock_state::CLS_HELD
1450 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1452 const struct cl_lock_slice *slice;
1456 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1458 LINVRNT(cl_lock_is_mutexed(lock));
1459 LINVRNT(cl_lock_invariant(env, lock));
1460 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1461 lock->cll_state == CLS_HELD ||
1462 lock->cll_state == CLS_INTRANSIT);
1463 LASSERT(lock->cll_users > 0);
1464 LASSERT(lock->cll_holds > 0);
1467 if (lock->cll_error != 0)
1470 if (cl_lock_is_intransit(lock)) {
1475 if (lock->cll_state == CLS_HELD)
1480 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1481 if (slice->cls_ops->clo_wait != NULL) {
1482 result = slice->cls_ops->clo_wait(env, slice);
1487 LASSERT(result != -ENOSYS);
1489 LASSERT(lock->cll_state != CLS_INTRANSIT);
1490 cl_lock_state_set(env, lock, CLS_HELD);
1492 } while (result == CLO_REPEAT);
1493 RETURN(result ?: lock->cll_error);
1495 EXPORT_SYMBOL(cl_wait_try);
1498 * Waits until enqueued lock is granted.
1500 * \pre current thread or io owns a hold on the lock
1501 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1502 * lock->cll_state == CLS_HELD)
1504 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1506 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1511 cl_lock_mutex_get(env, lock);
1513 LINVRNT(cl_lock_invariant(env, lock));
1514 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1515 "Wrong state %d \n", lock->cll_state);
1516 LASSERT(lock->cll_holds > 0);
1519 result = cl_wait_try(env, lock);
1520 if (result == CLO_WAIT) {
1521 result = cl_lock_state_wait(env, lock);
1528 cl_lock_user_del(env, lock);
1529 cl_lock_error(env, lock, result);
1530 cl_lock_lockdep_release(env, lock);
1532 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1533 cl_lock_mutex_put(env, lock);
1534 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1537 EXPORT_SYMBOL(cl_wait);
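/*
 * Illustrative life-cycle of an enqueued lock (a sketch of how the calls
 * above compose; cl_lock_request() further below wraps a similar sequence).
 * The descriptor need, the io, the enqflags, and the "user"/cookie pair are
 * assumed to be supplied by the caller.
 *
 * \code
 *      lock = cl_lock_hold(env, io, need, "user", cookie);
 *      if (!IS_ERR(lock)) {
 *              rc = cl_enqueue(env, lock, io, enqflags);
 *              if (rc == 0) {
 *                      rc = cl_wait(env, lock);     /* lock is CLS_HELD now */
 *                      if (rc == 0)
 *                              cl_unuse(env, lock); /* back into the cache */
 *              }
 *              cl_lock_release(env, lock, "user", cookie);
 *      }
 * \endcode
 */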
1540 * Executes cl_lock_operations::clo_weigh() and sums the results to estimate lock weight.
1543 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1545 const struct cl_lock_slice *slice;
1546 unsigned long pound;
1547 unsigned long ounce;
1550 LINVRNT(cl_lock_is_mutexed(lock));
1551 LINVRNT(cl_lock_invariant(env, lock));
1554 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1555 if (slice->cls_ops->clo_weigh != NULL) {
1556 ounce = slice->cls_ops->clo_weigh(env, slice);
1558 if (pound < ounce) /* over-weight^Wflow */
1564 EXPORT_SYMBOL(cl_lock_weigh);
1567 * Notifies layers that lock description changed.
1569 * The server can grant client a lock different from one that was requested
1570 * (e.g., larger in extent). This method is called when actually granted lock
1571 * description becomes known, to let layers accommodate the changed lock description.
1574 * \see cl_lock_operations::clo_modify()
1576 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1577 const struct cl_lock_descr *desc)
1579 const struct cl_lock_slice *slice;
1580 struct cl_object *obj = lock->cll_descr.cld_obj;
1581 struct cl_object_header *hdr = cl_object_header(obj);
1585 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1586 /* don't allow object to change */
1587 LASSERT(obj == desc->cld_obj);
1588 LINVRNT(cl_lock_is_mutexed(lock));
1589 LINVRNT(cl_lock_invariant(env, lock));
1591 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1592 if (slice->cls_ops->clo_modify != NULL) {
1593 result = slice->cls_ops->clo_modify(env, slice, desc);
1598 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1599 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1601 * Just replace description in place. Nothing more is needed for
1602 * now. If locks were indexed according to their extent and/or mode,
1603 * that index would have to be updated here.
1605 cfs_spin_lock(&hdr->coh_lock_guard);
1606 lock->cll_descr = *desc;
1607 cfs_spin_unlock(&hdr->coh_lock_guard);
1610 EXPORT_SYMBOL(cl_lock_modify);
1613 * Initializes lock closure with a given origin.
1615 * \see cl_lock_closure
1617 void cl_lock_closure_init(const struct lu_env *env,
1618 struct cl_lock_closure *closure,
1619 struct cl_lock *origin, int wait)
1621 LINVRNT(cl_lock_is_mutexed(origin));
1622 LINVRNT(cl_lock_invariant(env, origin));
1624 CFS_INIT_LIST_HEAD(&closure->clc_list);
1625 closure->clc_origin = origin;
1626 closure->clc_wait = wait;
1627 closure->clc_nr = 0;
1629 EXPORT_SYMBOL(cl_lock_closure_init);
1632 * Builds a closure of \a lock.
1634 * Building of a closure consists of adding initial lock (\a lock) into it,
1635 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1636 * methods might call cl_lock_closure_build() recursively again, adding more
1637 * locks to the closure, etc.
1639 * \see cl_lock_closure
1641 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1642 struct cl_lock_closure *closure)
1644 const struct cl_lock_slice *slice;
1648 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1649 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1651 result = cl_lock_enclosure(env, lock, closure);
1653 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1654 if (slice->cls_ops->clo_closure != NULL) {
1655 result = slice->cls_ops->clo_closure(env, slice,
1663 cl_lock_disclosure(env, closure);
1666 EXPORT_SYMBOL(cl_lock_closure_build);
1669 * Adds new lock to a closure.
1671 * Try-locks \a lock and if succeeded, adds it to the closure (never more than
1672 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1673 * until next try-lock is likely to succeed.
1675 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1676 struct cl_lock_closure *closure)
1680 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1681 if (!cl_lock_mutex_try(env, lock)) {
1683 * If lock->cll_inclosure is not empty, lock is already in this closure.
1686 if (cfs_list_empty(&lock->cll_inclosure)) {
1687 cl_lock_get_trust(lock);
1688 lu_ref_add(&lock->cll_reference, "closure", closure);
1689 cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1692 cl_lock_mutex_put(env, lock);
1695 cl_lock_disclosure(env, closure);
1696 if (closure->clc_wait) {
1697 cl_lock_get_trust(lock);
1698 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1699 cl_lock_mutex_put(env, closure->clc_origin);
1701 LASSERT(cl_lock_nr_mutexed(env) == 0);
1702 cl_lock_mutex_get(env, lock);
1703 cl_lock_mutex_put(env, lock);
1705 cl_lock_mutex_get(env, closure->clc_origin);
1706 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1707 cl_lock_put(env, lock);
1709 result = CLO_REPEAT;
1713 EXPORT_SYMBOL(cl_lock_enclosure);
1715 /** Releases mutices of enclosed locks. */
1716 void cl_lock_disclosure(const struct lu_env *env,
1717 struct cl_lock_closure *closure)
1719 struct cl_lock *scan;
1720 struct cl_lock *temp;
1722 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1723 cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1725 cfs_list_del_init(&scan->cll_inclosure);
1726 cl_lock_mutex_put(env, scan);
1727 lu_ref_del(&scan->cll_reference, "closure", closure);
1728 cl_lock_put(env, scan);
1731 LASSERT(closure->clc_nr == 0);
1733 EXPORT_SYMBOL(cl_lock_disclosure);
1735 /** Finalizes a closure. */
1736 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1738 LASSERT(closure->clc_nr == 0);
1739 LASSERT(cfs_list_empty(&closure->clc_list));
1741 EXPORT_SYMBOL(cl_lock_closure_fini);
1744 * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
1745 * destroyed, then destroy the lock. If there are holds on the lock, postpone
1746 * destruction until all holds are released. This is called when a decision is
1747 * made to destroy the lock in the future. E.g., when a blocking AST is
1748 * received on it, or fatal communication error happens.
1750 * Caller must have a reference on this lock to prevent a situation, when
1751 * deleted lock lingers in memory for indefinite time, because nobody calls
1752 * cl_lock_put() to finish it.
1754 * \pre atomic_read(&lock->cll_ref) > 0
1755 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1756 * cl_lock_nr_mutexed(env) == 1)
1757 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1758 * held, as deletion of sub-locks might require releasing a top-lock
1761 * \see cl_lock_operations::clo_delete()
1762 * \see cl_lock::cll_holds
1764 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1766 LINVRNT(cl_lock_is_mutexed(lock));
1767 LINVRNT(cl_lock_invariant(env, lock));
1768 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1769 cl_lock_nr_mutexed(env) == 1));
1772 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1773 if (lock->cll_holds == 0)
1774 cl_lock_delete0(env, lock);
1776 lock->cll_flags |= CLF_DOOMED;
1779 EXPORT_SYMBOL(cl_lock_delete);
1782 * Mark lock as irrecoverably failed, and mark it for destruction. This
1783 * happens when, e.g., server fails to grant a lock to us, or networking
1786 * \pre atomic_read(&lock->cll_ref) > 0
1788 * \see clo_lock_delete()
1789 * \see cl_lock::cll_holds
1791 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1793 LINVRNT(cl_lock_is_mutexed(lock));
1794 LINVRNT(cl_lock_invariant(env, lock));
1797 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1798 if (lock->cll_error == 0 && error != 0) {
1799 lock->cll_error = error;
1800 cl_lock_signal(env, lock);
1801 cl_lock_cancel(env, lock);
1802 cl_lock_delete(env, lock);
1806 EXPORT_SYMBOL(cl_lock_error);
1809 * Cancels this lock. Notifies layers
1810 * (bottom-to-top) that lock is being cancelled, then destroy the lock. If
1811 * there are holds on the lock, postpone cancellation until
1812 * all holds are released.
1814 * Cancellation notification is delivered to layers at most once.
1816 * \see cl_lock_operations::clo_cancel()
1817 * \see cl_lock::cll_holds
1819 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1821 LINVRNT(cl_lock_is_mutexed(lock));
1822 LINVRNT(cl_lock_invariant(env, lock));
1825 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1826 if (lock->cll_holds == 0)
1827 cl_lock_cancel0(env, lock);
1829 lock->cll_flags |= CLF_CANCELPEND;
1832 EXPORT_SYMBOL(cl_lock_cancel);
1835 * Finds an existing lock covering given page and optionally different from a
1836 * given \a except lock.
1838 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
1839 struct cl_page *page, struct cl_lock *except,
1840 int pending, int canceld)
1842 struct cl_object_header *head;
1843 struct cl_lock *scan;
1844 struct cl_lock *lock;
1845 struct cl_lock_descr *need;
1849 head = cl_object_header(obj);
1850 need = &cl_env_info(env)->clt_descr;
1853 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but not PHANTOM */
1855 need->cld_start = need->cld_end = page->cp_index;
1856 need->cld_enq_flags = 0;
1858 cfs_spin_lock(&head->coh_lock_guard);
1859 /* It is fine to match any group lock since there could be only one
1860 * with a uniq gid and it conflicts with all other lock modes too */
1861 cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1862 if (scan != except &&
1863 (scan->cll_descr.cld_mode == CLM_GROUP ||
1864 cl_lock_ext_match(&scan->cll_descr, need)) &&
1865 scan->cll_state >= CLS_HELD &&
1866 scan->cll_state < CLS_FREEING &&
1868 * This check is racy as the lock can be canceled right
1869 * after it is done, but this is fine, because page exists
1872 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1873 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1874 /* Don't increase cs_hit here since this
1875 * is just a helper function. */
1876 cl_lock_get_trust(scan);
1881 cfs_spin_unlock(&head->coh_lock_guard);
1884 EXPORT_SYMBOL(cl_lock_at_page);
1887 * Calculate the page offset at the layer of @lock.
1888 * At the time of this writing, @page is top page and @lock is sub lock.
1890 static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
1892 struct lu_device_type *dtype;
1893 const struct cl_page_slice *slice;
1895 dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
1896 slice = cl_page_at(page, dtype);
1897 LASSERT(slice != NULL);
1898 return slice->cpl_page->cp_index;
1902 * Check if page @page is covered by an extra lock or discard it.
1904 static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
1905 struct cl_page *page, void *cbdata)
1907 struct cl_thread_info *info = cl_env_info(env);
1908 struct cl_lock *lock = cbdata;
1909 pgoff_t index = pgoff_at_lock(page, lock);
1911 if (index >= info->clt_fn_index) {
1912 struct cl_lock *tmp;
1914 /* refresh non-overlapped index */
1915 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, page, lock,
1918 /* Cache the first-non-overlapped index so as to skip
1919 * all pages within [index, clt_fn_index). This
1920 * is safe because if tmp lock is canceled, it will
1921 * discard these pages. */
1922 info->clt_fn_index = tmp->cll_descr.cld_end + 1;
1923 if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
1924 info->clt_fn_index = CL_PAGE_EOF;
1925 cl_lock_put(env, tmp);
1926 } else if (cl_page_own(env, io, page) == 0) {
1927 /* discard the page */
1928 cl_page_unmap(env, io, page);
1929 cl_page_discard(env, io, page);
1930 cl_page_disown(env, io, page);
1932 LASSERT(page->cp_state == CPS_FREEING);
1936 info->clt_next_index = index + 1;
1937 return CLP_GANG_OKAY;
1940 static int pageout_cb(const struct lu_env *env, struct cl_io *io,
1941 struct cl_page *page, void *cbdata)
1943 struct cl_thread_info *info = cl_env_info(env);
1944 struct cl_page_list *queue = &info->clt_queue.c2_qin;
1945 struct cl_lock *lock = cbdata;
1946 typeof(cl_page_own) *page_own;
1947 int rc = CLP_GANG_OKAY;
1949 page_own = queue->pl_nr ? cl_page_own_try : cl_page_own;
1950 if (page_own(env, io, page) == 0) {
1951 cl_page_list_add(queue, page);
1952 info->clt_next_index = pgoff_at_lock(page, lock) + 1;
1953 } else if (page->cp_state != CPS_FREEING) {
1954 /* cl_page_own() won't fail unless
1955 * the page is being freed. */
1956 LASSERT(queue->pl_nr != 0);
1957 rc = CLP_GANG_AGAIN;
1964 * Invalidate pages protected by the given lock, sending them out to the
1965 * server first, if necessary.
1967 * This function does the following:
1969 * - collects a list of pages to be invalidated,
1971 * - unmaps them from the user virtual memory,
1973 * - sends dirty pages to the server,
1975 * - waits for transfer completion,
1977 * - discards pages, and throws them out of memory.
1979 * If \a discard is set, pages are discarded without sending them to the server.
1982 * If error happens on any step, the process continues anyway (the reasoning
1983 * behind this being that lock cancellation cannot be delayed indefinitely).
1985 int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
1988 struct cl_thread_info *info = cl_env_info(env);
1989 struct cl_io *io = &info->clt_io;
1990 struct cl_2queue *queue = &info->clt_queue;
1991 struct cl_lock_descr *descr = &lock->cll_descr;
1992 cl_page_gang_cb_t cb;
1997 LINVRNT(cl_lock_invariant(env, lock));
2000 io->ci_obj = cl_object_top(descr->cld_obj);
2001 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
2005 cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : pageout_cb;
2006 info->clt_fn_index = info->clt_next_index = descr->cld_start;
2008 cl_2queue_init(queue);
2009 res = cl_page_gang_lookup(env, descr->cld_obj, io,
2010 info->clt_next_index, descr->cld_end,
2012 page_count = queue->c2_qin.pl_nr;
2013 if (page_count > 0) {
2014 /* must be writeback case */
2015 LASSERTF(descr->cld_mode >= CLM_WRITE, "lock mode %s\n",
2016 cl_lock_mode_name(descr->cld_mode));
2018 result = cl_page_list_unmap(env, io, &queue->c2_qin);
2020 long timeout = 600; /* 10 minutes. */
2021 /* for debug purpose, if this request can't be
2022 * finished in 10 minutes, we hope it can
2025 result = cl_io_submit_sync(env, io, CRT_WRITE,
2029 CWARN("Writing %lu pages error: %d\n",
2030 page_count, result);
2032 cl_2queue_discard(env, io, queue);
2033 cl_2queue_disown(env, io, queue);
2034 cl_2queue_fini(env, queue);
2037 if (info->clt_next_index > descr->cld_end)
2040 if (res == CLP_GANG_RESCHED)
2042 } while (res != CLP_GANG_OKAY);
2044 cl_io_fini(env, io);
2047 EXPORT_SYMBOL(cl_lock_page_out);
2050 * Eliminate all locks for a given object.
2052 * Caller has to guarantee that no lock is in active use.
2054 * \param cancel when this is set, cl_locks_prune() cancels locks before destroying them.
2057 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
2059 struct cl_object_header *head;
2060 struct cl_lock *lock;
2063 head = cl_object_header(obj);
2065 * If locks are destroyed without cancellation, all pages must be
2066 * already destroyed (as otherwise they will be left unprotected).
2068 LASSERT(ergo(!cancel,
2069 head->coh_tree.rnode == NULL && head->coh_pages == 0));
2071 cfs_spin_lock(&head->coh_lock_guard);
2072 while (!cfs_list_empty(&head->coh_locks)) {
2073 lock = container_of(head->coh_locks.next,
2074 struct cl_lock, cll_linkage);
2075 cl_lock_get_trust(lock);
2076 cfs_spin_unlock(&head->coh_lock_guard);
2077 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2080 cl_lock_mutex_get(env, lock);
2081 if (lock->cll_state < CLS_FREEING) {
2082 LASSERT(lock->cll_holds == 0);
2083 LASSERT(lock->cll_users <= 1);
2084 if (unlikely(lock->cll_users == 1)) {
2085 struct l_wait_info lwi = { 0 };
2087 cl_lock_mutex_put(env, lock);
2088 l_wait_event(lock->cll_wq,
2089 lock->cll_users == 0,
2095 cl_lock_cancel(env, lock);
2096 cl_lock_delete(env, lock);
2098 cl_lock_mutex_put(env, lock);
2099 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2100 cl_lock_put(env, lock);
2101 cfs_spin_lock(&head->coh_lock_guard);
2103 cfs_spin_unlock(&head->coh_lock_guard);
2106 EXPORT_SYMBOL(cl_locks_prune);
2108 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2109 const struct cl_io *io,
2110 const struct cl_lock_descr *need,
2111 const char *scope, const void *source)
2113 struct cl_lock *lock;
2118 lock = cl_lock_find(env, io, need);
2121 cl_lock_mutex_get(env, lock);
2122 if (lock->cll_state < CLS_FREEING &&
2123 !(lock->cll_flags & CLF_CANCELLED)) {
2124 cl_lock_hold_mod(env, lock, +1);
2125 lu_ref_add(&lock->cll_holders, scope, source);
2126 lu_ref_add(&lock->cll_reference, scope, source);
2129 cl_lock_mutex_put(env, lock);
2130 cl_lock_put(env, lock);
2136 * Returns a lock matching \a need description with a reference and a hold on it.
2139 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2140 * guarantees that lock is not in the CLS_FREEING state on return.
2142 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2143 const struct cl_lock_descr *need,
2144 const char *scope, const void *source)
2146 struct cl_lock *lock;
2150 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2152 cl_lock_mutex_put(env, lock);
2155 EXPORT_SYMBOL(cl_lock_hold);
2158 * Main high-level entry point of cl_lock interface that finds existing or
2159 * enqueues new lock matching given description.
2161 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2162 const struct cl_lock_descr *need,
2163 const char *scope, const void *source)
2165 struct cl_lock *lock;
2167 __u32 enqflags = need->cld_enq_flags;
2171 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2175 rc = cl_enqueue_locked(env, lock, io, enqflags);
2177 if (cl_lock_fits_into(env, lock, need, io)) {
2178 if (!(enqflags & CEF_AGL)) {
2179 cl_lock_mutex_put(env, lock);
2180 cl_lock_lockdep_acquire(env, lock,
2186 cl_unuse_locked(env, lock);
2188 cl_lock_trace(D_DLMTRACE, env,
2189 rc <= 0 ? "enqueue failed" : "agl succeed", lock);
2190 cl_lock_hold_release(env, lock, scope, source);
2191 cl_lock_mutex_put(env, lock);
2192 lu_ref_del(&lock->cll_reference, scope, source);
2193 cl_lock_put(env, lock);
2195 LASSERT(enqflags & CEF_AGL);
2197 } else if (rc != 0) {
2203 EXPORT_SYMBOL(cl_lock_request);
2206 * Adds a hold to a known lock.
2208 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2209 const char *scope, const void *source)
2211 LINVRNT(cl_lock_is_mutexed(lock));
2212 LINVRNT(cl_lock_invariant(env, lock));
2213 LASSERT(lock->cll_state != CLS_FREEING);
2216 cl_lock_hold_mod(env, lock, +1);
2218 lu_ref_add(&lock->cll_holders, scope, source);
2219 lu_ref_add(&lock->cll_reference, scope, source);
2222 EXPORT_SYMBOL(cl_lock_hold_add);
2225 * Releases a hold and a reference on a lock, on which caller acquired a hold.
2228 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2229 const char *scope, const void *source)
2231 LINVRNT(cl_lock_invariant(env, lock));
2233 cl_lock_hold_release(env, lock, scope, source);
2234 lu_ref_del(&lock->cll_reference, scope, source);
2235 cl_lock_put(env, lock);
2238 EXPORT_SYMBOL(cl_lock_unhold);
2241 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2243 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2244 const char *scope, const void *source)
2246 LINVRNT(cl_lock_invariant(env, lock));
2248 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2249 cl_lock_mutex_get(env, lock);
2250 cl_lock_hold_release(env, lock, scope, source);
2251 cl_lock_mutex_put(env, lock);
2252 lu_ref_del(&lock->cll_reference, scope, source);
2253 cl_lock_put(env, lock);
2256 EXPORT_SYMBOL(cl_lock_release);
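/*
 * Sketch of the common top-level pattern (illustrative only): most users go
 * through cl_lock_request(), which finds or enqueues a lock and returns it
 * held and referenced, and tear it down again when done. The io, the need
 * descriptor, and the scope/source cookie are assumed to come from the
 * caller.
 *
 * \code
 *      lock = cl_lock_request(env, io, need, "io", io);
 *      if (lock != NULL && !IS_ERR(lock)) {
 *              /* lock is usable here (CLS_HELD for a regular enqueue) */
 *              cl_unuse(env, lock);                  /* drop the user */
 *              cl_lock_release(env, lock, "io", io); /* drop hold and ref */
 *      }
 * \endcode
 */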
2258 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2260 LINVRNT(cl_lock_is_mutexed(lock));
2261 LINVRNT(cl_lock_invariant(env, lock));
2264 cl_lock_used_mod(env, lock, +1);
2267 EXPORT_SYMBOL(cl_lock_user_add);
2269 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2271 LINVRNT(cl_lock_is_mutexed(lock));
2272 LINVRNT(cl_lock_invariant(env, lock));
2273 LASSERT(lock->cll_users > 0);
2276 cl_lock_used_mod(env, lock, -1);
2277 if (lock->cll_users == 0)
2278 cfs_waitq_broadcast(&lock->cll_wq);
2281 EXPORT_SYMBOL(cl_lock_user_del);
2283 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2285 static const char *names[] = {
2286 [CLM_PHANTOM] = "P",
2291 if (0 <= mode && mode < ARRAY_SIZE(names))
2296 EXPORT_SYMBOL(cl_lock_mode_name);
2299 * Prints human readable representation of a lock description.
2301 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2302 lu_printer_t printer,
2303 const struct cl_lock_descr *descr)
2305 const struct lu_fid *fid;
2307 fid = lu_object_fid(&descr->cld_obj->co_lu);
2308 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2310 EXPORT_SYMBOL(cl_lock_descr_print);
2313 * Prints human readable representation of \a lock to the \a f.
2315 void cl_lock_print(const struct lu_env *env, void *cookie,
2316 lu_printer_t printer, const struct cl_lock *lock)
2318 const struct cl_lock_slice *slice;
2319 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2320 lock, cfs_atomic_read(&lock->cll_ref),
2321 lock->cll_state, lock->cll_error, lock->cll_holds,
2322 lock->cll_users, lock->cll_flags);
2323 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2324 (*printer)(env, cookie, " {\n");
2326 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2327 (*printer)(env, cookie, " %s@%p: ",
2328 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2330 if (slice->cls_ops->clo_print != NULL)
2331 slice->cls_ops->clo_print(env, cookie, printer, slice);
2332 (*printer)(env, cookie, "\n");
2334 (*printer)(env, cookie, "} lock@%p\n", lock);
2336 EXPORT_SYMBOL(cl_lock_print);
2338 int cl_lock_init(void)
2340 return lu_kmem_init(cl_lock_caches);
2343 void cl_lock_fini(void)
2345 lu_kmem_fini(cl_lock_caches);