1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011, 2012, Whamcloud, Inc.
35 * This file is part of Lustre, http://www.lustre.org/
36 * Lustre is a trademark of Sun Microsystems, Inc.
40 * Author: Nikita Danilov <nikita.danilov@sun.com>
43 #define DEBUG_SUBSYSTEM S_CLASS
45 # define EXPORT_SYMTAB
48 #include <obd_class.h>
49 #include <obd_support.h>
50 #include <lustre_fid.h>
51 #include <libcfs/list.h>
52 /* lu_time_global_{init,fini}() */
55 #include <cl_object.h>
56 #include "cl_internal.h"
58 /** Lock class of cl_lock::cll_guard */
59 static cfs_lock_class_key_t cl_lock_guard_class;
60 static cfs_mem_cache_t *cl_lock_kmem;
62 static struct lu_kmem_descr cl_lock_caches[] = {
64 .ckd_cache = &cl_lock_kmem,
65 .ckd_name = "cl_lock_kmem",
66 .ckd_size = sizeof (struct cl_lock)
74 * Basic lock invariant that is maintained at all times. Caller either has a
75 * reference to \a lock, or somehow assures that \a lock cannot be freed.
77 * \see cl_lock_invariant()
79 static int cl_lock_invariant_trusted(const struct lu_env *env,
80 const struct cl_lock *lock)
82 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
83 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
84 lock->cll_holds >= lock->cll_users &&
85 lock->cll_holds >= 0 &&
86 lock->cll_users >= 0 &&
91 * Stronger lock invariant, checking that caller has a reference on a lock.
93 * \see cl_lock_invariant_trusted()
95 static int cl_lock_invariant(const struct lu_env *env,
96 const struct cl_lock *lock)
100 result = cfs_atomic_read(&lock->cll_ref) > 0 &&
101 cl_lock_invariant_trusted(env, lock);
102 if (!result && env != NULL)
103 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
108 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
110 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
112 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
116 * Returns a set of counters for this lock, depending on a lock nesting.
118 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
119 const struct cl_lock *lock)
121 struct cl_thread_info *info;
122 enum clt_nesting_level nesting;
124 info = cl_env_info(env);
125 nesting = cl_lock_nesting(lock);
126 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
127 return &info->clt_counters[nesting];
130 static void cl_lock_trace0(int level, const struct lu_env *env,
131 const char *prefix, const struct cl_lock *lock,
132 const char *func, const int line)
134 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
135 CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
136 "(%p/%d/%d) at %s():%d\n",
137 prefix, lock, cfs_atomic_read(&lock->cll_ref),
138 lock->cll_guarder, lock->cll_depth,
139 lock->cll_state, lock->cll_error, lock->cll_holds,
140 lock->cll_users, lock->cll_flags,
141 env, h->coh_nesting, cl_lock_nr_mutexed(env),
144 #define cl_lock_trace(level, env, prefix, lock) \
145 cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
147 #define RETIP ((unsigned long)__builtin_return_address(0))
149 #ifdef CONFIG_LOCKDEP
150 static cfs_lock_class_key_t cl_lock_key;
152 static void cl_lock_lockdep_init(struct cl_lock *lock)
154 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
157 static void cl_lock_lockdep_acquire(const struct lu_env *env,
158 struct cl_lock *lock, __u32 enqflags)
160 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
161 #ifdef HAVE_LOCK_MAP_ACQUIRE
162 lock_map_acquire(&lock->dep_map);
163 #else /* HAVE_LOCK_MAP_ACQUIRE */
164 lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
165 /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
166 /* check: */ 2, RETIP);
167 #endif /* HAVE_LOCK_MAP_ACQUIRE */
170 static void cl_lock_lockdep_release(const struct lu_env *env,
171 struct cl_lock *lock)
173 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
174 lock_release(&lock->dep_map, 0, RETIP);
177 #else /* !CONFIG_LOCKDEP */
179 static void cl_lock_lockdep_init(struct cl_lock *lock)
181 static void cl_lock_lockdep_acquire(const struct lu_env *env,
182 struct cl_lock *lock, __u32 enqflags)
184 static void cl_lock_lockdep_release(const struct lu_env *env,
185 struct cl_lock *lock)
188 #endif /* !CONFIG_LOCKDEP */
191 * Adds lock slice to the compound lock.
193 * This is called by cl_object_operations::coo_lock_init() methods to add a
194 * per-layer state to the lock. New state is added at the end of
195 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
197 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
199 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
200 struct cl_object *obj,
201 const struct cl_lock_operations *ops)
204 slice->cls_lock = lock;
205 cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
206 slice->cls_obj = obj;
207 slice->cls_ops = ops;
210 EXPORT_SYMBOL(cl_lock_slice_add);
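/*
 * Usage sketch (illustration only, not part of the original file): how a
 * layer's cl_object_operations::coo_lock_init() method typically attaches
 * its per-layer state with cl_lock_slice_add().  The names prefixed with
 * "example_" (the slab, the slice structure and the operations vector) are
 * hypothetical stand-ins for a real layer such as osc or lovsub.
 */
#if 0 /* sketch, not built */
struct example_lock {
        struct cl_lock_slice els_cl;    /* the embedded slice */
        /* ... layer-private fields ... */
};

static int example_lock_init(const struct lu_env *env, struct cl_object *obj,
                             struct cl_lock *lock, const struct cl_io *io)
{
        struct example_lock *els;
        int result = -ENOMEM;

        OBD_SLAB_ALLOC_PTR_GFP(els, example_lock_kmem, CFS_ALLOC_IO);
        if (els != NULL) {
                /* the new slice lands at the tail of lock->cll_layers,
                 * i.e., at the bottom of the stack */
                cl_lock_slice_add(lock, &els->els_cl, obj, &example_lock_ops);
                result = 0;
        }
        return result;
}
#endif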
213 * Returns true iff a lock with the mode \a has provides at least the same
214 * guarantees as a lock with the mode \a need.
216 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
218 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
219 need == CLM_PHANTOM || need == CLM_GROUP);
220 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
221 has == CLM_PHANTOM || has == CLM_GROUP);
222 CLASSERT(CLM_PHANTOM < CLM_READ);
223 CLASSERT(CLM_READ < CLM_WRITE);
224 CLASSERT(CLM_WRITE < CLM_GROUP);
226 if (has != CLM_GROUP)
231 EXPORT_SYMBOL(cl_lock_mode_match);
234 * Returns true iff extent portions of lock descriptions match.
236 int cl_lock_ext_match(const struct cl_lock_descr *has,
237 const struct cl_lock_descr *need)
240 has->cld_start <= need->cld_start &&
241 has->cld_end >= need->cld_end &&
242 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
243 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
245 EXPORT_SYMBOL(cl_lock_ext_match);
248 * Returns true iff a lock with the description \a has provides at least the
249 * same guarantees as a lock with the description \a need.
251 int cl_lock_descr_match(const struct cl_lock_descr *has,
252 const struct cl_lock_descr *need)
255 cl_object_same(has->cld_obj, need->cld_obj) &&
256 cl_lock_ext_match(has, need);
258 EXPORT_SYMBOL(cl_lock_descr_match);
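/*
 * Usage sketch (illustration only): checking whether an already cached lock
 * covers a newly needed page extent before enqueuing a fresh one.  The
 * helper name and its arguments are hypothetical; the mode ordering it
 * relies on (CLM_PHANTOM < CLM_READ < CLM_WRITE < CLM_GROUP) is asserted
 * by cl_lock_mode_match() above.
 */
#if 0 /* sketch, not built */
static int example_covers(const struct cl_lock *lock, struct cl_object *obj,
                          pgoff_t start, pgoff_t end)
{
        struct cl_lock_descr need;

        need.cld_obj       = obj;
        need.cld_mode      = CLM_READ;  /* weakest non-phantom guarantee */
        need.cld_start     = start;
        need.cld_end       = end;
        need.cld_gid       = 0;
        need.cld_enq_flags = 0;
        /* object identity, extent containment and mode compatibility */
        return cl_lock_descr_match(&lock->cll_descr, &need);
}
#endif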
260 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
262 struct cl_object *obj = lock->cll_descr.cld_obj;
264 LINVRNT(!cl_lock_is_mutexed(lock));
267 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
269 while (!cfs_list_empty(&lock->cll_layers)) {
270 struct cl_lock_slice *slice;
272 slice = cfs_list_entry(lock->cll_layers.next,
273 struct cl_lock_slice, cls_linkage);
274 cfs_list_del_init(lock->cll_layers.next);
275 slice->cls_ops->clo_fini(env, slice);
277 cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
278 cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
279 lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
280 cl_object_put(env, obj);
281 lu_ref_fini(&lock->cll_reference);
282 lu_ref_fini(&lock->cll_holders);
283 cfs_mutex_destroy(&lock->cll_guard);
284 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
289 * Releases a reference on a lock.
291 * When last reference is released, lock is returned to the cache, unless it
292 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed immediately.
295 * \see cl_object_put(), cl_page_put()
297 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
299 struct cl_object *obj;
300 struct cl_site *site;
302 LINVRNT(cl_lock_invariant(env, lock));
304 obj = lock->cll_descr.cld_obj;
305 LINVRNT(obj != NULL);
306 site = cl_object_site(obj);
308 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
309 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
311 if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
312 if (lock->cll_state == CLS_FREEING) {
313 LASSERT(cfs_list_empty(&lock->cll_linkage));
314 cl_lock_free(env, lock);
316 cfs_atomic_dec(&site->cs_locks.cs_busy);
320 EXPORT_SYMBOL(cl_lock_put);
323 * Acquires an additional reference to a lock.
325 * This can be called only by a caller already possessing a reference to \a lock.
328 * \see cl_object_get(), cl_page_get()
330 void cl_lock_get(struct cl_lock *lock)
332 LINVRNT(cl_lock_invariant(NULL, lock));
333 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
334 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
335 cfs_atomic_inc(&lock->cll_ref);
337 EXPORT_SYMBOL(cl_lock_get);
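/*
 * Usage sketch (illustration only): the usual get/put pairing.  A thread
 * that already owns a reference can hand the lock to another context by
 * taking an extra reference with cl_lock_get(); the receiver drops it with
 * cl_lock_put(), which frees the lock only once it has reached CLS_FREEING
 * and the last reference is gone.  The helper name is hypothetical.
 */
#if 0 /* sketch, not built */
static void example_handoff(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_get(lock);              /* safe: caller already holds a ref */
        /* ... pass @lock to another context, use it ... */
        cl_lock_put(env, lock);         /* release the extra reference */
}
#endif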
340 * Acquires a reference to a lock.
342 * This is much like cl_lock_get(), except that this function can be used to
343 * acquire initial reference to the cached lock. Caller has to deal with all
344 * possible races. Use with care!
346 * \see cl_page_get_trust()
348 void cl_lock_get_trust(struct cl_lock *lock)
350 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
352 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
353 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
354 if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
355 cfs_atomic_inc(&site->cs_locks.cs_busy);
357 EXPORT_SYMBOL(cl_lock_get_trust);
360 * Helper function destroying the lock that wasn't completely initialized.
362 * Other threads can acquire references to the top-lock through its
363 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
365 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
367 cl_lock_mutex_get(env, lock);
368 cl_lock_cancel(env, lock);
369 cl_lock_delete(env, lock);
370 cl_lock_mutex_put(env, lock);
371 cl_lock_put(env, lock);
374 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
375 struct cl_object *obj,
376 const struct cl_io *io,
377 const struct cl_lock_descr *descr)
379 struct cl_lock *lock;
380 struct lu_object_header *head;
381 struct cl_site *site = cl_object_site(obj);
384 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
386 cfs_atomic_set(&lock->cll_ref, 1);
387 lock->cll_descr = *descr;
388 lock->cll_state = CLS_NEW;
390 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
392 CFS_INIT_LIST_HEAD(&lock->cll_layers);
393 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
394 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
395 lu_ref_init(&lock->cll_reference);
396 lu_ref_init(&lock->cll_holders);
397 cfs_mutex_init(&lock->cll_guard);
398 cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
399 cfs_waitq_init(&lock->cll_wq);
400 head = obj->co_lu.lo_header;
401 cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
402 cfs_atomic_inc(&site->cs_locks.cs_total);
403 cfs_atomic_inc(&site->cs_locks.cs_created);
404 cl_lock_lockdep_init(lock);
405 cfs_list_for_each_entry(obj, &head->loh_layers,
409 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
411 cl_lock_finish(env, lock);
417 lock = ERR_PTR(-ENOMEM);
422 * Transfer the lock into INTRANSIT state and return the original state.
424 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
425 * \post state: CLS_INTRANSIT
428 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
429 struct cl_lock *lock)
431 enum cl_lock_state state = lock->cll_state;
433 LASSERT(cl_lock_is_mutexed(lock));
434 LASSERT(state != CLS_INTRANSIT);
435 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
436 "Malformed lock state %d.\n", state);
438 cl_lock_state_set(env, lock, CLS_INTRANSIT);
439 lock->cll_intransit_owner = cfs_current();
440 cl_lock_hold_add(env, lock, "intransit", cfs_current());
443 EXPORT_SYMBOL(cl_lock_intransit);
446 * Exit the intransit state and restore the lock state to the original state
448 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
449 enum cl_lock_state state)
451 LASSERT(cl_lock_is_mutexed(lock));
452 LASSERT(lock->cll_state == CLS_INTRANSIT);
453 LASSERT(state != CLS_INTRANSIT);
454 LASSERT(lock->cll_intransit_owner == cfs_current());
456 lock->cll_intransit_owner = NULL;
457 cl_lock_state_set(env, lock, state);
458 cl_lock_unhold(env, lock, "intransit", cfs_current());
460 EXPORT_SYMBOL(cl_lock_extransit);
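/*
 * Usage sketch (illustration only): the INTRANSIT pattern used by
 * cl_use_try() and cl_unuse_try() below.  A thread that has to perform a
 * multi-step state transition (and possibly drop the lock mutex in the
 * middle) first moves the lock into CLS_INTRANSIT, which makes it the sole
 * owner of the state machine, and later restores (or advances) the state.
 * The helper name is hypothetical.
 */
#if 0 /* sketch, not built */
static int example_transition(const struct lu_env *env, struct cl_lock *lock)
{
        enum cl_lock_state saved;
        int result;

        LASSERT(cl_lock_is_mutexed(lock));
        /* pre: state is CLS_ENQUEUED, CLS_HELD or CLS_CACHED */
        saved = cl_lock_intransit(env, lock);
        /* ... long operation; other threads see CLS_INTRANSIT and wait ... */
        result = 0;
        /* restore the externally visible state (cl_use_try() advances it
         * to CLS_HELD here instead) */
        cl_lock_extransit(env, lock, saved);
        return result;
}
#endif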
463 * Checks whether the lock is in the INTRANSIT state.
465 int cl_lock_is_intransit(struct cl_lock *lock)
467 LASSERT(cl_lock_is_mutexed(lock));
468 return lock->cll_state == CLS_INTRANSIT &&
469 lock->cll_intransit_owner != cfs_current();
471 EXPORT_SYMBOL(cl_lock_is_intransit);
473 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
474 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
475 * cover multiple stripes and can trigger cascading timeouts.
477 static int cl_lock_fits_into(const struct lu_env *env,
478 const struct cl_lock *lock,
479 const struct cl_lock_descr *need,
480 const struct cl_io *io)
482 const struct cl_lock_slice *slice;
484 LINVRNT(cl_lock_invariant_trusted(env, lock));
486 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
487 if (slice->cls_ops->clo_fits_into != NULL &&
488 !slice->cls_ops->clo_fits_into(env, slice, need, io))
494 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
495 struct cl_object *obj,
496 const struct cl_io *io,
497 const struct cl_lock_descr *need)
499 struct cl_lock *lock;
500 struct cl_object_header *head;
501 struct cl_site *site;
505 head = cl_object_header(obj);
506 site = cl_object_site(obj);
507 LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
508 cfs_atomic_inc(&site->cs_locks.cs_lookup);
509 cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
512 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
513 lock->cll_state < CLS_FREEING &&
514 lock->cll_error == 0 &&
515 !(lock->cll_flags & CLF_CANCELLED) &&
516 cl_lock_fits_into(env, lock, need, io);
517 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
518 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
521 cl_lock_get_trust(lock);
522 cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
530 * Returns a lock matching description \a need.
532 * This is the main entry point into the cl_lock caching interface. First, a
533 * cache (implemented as a per-object linked list) is consulted. If lock is
534 * found there, it is returned immediately. Otherwise new lock is allocated
535 * and returned. In any case, additional reference to lock is acquired.
537 * \see cl_object_find(), cl_page_find()
539 static struct cl_lock *cl_lock_find(const struct lu_env *env,
540 const struct cl_io *io,
541 const struct cl_lock_descr *need)
543 struct cl_object_header *head;
544 struct cl_object *obj;
545 struct cl_lock *lock;
546 struct cl_site *site;
551 head = cl_object_header(obj);
552 site = cl_object_site(obj);
554 cfs_spin_lock(&head->coh_lock_guard);
555 lock = cl_lock_lookup(env, obj, io, need);
556 cfs_spin_unlock(&head->coh_lock_guard);
559 lock = cl_lock_alloc(env, obj, io, need);
561 struct cl_lock *ghost;
563 cfs_spin_lock(&head->coh_lock_guard);
564 ghost = cl_lock_lookup(env, obj, io, need);
566 cfs_list_add_tail(&lock->cll_linkage,
568 cfs_spin_unlock(&head->coh_lock_guard);
569 cfs_atomic_inc(&site->cs_locks.cs_busy);
571 cfs_spin_unlock(&head->coh_lock_guard);
573 * Other threads can acquire references to the
574 * top-lock through its sub-locks. Hence, it
575 * cannot be cl_lock_free()-ed immediately.
577 cl_lock_finish(env, lock);
586 * Returns existing lock matching given description. This is similar to
587 * cl_lock_find() except that no new lock is created, and returned lock is
588 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
590 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
591 const struct cl_lock_descr *need,
592 const char *scope, const void *source)
594 struct cl_object_header *head;
595 struct cl_object *obj;
596 struct cl_lock *lock;
600 head = cl_object_header(obj);
602 cfs_spin_lock(&head->coh_lock_guard);
603 lock = cl_lock_lookup(env, obj, io, need);
604 cfs_spin_unlock(&head->coh_lock_guard);
609 cl_lock_mutex_get(env, lock);
610 if (lock->cll_state == CLS_INTRANSIT)
611 cl_lock_state_wait(env, lock); /* Don't care return value. */
612 if (lock->cll_state == CLS_CACHED) {
614 result = cl_use_try(env, lock, 1);
616 cl_lock_error(env, lock, result);
618 ok = lock->cll_state == CLS_HELD;
620 cl_lock_hold_add(env, lock, scope, source);
621 cl_lock_user_add(env, lock);
622 cl_lock_put(env, lock);
624 cl_lock_mutex_put(env, lock);
626 cl_lock_put(env, lock);
632 EXPORT_SYMBOL(cl_lock_peek);
635 * Returns a slice within a lock, corresponding to the given layer in the device stack.
640 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
641 const struct lu_device_type *dtype)
643 const struct cl_lock_slice *slice;
645 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
648 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
649 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
654 EXPORT_SYMBOL(cl_lock_at);
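/*
 * Usage sketch (illustration only): how a layer recovers its private slice
 * from a compound lock.  "example_device_type" and "struct example_lock"
 * are hypothetical; real layers do the same with their own device type
 * (compare the cl2osc_lock()-style helpers in the layer code).
 */
#if 0 /* sketch, not built */
static struct example_lock *example_lock_at(const struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;

        slice = cl_lock_at(lock, &example_device_type);
        return slice != NULL ?
               container_of(slice, struct example_lock, els_cl) : NULL;
}
#endif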
656 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
658 struct cl_thread_counters *counters;
660 counters = cl_lock_counters(env, lock);
662 counters->ctc_nr_locks_locked++;
663 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
664 cl_lock_trace(D_TRACE, env, "got mutex", lock);
668 * Locks cl_lock object.
670 * This is used to manipulate cl_lock fields, and to serialize state
671 * transitions in the lock state machine.
673 * \post cl_lock_is_mutexed(lock)
675 * \see cl_lock_mutex_put()
677 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
679 LINVRNT(cl_lock_invariant(env, lock));
681 if (lock->cll_guarder == cfs_current()) {
682 LINVRNT(cl_lock_is_mutexed(lock));
683 LINVRNT(lock->cll_depth > 0);
685 struct cl_object_header *hdr;
686 struct cl_thread_info *info;
689 LINVRNT(lock->cll_guarder != cfs_current());
690 hdr = cl_object_header(lock->cll_descr.cld_obj);
692 * Check that mutices are taken in the bottom-to-top order.
694 info = cl_env_info(env);
695 for (i = 0; i < hdr->coh_nesting; ++i)
696 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
697 cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
698 lock->cll_guarder = cfs_current();
699 LINVRNT(lock->cll_depth == 0);
701 cl_lock_mutex_tail(env, lock);
703 EXPORT_SYMBOL(cl_lock_mutex_get);
706 * Try-locks cl_lock object.
708 * \retval 0 \a lock was successfully locked
710 * \retval -EBUSY \a lock cannot be locked right now
712 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
714 * \see cl_lock_mutex_get()
716 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
720 LINVRNT(cl_lock_invariant_trusted(env, lock));
724 if (lock->cll_guarder == cfs_current()) {
725 LINVRNT(lock->cll_depth > 0);
726 cl_lock_mutex_tail(env, lock);
727 } else if (cfs_mutex_trylock(&lock->cll_guard)) {
728 LINVRNT(lock->cll_depth == 0);
729 lock->cll_guarder = cfs_current();
730 cl_lock_mutex_tail(env, lock);
735 EXPORT_SYMBOL(cl_lock_mutex_try);
738 * Unlocks cl_lock object.
740 * \pre cl_lock_is_mutexed(lock)
742 * \see cl_lock_mutex_get()
744 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
746 struct cl_thread_counters *counters;
748 LINVRNT(cl_lock_invariant(env, lock));
749 LINVRNT(cl_lock_is_mutexed(lock));
750 LINVRNT(lock->cll_guarder == cfs_current());
751 LINVRNT(lock->cll_depth > 0);
753 counters = cl_lock_counters(env, lock);
754 LINVRNT(counters->ctc_nr_locks_locked > 0);
756 cl_lock_trace(D_TRACE, env, "put mutex", lock);
757 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
758 counters->ctc_nr_locks_locked--;
759 if (--lock->cll_depth == 0) {
760 lock->cll_guarder = NULL;
761 cfs_mutex_unlock(&lock->cll_guard);
764 EXPORT_SYMBOL(cl_lock_mutex_put);
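/*
 * Usage sketch (illustration only): serializing access to cl_lock fields
 * with the lock mutex.  cl_lock_mutex_get() is recursive for the owning
 * thread, so it is safe to call from a context that may already hold the
 * mutex.  The helper name is hypothetical.
 */
#if 0 /* sketch, not built */
static enum cl_lock_state example_peek_state(const struct lu_env *env,
                                             struct cl_lock *lock)
{
        enum cl_lock_state state;

        cl_lock_mutex_get(env, lock);
        state = lock->cll_state;        /* fields are stable under cll_guard */
        cl_lock_mutex_put(env, lock);
        return state;
}
#endif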
767 * Returns true iff lock's mutex is owned by the current thread.
769 int cl_lock_is_mutexed(struct cl_lock *lock)
771 return lock->cll_guarder == cfs_current();
773 EXPORT_SYMBOL(cl_lock_is_mutexed);
776 * Returns number of cl_lock mutices held by the current thread (environment).
778 int cl_lock_nr_mutexed(const struct lu_env *env)
780 struct cl_thread_info *info;
785 * NOTE: if summation across all nesting levels (currently 2) proves
786 * too expensive, a summary counter can be added to
787 * struct cl_thread_info.
789 info = cl_env_info(env);
790 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
791 locked += info->clt_counters[i].ctc_nr_locks_locked;
794 EXPORT_SYMBOL(cl_lock_nr_mutexed);
796 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
798 LINVRNT(cl_lock_is_mutexed(lock));
799 LINVRNT(cl_lock_invariant(env, lock));
801 if (!(lock->cll_flags & CLF_CANCELLED)) {
802 const struct cl_lock_slice *slice;
804 lock->cll_flags |= CLF_CANCELLED;
805 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
807 if (slice->cls_ops->clo_cancel != NULL)
808 slice->cls_ops->clo_cancel(env, slice);
814 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
816 struct cl_object_header *head;
817 const struct cl_lock_slice *slice;
819 LINVRNT(cl_lock_is_mutexed(lock));
820 LINVRNT(cl_lock_invariant(env, lock));
823 if (lock->cll_state < CLS_FREEING) {
824 LASSERT(lock->cll_state != CLS_INTRANSIT);
825 cl_lock_state_set(env, lock, CLS_FREEING);
827 head = cl_object_header(lock->cll_descr.cld_obj);
829 cfs_spin_lock(&head->coh_lock_guard);
830 cfs_list_del_init(&lock->cll_linkage);
832 cfs_spin_unlock(&head->coh_lock_guard);
834 * From now on, no new references to this lock can be acquired
835 * by cl_lock_lookup().
837 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
839 if (slice->cls_ops->clo_delete != NULL)
840 slice->cls_ops->clo_delete(env, slice);
843 * From now on, no new references to this lock can be acquired
844 * by layer-specific means (like a pointer from struct
845 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
848 * Lock will be finally freed in cl_lock_put() when last of
849 * existing references goes away.
856 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
857 * top-lock (nesting == 0) accounts for this modification in the per-thread
858 * debugging counters. Sub-lock holds can be released by a thread different
859 * from one that acquired it.
861 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
864 struct cl_thread_counters *counters;
865 enum clt_nesting_level nesting;
867 lock->cll_holds += delta;
868 nesting = cl_lock_nesting(lock);
869 if (nesting == CNL_TOP) {
870 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
871 counters->ctc_nr_held += delta;
872 LASSERT(counters->ctc_nr_held >= 0);
877 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
878 * cl_lock_hold_mod() for the explanation of the debugging code.
880 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
883 struct cl_thread_counters *counters;
884 enum clt_nesting_level nesting;
886 lock->cll_users += delta;
887 nesting = cl_lock_nesting(lock);
888 if (nesting == CNL_TOP) {
889 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
890 counters->ctc_nr_used += delta;
891 LASSERT(counters->ctc_nr_used >= 0);
895 static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
896 const char *scope, const void *source)
898 LINVRNT(cl_lock_is_mutexed(lock));
899 LINVRNT(cl_lock_invariant(env, lock));
900 LASSERT(lock->cll_holds > 0);
903 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
904 lu_ref_del(&lock->cll_holders, scope, source);
905 cl_lock_hold_mod(env, lock, -1);
906 if (lock->cll_holds == 0) {
907 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
908 lock->cll_descr.cld_mode == CLM_GROUP)
910 * If the lock is still a phantom or group lock when the user is
911 * done with it, destroy the lock.
913 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
914 if (lock->cll_flags & CLF_CANCELPEND) {
915 lock->cll_flags &= ~CLF_CANCELPEND;
916 cl_lock_cancel0(env, lock);
918 if (lock->cll_flags & CLF_DOOMED) {
919 /* no longer doomed: it's dead... Jim. */
920 lock->cll_flags &= ~CLF_DOOMED;
921 cl_lock_delete0(env, lock);
928 * Waits until lock state is changed.
930 * This function is called with cl_lock mutex locked, atomically releases
931 * mutex and goes to sleep, waiting for a lock state change (signaled by
932 * cl_lock_signal()), and re-acquires the mutex before return.
934 * This function is used to wait until lock state machine makes some progress
935 * and to emulate synchronous operations on top of asynchronous lock
938 * \retval -EINTR wait was interrupted
940 * \retval 0 wait wasn't interrupted
942 * \pre cl_lock_is_mutexed(lock)
944 * \see cl_lock_signal()
946 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
948 cfs_waitlink_t waiter;
949 cfs_sigset_t blocked;
953 LINVRNT(cl_lock_is_mutexed(lock));
954 LINVRNT(cl_lock_invariant(env, lock));
955 LASSERT(lock->cll_depth == 1);
956 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
958 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
959 result = lock->cll_error;
961 /* To avoid being interrupted by 'non-fatal' signals
962 * (SIGCHLD, for instance), we block them temporarily.
964 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
966 cfs_waitlink_init(&waiter);
967 cfs_waitq_add(&lock->cll_wq, &waiter);
968 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
969 cl_lock_mutex_put(env, lock);
971 LASSERT(cl_lock_nr_mutexed(env) == 0);
972 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
974 cl_lock_mutex_get(env, lock);
975 cfs_set_current_state(CFS_TASK_RUNNING);
976 cfs_waitq_del(&lock->cll_wq, &waiter);
977 result = cfs_signal_pending() ? -EINTR : 0;
979 /* Restore old blocked signals */
980 cfs_restore_sigs(blocked);
984 EXPORT_SYMBOL(cl_lock_state_wait);
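/*
 * Usage sketch (illustration only): the try/wait loop that
 * cl_lock_state_wait() is designed for, the same shape used by cl_wait()
 * and cl_enqueue_locked() below.  The caller is assumed to own a hold and
 * a user on the lock (e.g., after a successful cl_enqueue()); the helper
 * name is hypothetical.
 */
#if 0 /* sketch, not built */
static int example_wait_granted(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        cl_lock_mutex_get(env, lock);
        do {
                result = cl_wait_try(env, lock);
                if (result == CLO_WAIT) {
                        /* atomically drop the mutex, sleep until
                         * cl_lock_signal(), re-take the mutex */
                        result = cl_lock_state_wait(env, lock);
                        if (result == 0)
                                result = CLO_REPEAT;    /* retry after wakeup */
                }
        } while (result == CLO_REPEAT);
        cl_lock_mutex_put(env, lock);
        return result;  /* 0: CLS_HELD reached, -EINTR: interrupted */
}
#endif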
986 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
987 enum cl_lock_state state)
989 const struct cl_lock_slice *slice;
992 LINVRNT(cl_lock_is_mutexed(lock));
993 LINVRNT(cl_lock_invariant(env, lock));
995 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
996 if (slice->cls_ops->clo_state != NULL)
997 slice->cls_ops->clo_state(env, slice, state);
998 cfs_waitq_broadcast(&lock->cll_wq);
1003 * Notifies waiters that lock state changed.
1005 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1006 * layers about state change by calling cl_lock_operations::clo_state()
1009 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1012 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1013 cl_lock_state_signal(env, lock, lock->cll_state);
1016 EXPORT_SYMBOL(cl_lock_signal);
1019 * Changes lock state.
1021 * This function is invoked to notify layers that lock state changed, possibly
1022 * as a result of an asynchronous event such as call-back reception.
1024 * \post lock->cll_state == state
1026 * \see cl_lock_operations::clo_state()
1028 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1029 enum cl_lock_state state)
1031 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
1034 LASSERT(lock->cll_state <= state ||
1035 (lock->cll_state == CLS_CACHED &&
1036 (state == CLS_HELD || /* lock found in cache */
1037 state == CLS_NEW || /* sub-lock canceled */
1038 state == CLS_INTRANSIT)) ||
1039 /* lock is in transit state */
1040 lock->cll_state == CLS_INTRANSIT);
1042 if (lock->cll_state != state) {
1043 cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
1044 cfs_atomic_inc(&site->cs_locks_state[state]);
1046 cl_lock_state_signal(env, lock, state);
1047 lock->cll_state = state;
1051 EXPORT_SYMBOL(cl_lock_state_set);
1053 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1055 const struct cl_lock_slice *slice;
1061 LINVRNT(cl_lock_is_mutexed(lock));
1062 LINVRNT(cl_lock_invariant(env, lock));
1063 LASSERT(lock->cll_state == CLS_INTRANSIT);
1066 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1068 if (slice->cls_ops->clo_unuse != NULL) {
1069 result = slice->cls_ops->clo_unuse(env, slice);
1074 LASSERT(result != -ENOSYS);
1075 } while (result == CLO_REPEAT);
1081 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1082 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1083 * If @atomic == 1 and use fails part-way, the lock must be unused again to
1084 * restore its previous state, so that the whole use operation stays atomic.
1086 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1088 const struct cl_lock_slice *slice;
1090 enum cl_lock_state state;
1093 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1095 LASSERT(lock->cll_state == CLS_CACHED);
1096 if (lock->cll_error)
1097 RETURN(lock->cll_error);
1100 state = cl_lock_intransit(env, lock);
1101 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1102 if (slice->cls_ops->clo_use != NULL) {
1103 result = slice->cls_ops->clo_use(env, slice);
1108 LASSERT(result != -ENOSYS);
1110 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1116 if (result == -ESTALE) {
1118 * -ESTALE means a sublock is being cancelled
1119 * at this time. Set the lock state back to
1120 * CLS_NEW here and ask the caller to repeat.
1123 result = CLO_REPEAT;
1126 /* @atomic means back-off-on-failure. */
1129 rc = cl_unuse_try_internal(env, lock);
1130 /* Vet the results. */
1131 if (rc < 0 && result > 0)
1136 cl_lock_extransit(env, lock, state);
1139 EXPORT_SYMBOL(cl_use_try);
1142 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1145 static int cl_enqueue_kick(const struct lu_env *env,
1146 struct cl_lock *lock,
1147 struct cl_io *io, __u32 flags)
1150 const struct cl_lock_slice *slice;
1154 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1155 if (slice->cls_ops->clo_enqueue != NULL) {
1156 result = slice->cls_ops->clo_enqueue(env,
1162 LASSERT(result != -ENOSYS);
1167 * Tries to enqueue a lock.
1169 * This function is called repeatedly by cl_enqueue() until either lock is
1170 * enqueued, or error occurs. This function does not block waiting for
1171 * networking communication to complete.
1173 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1174 * lock->cll_state == CLS_HELD)
1176 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1177 * \see cl_lock_state::CLS_ENQUEUED
1179 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1180 struct cl_io *io, __u32 flags)
1185 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1189 LINVRNT(cl_lock_is_mutexed(lock));
1191 if (lock->cll_error != 0)
1193 switch (lock->cll_state) {
1195 cl_lock_state_set(env, lock, CLS_QUEUING);
1199 result = cl_enqueue_kick(env, lock, io, flags);
1201 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1204 LASSERT(cl_lock_is_intransit(lock));
1208 /* yank lock from the cache. */
1209 result = cl_use_try(env, lock, 0);
1218 * impossible, only held locks with increased
1219 * ->cll_holds can be enqueued, and they cannot be freed.
1224 } while (result == CLO_REPEAT);
1226 cl_lock_error(env, lock, result);
1227 RETURN(result ?: lock->cll_error);
1229 EXPORT_SYMBOL(cl_enqueue_try);
1232 * Cancel the conflicting lock found during previous enqueue.
1234 * \retval 0 conflicting lock has been canceled.
1235 * \retval -ve error code.
1237 int cl_lock_enqueue_wait(const struct lu_env *env,
1238 struct cl_lock *lock,
1241 struct cl_lock *conflict;
1245 LASSERT(cl_lock_is_mutexed(lock));
1246 LASSERT(lock->cll_state == CLS_QUEUING);
1247 LASSERT(lock->cll_conflict != NULL);
1249 conflict = lock->cll_conflict;
1250 lock->cll_conflict = NULL;
1252 cl_lock_mutex_put(env, lock);
1253 LASSERT(cl_lock_nr_mutexed(env) == 0);
1255 cl_lock_mutex_get(env, conflict);
1256 cl_lock_cancel(env, conflict);
1257 cl_lock_delete(env, conflict);
1259 while (conflict->cll_state != CLS_FREEING) {
1260 rc = cl_lock_state_wait(env, conflict);
1264 cl_lock_mutex_put(env, conflict);
1265 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1266 cl_lock_put(env, conflict);
1269 cl_lock_mutex_get(env, lock);
1274 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1276 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1277 struct cl_io *io, __u32 enqflags)
1283 LINVRNT(cl_lock_is_mutexed(lock));
1284 LINVRNT(cl_lock_invariant(env, lock));
1285 LASSERT(lock->cll_holds > 0);
1287 cl_lock_user_add(env, lock);
1289 result = cl_enqueue_try(env, lock, io, enqflags);
1290 if (result == CLO_WAIT) {
1291 if (lock->cll_conflict != NULL)
1292 result = cl_lock_enqueue_wait(env, lock, 1);
1294 result = cl_lock_state_wait(env, lock);
1301 cl_lock_user_del(env, lock);
1302 cl_lock_error(env, lock, result);
1304 LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1305 lock->cll_state == CLS_ENQUEUED ||
1306 lock->cll_state == CLS_HELD));
1313 * \pre current thread or io owns a hold on lock.
1315 * \post ergo(result == 0, lock->users increased)
1316 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1317 * lock->cll_state == CLS_HELD)
1319 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1320 struct cl_io *io, __u32 enqflags)
1326 cl_lock_lockdep_acquire(env, lock, enqflags);
1327 cl_lock_mutex_get(env, lock);
1328 result = cl_enqueue_locked(env, lock, io, enqflags);
1329 cl_lock_mutex_put(env, lock);
1331 cl_lock_lockdep_release(env, lock);
1332 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1333 lock->cll_state == CLS_HELD));
1336 EXPORT_SYMBOL(cl_enqueue);
1339 * Tries to unlock a lock.
1341 * This function is called repeatedly by cl_unuse() until either lock is
1342 * unlocked, or error occurs.
1343 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1345 * \pre lock->cll_state == CLS_HELD
1347 * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
1349 * \see cl_unuse() cl_lock_operations::clo_unuse()
1350 * \see cl_lock_state::CLS_CACHED
1352 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1355 enum cl_lock_state state = CLS_NEW;
1358 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1360 LASSERT(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED);
1361 if (lock->cll_users > 1) {
1362 cl_lock_user_del(env, lock);
1367 * New lock users (->cll_users) are not protecting unlocking
1368 * from proceeding. From this point, lock eventually reaches
1369 * CLS_CACHED, is reinitialized to CLS_NEW or fails into the error state.
1372 state = cl_lock_intransit(env, lock);
1374 result = cl_unuse_try_internal(env, lock);
1375 LASSERT(lock->cll_state == CLS_INTRANSIT);
1376 LASSERT(result != CLO_WAIT);
1377 cl_lock_user_del(env, lock);
1378 if (result == 0 || result == -ESTALE) {
1380 * Return lock back to the cache. This is the only
1381 * place where lock is moved into CLS_CACHED state.
1383 * If one of ->clo_unuse() methods returned -ESTALE, lock
1384 * cannot be placed into cache and has to be
1385 * re-initialized. This happens e.g., when a sub-lock was
1386 * canceled while unlocking was in progress.
1388 if (state == CLS_HELD && result == 0)
1392 cl_lock_extransit(env, lock, state);
1395 * Hide -ESTALE error.
1396 * Suppose the lock is a glimpse lock with multiple
1397 * stripes, one of its sublocks returned -ENAVAIL,
1398 * and the other sublocks matched write locks. In this case,
1399 * we cannot set this lock to error, because otherwise some of
1400 * its sublocks might not be canceled, and some dirty
1401 * pages would never be written to the OSTs. -jay
1405 CERROR("result = %d, this is unlikely!\n", result);
1406 cl_lock_extransit(env, lock, state);
1409 result = result ?: lock->cll_error;
1411 cl_lock_error(env, lock, result);
1414 EXPORT_SYMBOL(cl_unuse_try);
1416 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1421 result = cl_unuse_try(env, lock);
1423 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1431 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1434 cl_lock_mutex_get(env, lock);
1435 cl_unuse_locked(env, lock);
1436 cl_lock_mutex_put(env, lock);
1437 cl_lock_lockdep_release(env, lock);
1440 EXPORT_SYMBOL(cl_unuse);
1443 * Tries to wait for a lock.
1445 * This function is called repeatedly by cl_wait() until either lock is
1446 * granted, or error occurs. This function does not block waiting for network
1447 * communication to complete.
1449 * \see cl_wait() cl_lock_operations::clo_wait()
1450 * \see cl_lock_state::CLS_HELD
1452 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1454 const struct cl_lock_slice *slice;
1458 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1460 LINVRNT(cl_lock_is_mutexed(lock));
1461 LINVRNT(cl_lock_invariant(env, lock));
1462 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1463 lock->cll_state == CLS_HELD ||
1464 lock->cll_state == CLS_INTRANSIT);
1465 LASSERT(lock->cll_users > 0);
1466 LASSERT(lock->cll_holds > 0);
1469 if (lock->cll_error != 0)
1472 if (cl_lock_is_intransit(lock)) {
1477 if (lock->cll_state == CLS_HELD)
1482 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1483 if (slice->cls_ops->clo_wait != NULL) {
1484 result = slice->cls_ops->clo_wait(env, slice);
1489 LASSERT(result != -ENOSYS);
1491 LASSERT(lock->cll_state != CLS_INTRANSIT);
1492 cl_lock_state_set(env, lock, CLS_HELD);
1494 } while (result == CLO_REPEAT);
1495 RETURN(result ?: lock->cll_error);
1497 EXPORT_SYMBOL(cl_wait_try);
1500 * Waits until enqueued lock is granted.
1502 * \pre current thread or io owns a hold on the lock
1503 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1504 * lock->cll_state == CLS_HELD)
1506 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1508 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1513 cl_lock_mutex_get(env, lock);
1515 LINVRNT(cl_lock_invariant(env, lock));
1516 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1517 "Wrong state %d \n", lock->cll_state);
1518 LASSERT(lock->cll_holds > 0);
1521 result = cl_wait_try(env, lock);
1522 if (result == CLO_WAIT) {
1523 result = cl_lock_state_wait(env, lock);
1530 cl_lock_user_del(env, lock);
1531 cl_lock_error(env, lock, result);
1532 cl_lock_lockdep_release(env, lock);
1534 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1535 cl_lock_mutex_put(env, lock);
1536 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1539 EXPORT_SYMBOL(cl_wait);
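/*
 * Usage sketch (illustration only): the low-level lock life cycle built
 * from the primitives above: take a hold, enqueue, wait until granted, do
 * the work, return the lock to the cache and drop the hold.  The function
 * name and the "example" scope string are hypothetical; cl_lock_request()
 * below packages the same sequence for most callers.
 */
#if 0 /* sketch, not built */
static int example_lock_cycle(const struct lu_env *env, struct cl_io *io,
                              const struct cl_lock_descr *need)
{
        struct cl_lock *lock;
        int rc;

        lock = cl_lock_hold(env, io, need, "example", io);
        if (IS_ERR(lock))
                return PTR_ERR(lock);

        rc = cl_enqueue(env, lock, io, need->cld_enq_flags);
        if (rc == 0) {
                rc = cl_wait(env, lock);        /* block until CLS_HELD */
                if (rc == 0) {
                        /* ... the extent is protected; do the I/O ... */
                        cl_unuse(env, lock);    /* back to CLS_CACHED */
                }
        }
        cl_lock_release(env, lock, "example", io);
        return rc;
}
#endif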
1542 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock weight.
1545 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1547 const struct cl_lock_slice *slice;
1548 unsigned long pound;
1549 unsigned long ounce;
1552 LINVRNT(cl_lock_is_mutexed(lock));
1553 LINVRNT(cl_lock_invariant(env, lock));
1556 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1557 if (slice->cls_ops->clo_weigh != NULL) {
1558 ounce = slice->cls_ops->clo_weigh(env, slice);
1560 if (pound < ounce) /* over-weight^Wflow */
1566 EXPORT_SYMBOL(cl_lock_weigh);
1569 * Notifies layers that lock description changed.
1571 * The server can grant the client a lock different from the one requested
1572 * (e.g., larger in extent). This method is called when the actually granted
1573 * lock description becomes known, to let layers accommodate the changed lock description.
1576 * \see cl_lock_operations::clo_modify()
1578 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1579 const struct cl_lock_descr *desc)
1581 const struct cl_lock_slice *slice;
1582 struct cl_object *obj = lock->cll_descr.cld_obj;
1583 struct cl_object_header *hdr = cl_object_header(obj);
1587 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1588 /* don't allow object to change */
1589 LASSERT(obj == desc->cld_obj);
1590 LINVRNT(cl_lock_is_mutexed(lock));
1591 LINVRNT(cl_lock_invariant(env, lock));
1593 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1594 if (slice->cls_ops->clo_modify != NULL) {
1595 result = slice->cls_ops->clo_modify(env, slice, desc);
1600 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1601 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1603 * Just replace description in place. Nothing more is needed for
1604 * now. If locks were indexed according to their extent and/or mode,
1605 * that index would have to be updated here.
1607 cfs_spin_lock(&hdr->coh_lock_guard);
1608 lock->cll_descr = *desc;
1609 cfs_spin_unlock(&hdr->coh_lock_guard);
1612 EXPORT_SYMBOL(cl_lock_modify);
1615 * Initializes lock closure with a given origin.
1617 * \see cl_lock_closure
1619 void cl_lock_closure_init(const struct lu_env *env,
1620 struct cl_lock_closure *closure,
1621 struct cl_lock *origin, int wait)
1623 LINVRNT(cl_lock_is_mutexed(origin));
1624 LINVRNT(cl_lock_invariant(env, origin));
1626 CFS_INIT_LIST_HEAD(&closure->clc_list);
1627 closure->clc_origin = origin;
1628 closure->clc_wait = wait;
1629 closure->clc_nr = 0;
1631 EXPORT_SYMBOL(cl_lock_closure_init);
1634 * Builds a closure of \a lock.
1636 * Building of a closure consists of adding initial lock (\a lock) into it,
1637 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1638 * methods might call cl_lock_closure_build() recursively again, adding more
1639 * locks to the closure, etc.
1641 * \see cl_lock_closure
1643 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1644 struct cl_lock_closure *closure)
1646 const struct cl_lock_slice *slice;
1650 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1651 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1653 result = cl_lock_enclosure(env, lock, closure);
1655 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1656 if (slice->cls_ops->clo_closure != NULL) {
1657 result = slice->cls_ops->clo_closure(env, slice,
1665 cl_lock_disclosure(env, closure);
1668 EXPORT_SYMBOL(cl_lock_closure_build);
1671 * Adds new lock to a closure.
1673 * Try-locks \a lock and, if that succeeds, adds it to the closure (never more
1674 * than once). If the try-lock fails, returns CLO_REPEAT, after optionally
1675 * waiting until the next try-lock is likely to succeed.
1677 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1678 struct cl_lock_closure *closure)
1682 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1683 if (!cl_lock_mutex_try(env, lock)) {
1685 * If lock->cll_inclosure is not empty, lock is already in this closure.
1688 if (cfs_list_empty(&lock->cll_inclosure)) {
1689 cl_lock_get_trust(lock);
1690 lu_ref_add(&lock->cll_reference, "closure", closure);
1691 cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1694 cl_lock_mutex_put(env, lock);
1697 cl_lock_disclosure(env, closure);
1698 if (closure->clc_wait) {
1699 cl_lock_get_trust(lock);
1700 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1701 cl_lock_mutex_put(env, closure->clc_origin);
1703 LASSERT(cl_lock_nr_mutexed(env) == 0);
1704 cl_lock_mutex_get(env, lock);
1705 cl_lock_mutex_put(env, lock);
1707 cl_lock_mutex_get(env, closure->clc_origin);
1708 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1709 cl_lock_put(env, lock);
1711 result = CLO_REPEAT;
1715 EXPORT_SYMBOL(cl_lock_enclosure);
1717 /** Releases mutices of enclosed locks. */
1718 void cl_lock_disclosure(const struct lu_env *env,
1719 struct cl_lock_closure *closure)
1721 struct cl_lock *scan;
1722 struct cl_lock *temp;
1724 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1725 cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1727 cfs_list_del_init(&scan->cll_inclosure);
1728 cl_lock_mutex_put(env, scan);
1729 lu_ref_del(&scan->cll_reference, "closure", closure);
1730 cl_lock_put(env, scan);
1733 LASSERT(closure->clc_nr == 0);
1735 EXPORT_SYMBOL(cl_lock_disclosure);
1737 /** Finalizes a closure. */
1738 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1740 LASSERT(closure->clc_nr == 0);
1741 LASSERT(cfs_list_empty(&closure->clc_list));
1743 EXPORT_SYMBOL(cl_lock_closure_fini);
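/*
 * Usage sketch (illustration only): collecting several locks into a closure
 * so that they can be manipulated while all of their mutices are held.  The
 * caller is assumed to hold the mutex of @origin already; the helper name
 * is hypothetical.
 */
#if 0 /* sketch, not built */
static int example_with_both(const struct lu_env *env, struct cl_lock *origin,
                             struct cl_lock *other)
{
        struct cl_lock_closure closure;
        int rc;

        cl_lock_closure_init(env, &closure, origin, 1 /* wait on contention */);
        rc = cl_lock_closure_build(env, other, &closure);
        if (rc == 0) {
                /* every lock on closure.clc_list is now mutexed; @other may
                 * have pulled further locks in via ->clo_closure() */
        }
        cl_lock_disclosure(env, &closure);      /* drop the collected mutices */
        cl_lock_closure_fini(&closure);
        return rc;
}
#endif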
1746 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1747 * destroyed, then destroys the lock. If there are holds on the lock,
1748 * destruction is postponed until all holds are released. This is called when a
1749 * decision is made to destroy the lock in the future, e.g., when a blocking AST
1750 * is received on it or a fatal communication error happens.
1752 * Caller must have a reference on this lock to prevent a situation, when
1753 * deleted lock lingers in memory for indefinite time, because nobody calls
1754 * cl_lock_put() to finish it.
1756 * \pre atomic_read(&lock->cll_ref) > 0
1757 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1758 * cl_lock_nr_mutexed(env) == 1)
1759 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1760 * held, as deletion of sub-locks might require releasing a top-lock mutex]
1763 * \see cl_lock_operations::clo_delete()
1764 * \see cl_lock::cll_holds
1766 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1768 LINVRNT(cl_lock_is_mutexed(lock));
1769 LINVRNT(cl_lock_invariant(env, lock));
1770 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1771 cl_lock_nr_mutexed(env) == 1));
1774 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1775 if (lock->cll_holds == 0)
1776 cl_lock_delete0(env, lock);
1778 lock->cll_flags |= CLF_DOOMED;
1781 EXPORT_SYMBOL(cl_lock_delete);
1784 * Marks the lock as irrecoverably failed and marks it for destruction. This
1785 * happens when, e.g., the server fails to grant a lock to us, or a networking time-out occurs.
1788 * \pre atomic_read(&lock->cll_ref) > 0
1790 * \see clo_lock_delete()
1791 * \see cl_lock::cll_holds
1793 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1795 LINVRNT(cl_lock_is_mutexed(lock));
1796 LINVRNT(cl_lock_invariant(env, lock));
1799 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1800 if (lock->cll_error == 0 && error != 0) {
1801 lock->cll_error = error;
1802 cl_lock_signal(env, lock);
1803 cl_lock_cancel(env, lock);
1804 cl_lock_delete(env, lock);
1808 EXPORT_SYMBOL(cl_lock_error);
1811 * Cancels this lock. Notifies layers
1812 * (bottom-to-top) that the lock is being cancelled, then destroys the lock. If
1813 * there are holds on the lock, cancellation is postponed until
1814 * all holds are released.
1816 * Cancellation notification is delivered to layers at most once.
1818 * \see cl_lock_operations::clo_cancel()
1819 * \see cl_lock::cll_holds
1821 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1823 LINVRNT(cl_lock_is_mutexed(lock));
1824 LINVRNT(cl_lock_invariant(env, lock));
1827 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1828 if (lock->cll_holds == 0)
1829 cl_lock_cancel0(env, lock);
1831 lock->cll_flags |= CLF_CANCELPEND;
1834 EXPORT_SYMBOL(cl_lock_cancel);
1837 * Finds an existing lock covering given page and optionally different from a
1838 * given \a except lock.
1840 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
1841 struct cl_page *page, struct cl_lock *except,
1842 int pending, int canceld)
1844 struct cl_object_header *head;
1845 struct cl_lock *scan;
1846 struct cl_lock *lock;
1847 struct cl_lock_descr *need;
1851 head = cl_object_header(obj);
1852 need = &cl_env_info(env)->clt_descr;
1855 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1857 need->cld_start = need->cld_end = page->cp_index;
1858 need->cld_enq_flags = 0;
1860 cfs_spin_lock(&head->coh_lock_guard);
1861 /* It is fine to match any group lock since there could be only one
1862 * with a unique gid, and it conflicts with all other lock modes too */
1863 cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1864 if (scan != except &&
1865 (scan->cll_descr.cld_mode == CLM_GROUP ||
1866 cl_lock_ext_match(&scan->cll_descr, need)) &&
1867 scan->cll_state >= CLS_HELD &&
1868 scan->cll_state < CLS_FREEING &&
1870 * This check is racy as the lock can be canceled right
1871 * after it is done, but this is fine, because page exists
1874 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1875 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1876 /* Don't increase cs_hit here since this
1877 * is just a helper function. */
1878 cl_lock_get_trust(scan);
1883 cfs_spin_unlock(&head->coh_lock_guard);
1886 EXPORT_SYMBOL(cl_lock_at_page);
1889 * Calculates the page offset at the layer of @lock.
1890 * At the time of this writing, @page is the top page and @lock is a sub-lock.
1892 static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
1894 struct lu_device_type *dtype;
1895 const struct cl_page_slice *slice;
1897 dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
1898 slice = cl_page_at(page, dtype);
1899 LASSERT(slice != NULL);
1900 return slice->cpl_page->cp_index;
1904 * Check if page @page is covered by an extra lock or discard it.
1906 static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
1907 struct cl_page *page, void *cbdata)
1909 struct cl_thread_info *info = cl_env_info(env);
1910 struct cl_lock *lock = cbdata;
1911 pgoff_t index = pgoff_at_lock(page, lock);
1913 if (index >= info->clt_fn_index) {
1914 struct cl_lock *tmp;
1916 /* refresh non-overlapped index */
1917 tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, page, lock,
1920 /* Cache the first-non-overlapped index so as to skip
1921 * all pages within [index, clt_fn_index). This
1922 * is safe because if tmp lock is canceled, it will
1923 * discard these pages. */
1924 info->clt_fn_index = tmp->cll_descr.cld_end + 1;
1925 if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
1926 info->clt_fn_index = CL_PAGE_EOF;
1927 cl_lock_put(env, tmp);
1928 } else if (cl_page_own(env, io, page) == 0) {
1929 /* discard the page */
1930 cl_page_unmap(env, io, page);
1931 cl_page_discard(env, io, page);
1932 cl_page_disown(env, io, page);
1934 LASSERT(page->cp_state == CPS_FREEING);
1938 info->clt_next_index = index + 1;
1939 return CLP_GANG_OKAY;
1942 static int pageout_cb(const struct lu_env *env, struct cl_io *io,
1943 struct cl_page *page, void *cbdata)
1945 struct cl_thread_info *info = cl_env_info(env);
1946 struct cl_page_list *queue = &info->clt_queue.c2_qin;
1947 struct cl_lock *lock = cbdata;
1948 typeof(cl_page_own) *page_own;
1949 int rc = CLP_GANG_OKAY;
1951 page_own = queue->pl_nr ? cl_page_own_try : cl_page_own;
1952 if (page_own(env, io, page) == 0) {
1953 cl_page_list_add(queue, page);
1954 info->clt_next_index = pgoff_at_lock(page, lock) + 1;
1955 } else if (page->cp_state != CPS_FREEING) {
1956 /* cl_page_own() won't fail unless
1957 * the page is being freed. */
1958 LASSERT(queue->pl_nr != 0);
1959 rc = CLP_GANG_AGAIN;
1966 * Invalidate pages protected by the given lock, sending them out to the
1967 * server first, if necessary.
1969 * This function does the following:
1971 * - collects a list of pages to be invalidated,
1973 * - unmaps them from the user virtual memory,
1975 * - sends dirty pages to the server,
1977 * - waits for transfer completion,
1979 * - discards pages, and throws them out of memory.
1981 * If \a discard is set, pages are discarded without sending them to the server.
1984 * If error happens on any step, the process continues anyway (the reasoning
1985 * behind this being that lock cancellation cannot be delayed indefinitely).
1987 int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
1990 struct cl_thread_info *info = cl_env_info(env);
1991 struct cl_io *io = &info->clt_io;
1992 struct cl_2queue *queue = &info->clt_queue;
1993 struct cl_lock_descr *descr = &lock->cll_descr;
1994 cl_page_gang_cb_t cb;
1999 LINVRNT(cl_lock_invariant(env, lock));
2002 io->ci_obj = cl_object_top(descr->cld_obj);
2003 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
2007 cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : pageout_cb;
2008 info->clt_fn_index = info->clt_next_index = descr->cld_start;
2010 cl_2queue_init(queue);
2011 res = cl_page_gang_lookup(env, descr->cld_obj, io,
2012 info->clt_next_index, descr->cld_end,
2014 page_count = queue->c2_qin.pl_nr;
2015 if (page_count > 0) {
2016 /* must be writeback case */
2017 LASSERTF(descr->cld_mode >= CLM_WRITE, "lock mode %s\n",
2018 cl_lock_mode_name(descr->cld_mode));
2020 result = cl_page_list_unmap(env, io, &queue->c2_qin);
2022 long timeout = 600; /* 10 minutes. */
2023 /* for debug purpose, if this request can't be
2024 * finished in 10 minutes, we hope it can
2027 result = cl_io_submit_sync(env, io, CRT_WRITE,
2031 CWARN("Writing %lu pages error: %d\n",
2032 page_count, result);
2034 cl_2queue_discard(env, io, queue);
2035 cl_2queue_disown(env, io, queue);
2036 cl_2queue_fini(env, queue);
2039 if (info->clt_next_index > descr->cld_end)
2042 if (res == CLP_GANG_RESCHED)
2044 } while (res != CLP_GANG_OKAY);
2046 cl_io_fini(env, io);
2049 EXPORT_SYMBOL(cl_lock_page_out);
2052 * Eliminate all locks for a given object.
2054 * Caller has to guarantee that no lock is in active use.
2056 * \param cancel when this is set, cl_locks_prune() cancels locks before destroying them.
2059 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
2061 struct cl_object_header *head;
2062 struct cl_lock *lock;
2065 head = cl_object_header(obj);
2067 * If locks are destroyed without cancellation, all pages must be
2068 * already destroyed (as otherwise they will be left unprotected).
2070 LASSERT(ergo(!cancel,
2071 head->coh_tree.rnode == NULL && head->coh_pages == 0));
2073 cfs_spin_lock(&head->coh_lock_guard);
2074 while (!cfs_list_empty(&head->coh_locks)) {
2075 lock = container_of(head->coh_locks.next,
2076 struct cl_lock, cll_linkage);
2077 cl_lock_get_trust(lock);
2078 cfs_spin_unlock(&head->coh_lock_guard);
2079 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2082 cl_lock_mutex_get(env, lock);
2083 if (lock->cll_state < CLS_FREEING) {
2084 LASSERT(lock->cll_holds == 0);
2085 LASSERT(lock->cll_users <= 1);
2086 if (unlikely(lock->cll_users == 1)) {
2087 struct l_wait_info lwi = { 0 };
2089 cl_lock_mutex_put(env, lock);
2090 l_wait_event(lock->cll_wq,
2091 lock->cll_users == 0,
2097 cl_lock_cancel(env, lock);
2098 cl_lock_delete(env, lock);
2100 cl_lock_mutex_put(env, lock);
2101 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2102 cl_lock_put(env, lock);
2103 cfs_spin_lock(&head->coh_lock_guard);
2105 cfs_spin_unlock(&head->coh_lock_guard);
2108 EXPORT_SYMBOL(cl_locks_prune);
2110 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2111 const struct cl_io *io,
2112 const struct cl_lock_descr *need,
2113 const char *scope, const void *source)
2115 struct cl_lock *lock;
2120 lock = cl_lock_find(env, io, need);
2123 cl_lock_mutex_get(env, lock);
2124 if (lock->cll_state < CLS_FREEING &&
2125 !(lock->cll_flags & CLF_CANCELLED)) {
2126 cl_lock_hold_mod(env, lock, +1);
2127 lu_ref_add(&lock->cll_holders, scope, source);
2128 lu_ref_add(&lock->cll_reference, scope, source);
2131 cl_lock_mutex_put(env, lock);
2132 cl_lock_put(env, lock);
2138 * Returns a lock matching \a need description with a reference and a hold on it.
2141 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2142 * guarantees that lock is not in the CLS_FREEING state on return.
2144 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2145 const struct cl_lock_descr *need,
2146 const char *scope, const void *source)
2148 struct cl_lock *lock;
2152 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2154 cl_lock_mutex_put(env, lock);
2157 EXPORT_SYMBOL(cl_lock_hold);
2160 * Main high-level entry point of the cl_lock interface: finds an existing lock
2161 * or enqueues a new lock matching the given description.
2163 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2164 const struct cl_lock_descr *need,
2165 const char *scope, const void *source)
2167 struct cl_lock *lock;
2169 __u32 enqflags = need->cld_enq_flags;
2173 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2177 rc = cl_enqueue_locked(env, lock, io, enqflags);
2179 if (cl_lock_fits_into(env, lock, need, io)) {
2180 if (!(enqflags & CEF_AGL)) {
2181 cl_lock_mutex_put(env, lock);
2182 cl_lock_lockdep_acquire(env, lock,
2188 cl_unuse_locked(env, lock);
2190 cl_lock_trace(D_DLMTRACE, env,
2191 rc <= 0 ? "enqueue failed" : "agl succeed", lock);
2192 cl_lock_hold_release(env, lock, scope, source);
2193 cl_lock_mutex_put(env, lock);
2194 lu_ref_del(&lock->cll_reference, scope, source);
2195 cl_lock_put(env, lock);
2197 LASSERT(enqflags & CEF_AGL);
2199 } else if (rc != 0) {
2205 EXPORT_SYMBOL(cl_lock_request);
2208 * Adds a hold to a known lock.
2210 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2211 const char *scope, const void *source)
2213 LINVRNT(cl_lock_is_mutexed(lock));
2214 LINVRNT(cl_lock_invariant(env, lock));
2215 LASSERT(lock->cll_state != CLS_FREEING);
2218 cl_lock_hold_mod(env, lock, +1);
2220 lu_ref_add(&lock->cll_holders, scope, source);
2221 lu_ref_add(&lock->cll_reference, scope, source);
2224 EXPORT_SYMBOL(cl_lock_hold_add);
2227 * Releases a hold and a reference on a lock, on which caller acquired a hold.
2230 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2231 const char *scope, const void *source)
2233 LINVRNT(cl_lock_invariant(env, lock));
2235 cl_lock_hold_release(env, lock, scope, source);
2236 lu_ref_del(&lock->cll_reference, scope, source);
2237 cl_lock_put(env, lock);
2240 EXPORT_SYMBOL(cl_lock_unhold);
2243 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2245 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2246 const char *scope, const void *source)
2248 LINVRNT(cl_lock_invariant(env, lock));
2250 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2251 cl_lock_mutex_get(env, lock);
2252 cl_lock_hold_release(env, lock, scope, source);
2253 cl_lock_mutex_put(env, lock);
2254 lu_ref_del(&lock->cll_reference, scope, source);
2255 cl_lock_put(env, lock);
2258 EXPORT_SYMBOL(cl_lock_release);
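/*
 * Usage sketch (illustration only): the high-level request/release pattern.
 * cl_lock_request() finds or creates a lock, enqueues it and returns it
 * held and in use; the caller unuses it and drops the hold when done.  The
 * function name and the "example" scope string are hypothetical, and the
 * sketch assumes CEF_AGL is not used.
 */
#if 0 /* sketch, not built */
static int example_protect_extent(const struct lu_env *env, struct cl_io *io,
                                  struct cl_object *obj,
                                  pgoff_t start, pgoff_t end)
{
        struct cl_lock_descr descr;
        struct cl_lock *lock;

        descr.cld_obj       = obj;
        descr.cld_mode      = CLM_READ;
        descr.cld_start     = start;
        descr.cld_end       = end;
        descr.cld_gid       = 0;
        descr.cld_enq_flags = 0;

        lock = cl_lock_request(env, io, &descr, "example", cfs_current());
        if (IS_ERR(lock))
                return PTR_ERR(lock);
        /* ... pages in [start, end] are protected here ... */
        cl_unuse(env, lock);                    /* drop the user count */
        cl_lock_release(env, lock, "example", cfs_current());
        return 0;
}
#endif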
2260 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2262 LINVRNT(cl_lock_is_mutexed(lock));
2263 LINVRNT(cl_lock_invariant(env, lock));
2266 cl_lock_used_mod(env, lock, +1);
2269 EXPORT_SYMBOL(cl_lock_user_add);
2271 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2273 LINVRNT(cl_lock_is_mutexed(lock));
2274 LINVRNT(cl_lock_invariant(env, lock));
2275 LASSERT(lock->cll_users > 0);
2278 cl_lock_used_mod(env, lock, -1);
2279 if (lock->cll_users == 0)
2280 cfs_waitq_broadcast(&lock->cll_wq);
2283 EXPORT_SYMBOL(cl_lock_user_del);
2285 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2287 static const char *names[] = {
2288 [CLM_PHANTOM] = "P",
2293 if (0 <= mode && mode < ARRAY_SIZE(names))
2298 EXPORT_SYMBOL(cl_lock_mode_name);
2301 * Prints human readable representation of a lock description.
2303 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2304 lu_printer_t printer,
2305 const struct cl_lock_descr *descr)
2307 const struct lu_fid *fid;
2309 fid = lu_object_fid(&descr->cld_obj->co_lu);
2310 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2312 EXPORT_SYMBOL(cl_lock_descr_print);
2315 * Prints human readable representation of \a lock to the \a f.
2317 void cl_lock_print(const struct lu_env *env, void *cookie,
2318 lu_printer_t printer, const struct cl_lock *lock)
2320 const struct cl_lock_slice *slice;
2321 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2322 lock, cfs_atomic_read(&lock->cll_ref),
2323 lock->cll_state, lock->cll_error, lock->cll_holds,
2324 lock->cll_users, lock->cll_flags);
2325 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2326 (*printer)(env, cookie, " {\n");
2328 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2329 (*printer)(env, cookie, " %s@%p: ",
2330 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2332 if (slice->cls_ops->clo_print != NULL)
2333 slice->cls_ops->clo_print(env, cookie, printer, slice);
2334 (*printer)(env, cookie, "\n");
2336 (*printer)(env, cookie, "} lock@%p\n", lock);
2338 EXPORT_SYMBOL(cl_lock_print);
2340 int cl_lock_init(void)
2342 return lu_kmem_init(cl_lock_caches);
2345 void cl_lock_fini(void)
2347 lu_kmem_fini(cl_lock_caches);