/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011 Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Extent Lock.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <lu_time.h>

#include <cl_object.h>
#include "cl_internal.h"
/** Lock class of cl_lock::cll_guard */
static cfs_lock_class_key_t cl_lock_guard_class;
static cfs_mem_cache_t *cl_lock_kmem;

static struct lu_kmem_descr cl_lock_caches[] = {
        {
                .ckd_cache = &cl_lock_kmem,
                .ckd_name  = "cl_lock_kmem",
                .ckd_size  = sizeof (struct cl_lock)
        },
        {
                .ckd_cache = NULL
        }
};
/**
 * Basic lock invariant that is maintained at all times. Caller either has a
 * reference to \a lock, or somehow assures that \a lock cannot be freed.
 *
 * \see cl_lock_invariant()
 */
static int cl_lock_invariant_trusted(const struct lu_env *env,
                                     const struct cl_lock *lock)
{
        return  ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
                cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
                lock->cll_holds >= lock->cll_users &&
                lock->cll_holds >= 0 &&
                lock->cll_users >= 0 &&
                lock->cll_depth >= 0;
}

/**
 * Stronger lock invariant, checking that caller has a reference on a lock.
 *
 * \see cl_lock_invariant_trusted()
 */
static int cl_lock_invariant(const struct lu_env *env,
                             const struct cl_lock *lock)
{
        int result;

        result = cfs_atomic_read(&lock->cll_ref) > 0 &&
                 cl_lock_invariant_trusted(env, lock);
        if (!result && env != NULL)
                CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
        return result;
}
/**
 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
 */
static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
{
        return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
}

/**
 * Returns a set of counters for this lock, depending on a lock nesting.
 */
static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
                                                   const struct cl_lock *lock)
{
        struct cl_thread_info *info;
        enum clt_nesting_level nesting;

        info = cl_env_info(env);
        nesting = cl_lock_nesting(lock);
        LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
        return &info->clt_counters[nesting];
}
static void cl_lock_trace0(int level, const struct lu_env *env,
                           const char *prefix, const struct cl_lock *lock,
                           const char *func, const int line)
{
        struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
        CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
                      "(%p/%d/%d) at %s():%d\n",
               prefix, lock, cfs_atomic_read(&lock->cll_ref),
               lock->cll_guarder, lock->cll_depth,
               lock->cll_state, lock->cll_error, lock->cll_holds,
               lock->cll_users, lock->cll_flags,
               env, h->coh_nesting, cl_lock_nr_mutexed(env),
               func, line);
}
#define cl_lock_trace(level, env, prefix, lock)                         \
        cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)

#define RETIP ((unsigned long)__builtin_return_address(0))
#ifdef CONFIG_LOCKDEP
static cfs_lock_class_key_t cl_lock_key;

static void cl_lock_lockdep_init(struct cl_lock *lock)
{
        lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
}

static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
#ifdef HAVE_LOCK_MAP_ACQUIRE
        lock_map_acquire(&lock->dep_map);
#else  /* HAVE_LOCK_MAP_ACQUIRE */
        lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
                     /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
                     /* check: */ 2, RETIP);
#endif /* HAVE_LOCK_MAP_ACQUIRE */
}

static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
        lock_release(&lock->dep_map, 0, RETIP);
}

#else /* !CONFIG_LOCKDEP */

static void cl_lock_lockdep_init(struct cl_lock *lock)
{}
static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{}
static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{}

#endif /* !CONFIG_LOCKDEP */
/**
 * Adds lock slice to the compound lock.
 *
 * This is called by cl_object_operations::coo_lock_init() methods to add a
 * per-layer state to the lock. New state is added at the end of
 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
                       struct cl_object *obj,
                       const struct cl_lock_operations *ops)
{
        ENTRY;
        slice->cls_lock = lock;
        cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
        slice->cls_obj = obj;
        slice->cls_ops = ops;
        EXIT;
}
EXPORT_SYMBOL(cl_lock_slice_add);
/**
 * Returns true iff a lock with the mode \a has provides at least the same
 * guarantees as a lock with the mode \a need.
 */
int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
{
        LINVRNT(need == CLM_READ || need == CLM_WRITE ||
                need == CLM_PHANTOM || need == CLM_GROUP);
        LINVRNT(has == CLM_READ || has == CLM_WRITE ||
                has == CLM_PHANTOM || has == CLM_GROUP);
        CLASSERT(CLM_PHANTOM < CLM_READ);
        CLASSERT(CLM_READ < CLM_WRITE);
        CLASSERT(CLM_WRITE < CLM_GROUP);

        if (has != CLM_GROUP)
                return need <= has;
        else
                return need == has;
}
EXPORT_SYMBOL(cl_lock_mode_match);
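
/*
 * A usage sketch (illustrative, not part of the original source): because the
 * modes are ordered CLM_PHANTOM < CLM_READ < CLM_WRITE < CLM_GROUP, a cached
 * write lock can satisfy a read request, while a group lock matches nothing
 * but another group lock.
 *
 * \code
 *      LASSERT(cl_lock_mode_match(CLM_WRITE, CLM_READ));  // write covers read
 *      LASSERT(!cl_lock_mode_match(CLM_READ, CLM_WRITE)); // read is weaker
 *      LASSERT(!cl_lock_mode_match(CLM_GROUP, CLM_READ)); // group matches group only
 * \endcode
 */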
/**
 * Returns true iff extent portions of lock descriptions match.
 */
int cl_lock_ext_match(const struct cl_lock_descr *has,
                      const struct cl_lock_descr *need)
{
        return
                has->cld_start <= need->cld_start &&
                has->cld_end >= need->cld_end &&
                cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
                (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
}
EXPORT_SYMBOL(cl_lock_ext_match);

/**
 * Returns true iff a lock with the description \a has provides at least the
 * same guarantees as a lock with the description \a need.
 */
int cl_lock_descr_match(const struct cl_lock_descr *has,
                        const struct cl_lock_descr *need)
{
        return
                cl_object_same(has->cld_obj, need->cld_obj) &&
                cl_lock_ext_match(has, need);
}
EXPORT_SYMBOL(cl_lock_descr_match);
static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object *obj = lock->cll_descr.cld_obj;

        LINVRNT(!cl_lock_is_mutexed(lock));

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
        cfs_might_sleep();
        while (!cfs_list_empty(&lock->cll_layers)) {
                struct cl_lock_slice *slice;

                slice = cfs_list_entry(lock->cll_layers.next,
                                       struct cl_lock_slice, cls_linkage);
                cfs_list_del_init(lock->cll_layers.next);
                slice->cls_ops->clo_fini(env, slice);
        }
        cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
        cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
        lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
        cl_object_put(env, obj);
        lu_ref_fini(&lock->cll_reference);
        lu_ref_fini(&lock->cll_holders);
        cfs_mutex_destroy(&lock->cll_guard);
        OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
        EXIT;
}
/**
 * Releases a reference on a lock.
 *
 * When last reference is released, lock is returned to the cache, unless it
 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
 * immediately.
 *
 * \see cl_object_put(), cl_page_put()
 */
void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object        *obj;
        struct cl_object_header *head;
        struct cl_site          *site;

        LINVRNT(cl_lock_invariant(env, lock));
        ENTRY;
        obj  = lock->cll_descr.cld_obj;
        LINVRNT(obj != NULL);
        head = cl_object_header(obj);
        site = cl_object_site(obj);

        CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
               cfs_atomic_read(&lock->cll_ref), lock, RETIP);

        if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
                if (lock->cll_state == CLS_FREEING) {
                        LASSERT(cfs_list_empty(&lock->cll_linkage));
                        cl_lock_free(env, lock);
                }
                cfs_atomic_dec(&site->cs_locks.cs_busy);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_lock_put);

/**
 * Acquires an additional reference to a lock.
 *
 * This can be called only by caller already possessing a reference to \a
 * lock.
 *
 * \see cl_object_get(), cl_page_get()
 */
void cl_lock_get(struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(NULL, lock));
        CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
        cfs_atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);
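
/*
 * Reference-counting sketch (illustrative; assumes the caller already owns a
 * reference through, e.g., cl_lock_hold()): every cl_lock_get() must be
 * balanced by a cl_lock_put(), and the final put either leaves the lock
 * cached or frees a CLS_FREEING lock. do_something_slow() below is a
 * hypothetical helper, not a real API.
 *
 * \code
 *      cl_lock_get(lock);            // pin the lock across a blocking call
 *      do_something_slow(env, lock); // hypothetical
 *      cl_lock_put(env, lock);       // drop the pin
 * \endcode
 */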
/**
 * Acquires a reference to a lock.
 *
 * This is much like cl_lock_get(), except that this function can be used to
 * acquire initial reference to the cached lock. Caller has to deal with all
 * possible races. Use with care!
 *
 * \see cl_page_get_trust()
 */
void cl_lock_get_trust(struct cl_lock *lock)
{
        struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);

        CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
               cfs_atomic_read(&lock->cll_ref), lock, RETIP);
        if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
                cfs_atomic_inc(&site->cs_locks.cs_busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);

/**
 * Helper function destroying the lock that wasn't completely initialized.
 *
 * Other threads can acquire references to the top-lock through its
 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
 */
static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_mutex_get(env, lock);
        cl_lock_cancel(env, lock);
        cl_lock_delete(env, lock);
        cl_lock_mutex_put(env, lock);
        cl_lock_put(env, lock);
}
static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                                     struct cl_object *obj,
                                     const struct cl_io *io,
                                     const struct cl_lock_descr *descr)
{
        struct cl_lock          *lock;
        struct lu_object_header *head;
        struct cl_site          *site = cl_object_site(obj);

        ENTRY;
        OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
        if (lock != NULL) {
                cfs_atomic_set(&lock->cll_ref, 1);
                lock->cll_descr = *descr;
                lock->cll_state = CLS_NEW;
                cl_object_get(obj);
                lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
                                                      "cl_lock", lock);
                CFS_INIT_LIST_HEAD(&lock->cll_layers);
                CFS_INIT_LIST_HEAD(&lock->cll_linkage);
                CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
                lu_ref_init(&lock->cll_reference);
                lu_ref_init(&lock->cll_holders);
                cfs_mutex_init(&lock->cll_guard);
                cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
                cfs_waitq_init(&lock->cll_wq);
                head = obj->co_lu.lo_header;
                cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
                cfs_atomic_inc(&site->cs_locks.cs_total);
                cfs_atomic_inc(&site->cs_locks.cs_created);
                cl_lock_lockdep_init(lock);
                cfs_list_for_each_entry(obj, &head->loh_layers,
                                        co_lu.lo_linkage) {
                        int err;

                        err = obj->co_ops->coo_lock_init(env, obj, lock, io);
                        if (err != 0) {
                                cl_lock_finish(env, lock);
                                lock = ERR_PTR(err);
                                break;
                        }
                }
        } else
                lock = ERR_PTR(-ENOMEM);
        RETURN(lock);
}
/**
 * Transfer the lock into INTRANSIT state and return the original state.
 *
 * \pre  state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
 * \post state: CLS_INTRANSIT
 * \see  CLS_INTRANSIT
 */
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
                                     struct cl_lock *lock)
{
        enum cl_lock_state state = lock->cll_state;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(state != CLS_INTRANSIT);
        LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
                 "Malformed lock state %d.\n", state);

        cl_lock_state_set(env, lock, CLS_INTRANSIT);
        lock->cll_intransit_owner = cfs_current();
        cl_lock_hold_add(env, lock, "intransit", cfs_current());
        return state;
}
EXPORT_SYMBOL(cl_lock_intransit);

/**
 * Exit the intransit state and restore the lock state to the original state.
 */
void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
                       enum cl_lock_state state)
{
        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_INTRANSIT);
        LASSERT(state != CLS_INTRANSIT);
        LASSERT(lock->cll_intransit_owner == cfs_current());

        lock->cll_intransit_owner = NULL;
        cl_lock_state_set(env, lock, state);
        cl_lock_unhold(env, lock, "intransit", cfs_current());
}
EXPORT_SYMBOL(cl_lock_extransit);

/**
 * Checks whether the lock is in the INTRANSIT state, held by another thread.
 */
int cl_lock_is_intransit(struct cl_lock *lock)
{
        LASSERT(cl_lock_is_mutexed(lock));
        return lock->cll_state == CLS_INTRANSIT &&
               lock->cll_intransit_owner != cfs_current();
}
EXPORT_SYMBOL(cl_lock_is_intransit);
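
/*
 * State-transfer sketch (illustrative, under the lock mutex): a thread that
 * must perform a blocking transition parks the lock in CLS_INTRANSIT and
 * restores the saved state afterwards, exactly as cl_use_try() and
 * cl_unuse_try() do.
 *
 * \code
 *      enum cl_lock_state saved;
 *
 *      saved = cl_lock_intransit(env, lock); // CACHED/HELD/ENQUEUED -> INTRANSIT
 *      // ... blocking work; other threads see CLS_INTRANSIT and wait ...
 *      cl_lock_extransit(env, lock, saved);  // restore the original state
 * \endcode
 */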
/**
 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
 * cover multiple stripes and can trigger cascading timeouts.
 */
static int cl_lock_fits_into(const struct lu_env *env,
                             const struct cl_lock *lock,
                             const struct cl_lock_descr *need,
                             const struct cl_io *io)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(env, lock));
        ENTRY;
        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_fits_into != NULL &&
                    !slice->cls_ops->clo_fits_into(env, slice, need, io))
                        RETURN(0);
        }
        RETURN(1);
}
static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
                                      struct cl_object *obj,
                                      const struct cl_io *io,
                                      const struct cl_lock_descr *need)
{
        struct cl_lock          *lock;
        struct cl_object_header *head;
        struct cl_site          *site;

        ENTRY;

        head = cl_object_header(obj);
        site = cl_object_site(obj);
        LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
        cfs_atomic_inc(&site->cs_locks.cs_lookup);
        cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                int matched;

                matched = cl_lock_ext_match(&lock->cll_descr, need) &&
                          lock->cll_state < CLS_FREEING &&
                          lock->cll_error == 0 &&
                          !(lock->cll_flags & CLF_CANCELLED) &&
                          cl_lock_fits_into(env, lock, need, io);
                CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
                       PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
                       matched);
                if (matched) {
                        cl_lock_get_trust(lock);
                        cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
                        RETURN(lock);
                }
        }
        RETURN(NULL);
}
/**
 * Returns a lock matching description \a need.
 *
 * This is the main entry point into the cl_lock caching interface. First, a
 * cache (implemented as a per-object linked list) is consulted. If lock is
 * found there, it is returned immediately. Otherwise a new lock is allocated
 * and returned. In any case, additional reference to lock is acquired.
 *
 * \see cl_object_find(), cl_page_find()
 */
static struct cl_lock *cl_lock_find(const struct lu_env *env,
                                    const struct cl_io *io,
                                    const struct cl_lock_descr *need)
{
        struct cl_object_header *head;
        struct cl_object        *obj;
        struct cl_lock          *lock;
        struct cl_site          *site;

        ENTRY;

        obj  = need->cld_obj;
        head = cl_object_header(obj);
        site = cl_object_site(obj);

        cfs_spin_lock(&head->coh_lock_guard);
        lock = cl_lock_lookup(env, obj, io, need);
        cfs_spin_unlock(&head->coh_lock_guard);

        if (lock == NULL) {
                lock = cl_lock_alloc(env, obj, io, need);
                if (!IS_ERR(lock)) {
                        struct cl_lock *ghost;

                        cfs_spin_lock(&head->coh_lock_guard);
                        ghost = cl_lock_lookup(env, obj, io, need);
                        if (ghost == NULL) {
                                cfs_list_add_tail(&lock->cll_linkage,
                                                  &head->coh_locks);
                                cfs_spin_unlock(&head->coh_lock_guard);
                                cfs_atomic_inc(&site->cs_locks.cs_busy);
                        } else {
                                cfs_spin_unlock(&head->coh_lock_guard);
                                /*
                                 * Other threads can acquire references to the
                                 * top-lock through its sub-locks. Hence, it
                                 * cannot be cl_lock_free()-ed immediately.
                                 */
                                cl_lock_finish(env, lock);
                                lock = ghost;
                        }
                }
        }
        RETURN(lock);
}
/**
 * Returns existing lock matching given description. This is similar to
 * cl_lock_find() except that no new lock is created, and returned lock is
 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
 */
struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
                             const struct cl_lock_descr *need,
                             const char *scope, const void *source)
{
        struct cl_object_header *head;
        struct cl_object        *obj;
        struct cl_lock          *lock;
        int ok;

        ENTRY;
        obj  = need->cld_obj;
        head = cl_object_header(obj);

        cfs_spin_lock(&head->coh_lock_guard);
        lock = cl_lock_lookup(env, obj, io, need);
        cfs_spin_unlock(&head->coh_lock_guard);

        if (lock == NULL)
                RETURN(NULL);

        cl_lock_mutex_get(env, lock);
        if (lock->cll_state == CLS_INTRANSIT)
                cl_lock_state_wait(env, lock); /* Don't care return value. */
        if (lock->cll_state == CLS_CACHED) {
                int result;
                result = cl_use_try(env, lock, 1);
                if (result < 0)
                        cl_lock_error(env, lock, result);
        }
        ok = lock->cll_state == CLS_HELD;
        if (ok) {
                cl_lock_hold_add(env, lock, scope, source);
                cl_lock_user_add(env, lock);
                cl_lock_put(env, lock);
        }
        cl_lock_mutex_put(env, lock);
        if (!ok) {
                cl_lock_put(env, lock);
                lock = NULL;
        }
        RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_peek);
/**
 * Returns a slice within a lock, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_page_at()
 */
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
                                       const struct lu_device_type *dtype)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(NULL, lock));
        ENTRY;

        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
                        RETURN(slice);
        }
        RETURN(NULL);
}
EXPORT_SYMBOL(cl_lock_at);
static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        counters = cl_lock_counters(env, lock);
        lock->cll_depth++;
        counters->ctc_nr_locks_locked++;
        lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
        cl_lock_trace(D_TRACE, env, "got mutex", lock);
}

/**
 * Locks cl_lock object.
 *
 * This is used to manipulate cl_lock fields, and to serialize state
 * transitions in the lock state machine.
 *
 * \post cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_put()
 */
void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(env, lock));

        if (lock->cll_guarder == cfs_current()) {
                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(lock->cll_depth > 0);
        } else {
                struct cl_object_header *hdr;
                struct cl_thread_info   *info;
                int i;

                LINVRNT(lock->cll_guarder != cfs_current());
                hdr = cl_object_header(lock->cll_descr.cld_obj);
                /*
                 * Check that mutices are taken in the bottom-to-top order.
                 */
                info = cl_env_info(env);
                for (i = 0; i < hdr->coh_nesting; ++i)
                        LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
                cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
                lock->cll_guarder = cfs_current();
                LINVRNT(lock->cll_depth == 0);
        }
        cl_lock_mutex_tail(env, lock);
}
EXPORT_SYMBOL(cl_lock_mutex_get);
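
/*
 * Locking sketch (illustrative): the mutex is recursive for its owner, so a
 * callback invoked under the mutex may safely re-enter cl_lock_mutex_get();
 * each get must be paired with a cl_lock_mutex_put().
 *
 * \code
 *      cl_lock_mutex_get(env, lock); // cll_depth == 1
 *      cl_lock_mutex_get(env, lock); // same owner, cll_depth == 2
 *      cl_lock_mutex_put(env, lock); // cll_depth == 1
 *      cl_lock_mutex_put(env, lock); // released, cll_guarder == NULL
 * \endcode
 */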
/**
 * Try-locks cl_lock object.
 *
 * \retval 0      \a lock was successfully locked
 *
 * \retval -EBUSY \a lock cannot be locked right now
 *
 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
 *
 * \see cl_lock_mutex_get()
 */
int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        LINVRNT(cl_lock_invariant_trusted(env, lock));
        ENTRY;

        result = 0;
        if (lock->cll_guarder == cfs_current()) {
                LINVRNT(lock->cll_depth > 0);
                cl_lock_mutex_tail(env, lock);
        } else if (cfs_mutex_trylock(&lock->cll_guard)) {
                LINVRNT(lock->cll_depth == 0);
                lock->cll_guarder = cfs_current();
                cl_lock_mutex_tail(env, lock);
        } else
                result = -EBUSY;
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_mutex_try);
/**
 * Unlocks cl_lock object.
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_get()
 */
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        LINVRNT(cl_lock_invariant(env, lock));
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(lock->cll_guarder == cfs_current());
        LINVRNT(lock->cll_depth > 0);

        counters = cl_lock_counters(env, lock);
        LINVRNT(counters->ctc_nr_locks_locked > 0);

        cl_lock_trace(D_TRACE, env, "put mutex", lock);
        lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
        counters->ctc_nr_locks_locked--;
        if (--lock->cll_depth == 0) {
                lock->cll_guarder = NULL;
                cfs_mutex_unlock(&lock->cll_guard);
        }
}
EXPORT_SYMBOL(cl_lock_mutex_put);
/**
 * Returns true iff lock's mutex is owned by the current thread.
 */
int cl_lock_is_mutexed(struct cl_lock *lock)
{
        return lock->cll_guarder == cfs_current();
}
EXPORT_SYMBOL(cl_lock_is_mutexed);

/**
 * Returns number of cl_lock mutices held by the current thread (environment).
 */
int cl_lock_nr_mutexed(const struct lu_env *env)
{
        struct cl_thread_info *info;
        int i;
        int locked;

        /*
         * NOTE: if summation across all nesting levels (currently 2) proves
         *       too expensive, a summary counter can be added to
         *       struct cl_thread_info.
         */
        info = cl_env_info(env);
        for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                locked += info->clt_counters[i].ctc_nr_locks_locked;
        return locked;
}
EXPORT_SYMBOL(cl_lock_nr_mutexed);
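
/*
 * Assertion sketch (illustrative): code that is about to block checks with
 * cl_lock_nr_mutexed() that the current thread holds no cl_lock mutices,
 * mirroring the assertions in cl_lock_state_wait() and cl_lock_enclosure().
 *
 * \code
 *      LASSERT(cl_lock_nr_mutexed(env) == 0); // safe to sleep now
 * \endcode
 */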
static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        ENTRY;
        if (!(lock->cll_flags & CLF_CANCELLED)) {
                const struct cl_lock_slice *slice;

                lock->cll_flags |= CLF_CANCELLED;
                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
                                                cls_linkage) {
                        if (slice->cls_ops->clo_cancel != NULL)
                                slice->cls_ops->clo_cancel(env, slice);
                }
        }
        EXIT;
}

static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object_header    *head;
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        if (lock->cll_state < CLS_FREEING) {
                LASSERT(lock->cll_state != CLS_INTRANSIT);
                cl_lock_state_set(env, lock, CLS_FREEING);

                head = cl_object_header(lock->cll_descr.cld_obj);

                cfs_spin_lock(&head->coh_lock_guard);
                cfs_list_del_init(&lock->cll_linkage);
                cfs_spin_unlock(&head->coh_lock_guard);

                /*
                 * From now on, no new references to this lock can be acquired
                 * by cl_lock_lookup().
                 */
                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
                                                cls_linkage) {
                        if (slice->cls_ops->clo_delete != NULL)
                                slice->cls_ops->clo_delete(env, slice);
                }
                /*
                 * From now on, no new references to this lock can be acquired
                 * by layer-specific means (like a pointer from struct
                 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
                 * lov).
                 *
                 * Lock will be finally freed in cl_lock_put() when last of
                 * existing references goes away.
                 */
        }
        EXIT;
}
/**
 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
 * top-lock (nesting == 0) accounts for this modification in the per-thread
 * debugging counters. Sub-lock holds can be released by a thread different
 * from one that acquired it.
 */
static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
                             int delta)
{
        struct cl_thread_counters *counters;
        enum clt_nesting_level     nesting;

        lock->cll_holds += delta;
        nesting = cl_lock_nesting(lock);
        if (nesting == CNL_TOP) {
                counters = &cl_env_info(env)->clt_counters[CNL_TOP];
                counters->ctc_nr_held += delta;
                LASSERT(counters->ctc_nr_held >= 0);
        }
}

/**
 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
 * cl_lock_hold_mod() for the explanation of the debugging code.
 */
static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
                             int delta)
{
        struct cl_thread_counters *counters;
        enum clt_nesting_level     nesting;

        lock->cll_users += delta;
        nesting = cl_lock_nesting(lock);
        if (nesting == CNL_TOP) {
                counters = &cl_env_info(env)->clt_counters[CNL_TOP];
                counters->ctc_nr_used += delta;
                LASSERT(counters->ctc_nr_used >= 0);
        }
}
static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
                                 const char *scope, const void *source)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_holds > 0);

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
        lu_ref_del(&lock->cll_holders, scope, source);
        cl_lock_hold_mod(env, lock, -1);
        if (lock->cll_holds == 0) {
                if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
                    lock->cll_descr.cld_mode == CLM_GROUP)
                        /*
                         * If lock is still phantom or grouplock when user is
                         * done with it---destroy the lock.
                         */
                        lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
                if (lock->cll_flags & CLF_CANCELPEND) {
                        lock->cll_flags &= ~CLF_CANCELPEND;
                        cl_lock_cancel0(env, lock);
                }
                if (lock->cll_flags & CLF_DOOMED) {
                        /* no longer doomed: it's dead... Jim. */
                        lock->cll_flags &= ~CLF_DOOMED;
                        cl_lock_delete0(env, lock);
                }
        }
        EXIT;
}
/**
 * Waits until lock state is changed.
 *
 * This function is called with cl_lock mutex locked, atomically releases
 * mutex and goes to sleep, waiting for a lock state change (signaled by
 * cl_lock_signal()), and re-acquires the mutex before return.
 *
 * This function is used to wait until lock state machine makes some progress
 * and to emulate synchronous operations on top of asynchronous lock
 * interface.
 *
 * \retval -EINTR wait was interrupted
 *
 * \retval 0 wait wasn't interrupted
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_signal()
 */
int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
{
        cfs_waitlink_t waiter;
        cfs_sigset_t   blocked;
        int            result;

        ENTRY;
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_depth == 1);
        LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */

        cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
        result = lock->cll_error;
        if (result == 0) {
                /* To avoid being interrupted by the 'non-fatal' signals
                 * (SIGCHLD, for instance), we'd block them temporarily.
                 */
                blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);

                cfs_waitlink_init(&waiter);
                cfs_waitq_add(&lock->cll_wq, &waiter);
                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                cl_lock_mutex_put(env, lock);

                LASSERT(cl_lock_nr_mutexed(env) == 0);
                cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);

                cl_lock_mutex_get(env, lock);
                cfs_set_current_state(CFS_TASK_RUNNING);
                cfs_waitq_del(&lock->cll_wq, &waiter);
                result = cfs_signal_pending() ? -EINTR : 0;

                /* Restore old blocked signals */
                cfs_restore_sigs(blocked);
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_state_wait);
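
/*
 * Wait-loop sketch (illustrative): synchronous callers emulate blocking on
 * the asynchronous state machine by retrying a *_try() operation and parking
 * in cl_lock_state_wait() whenever CLO_WAIT is returned; cl_wait() below is
 * the canonical instance of this pattern.
 *
 * \code
 *      do {
 *              result = cl_wait_try(env, lock);
 *              if (result == CLO_WAIT) {
 *                      result = cl_lock_state_wait(env, lock);
 *                      if (result == 0)
 *                              continue;   // state changed, try again
 *              }
 *              break;
 *      } while (1);
 * \endcode
 */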
static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
                                 enum cl_lock_state state)
{
        const struct cl_lock_slice *slice;

        ENTRY;
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
                if (slice->cls_ops->clo_state != NULL)
                        slice->cls_ops->clo_state(env, slice, state);
        cfs_waitq_broadcast(&lock->cll_wq);
        EXIT;
}

/**
 * Notifies waiters that lock state changed.
 *
 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
 * layers about state change by calling cl_lock_operations::clo_state()
 * top-to-bottom.
 */
void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
{
        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
        cl_lock_state_signal(env, lock, lock->cll_state);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_signal);
/**
 * Changes lock state.
 *
 * This function is invoked to notify layers that lock state changed, possibly
 * as a result of an asynchronous event such as call-back reception.
 *
 * \post lock->cll_state == state
 *
 * \see cl_lock_operations::clo_state()
 */
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
                       enum cl_lock_state state)
{
        struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);

        ENTRY;
        LASSERT(lock->cll_state <= state ||
                (lock->cll_state == CLS_CACHED &&
                 (state == CLS_HELD ||       /* lock found in cache */
                  state == CLS_NEW  ||       /* sub-lock canceled */
                  state == CLS_INTRANSIT)) ||
                /* lock is in transit state */
                lock->cll_state == CLS_INTRANSIT);

        if (lock->cll_state != state) {
                cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
                cfs_atomic_inc(&site->cs_locks_state[state]);

                cl_lock_state_signal(env, lock, state);
                lock->cll_state = state;
        }
        EXIT;
}
EXPORT_SYMBOL(cl_lock_state_set);
static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        int                         result;

        do {
                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(cl_lock_invariant(env, lock));
                LASSERT(lock->cll_state == CLS_INTRANSIT);

                result = -ENOSYS;
                cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
                                                cls_linkage) {
                        if (slice->cls_ops->clo_unuse != NULL) {
                                result = slice->cls_ops->clo_unuse(env, slice);
                                if (result != 0)
                                        break;
                        }
                }
                LASSERT(result != -ENOSYS);
        } while (result == CLO_REPEAT);

        return result;
}
/**
 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
 * If \a atomic is 1 and any layer fails, the lock is unused again, so that
 * the whole use operation stays atomic.
 */
int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
{
        const struct cl_lock_slice *slice;
        int result;
        enum cl_lock_state state;

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "use lock", lock);

        LASSERT(lock->cll_state == CLS_CACHED);
        if (lock->cll_error)
                RETURN(lock->cll_error);

        result = -ENOSYS;
        state = cl_lock_intransit(env, lock);
        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_use != NULL) {
                        result = slice->cls_ops->clo_use(env, slice);
                        if (result != 0)
                                break;
                }
        }
        LASSERT(result != -ENOSYS);

        LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
                 lock->cll_state);

        if (result == 0) {
                state = CLS_HELD;
        } else {
                if (result == -ESTALE) {
                        /*
                         * ESTALE means sublock being cancelled
                         * at this time, and set lock state to
                         * be NEW here and ask the caller to repeat.
                         */
                        state = CLS_NEW;
                        result = CLO_REPEAT;
                }

                /* @atomic means back-off-on-failure. */
                if (atomic) {
                        int rc;
                        rc = cl_unuse_try_internal(env, lock);
                        /* Vet the results. */
                        if (rc < 0 && result > 0)
                                result = rc;
                }
        }
        cl_lock_extransit(env, lock, state);
        RETURN(result);
}
EXPORT_SYMBOL(cl_use_try);
/**
 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
 * top-to-bottom.
 */
static int cl_enqueue_kick(const struct lu_env *env,
                           struct cl_lock *lock,
                           struct cl_io *io, __u32 flags)
{
        int result;
        const struct cl_lock_slice *slice;

        ENTRY;
        result = -ENOSYS;
        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_enqueue != NULL) {
                        result = slice->cls_ops->clo_enqueue(env,
                                                             slice, io, flags);
                        if (result != 0)
                                break;
                }
        }
        LASSERT(result != -ENOSYS);
        RETURN(result);
}
/**
 * Tries to enqueue a lock.
 *
 * This function is called repeatedly by cl_enqueue() until either lock is
 * enqueued, or error occurs. This function does not block waiting for
 * networking communication to complete.
 *
 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                         lock->cll_state == CLS_HELD)
 *
 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
 * \see cl_lock_state::CLS_ENQUEUED
 */
int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
                   struct cl_io *io, __u32 flags)
{
        int result;

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
        do {
                result = 0;

                LINVRNT(cl_lock_is_mutexed(lock));

                if (lock->cll_error != 0)
                        break;
                switch (lock->cll_state) {
                case CLS_NEW:
                        cl_lock_state_set(env, lock, CLS_QUEUING);
                        /* fall-through */
                case CLS_QUEUING:
                        /* kick layers. */
                        result = cl_enqueue_kick(env, lock, io, flags);
                        if (result == 0)
                                cl_lock_state_set(env, lock, CLS_ENQUEUED);
                        break;
                case CLS_INTRANSIT:
                        LASSERT(cl_lock_is_intransit(lock));
                        result = CLO_WAIT;
                        break;
                case CLS_CACHED:
                        /* yank lock from the cache. */
                        result = cl_use_try(env, lock, 0);
                        break;
                case CLS_ENQUEUED:
                case CLS_HELD:
                        /* already enqueued */
                        break;
                case CLS_FREEING:
                        /*
                         * impossible, only held locks with increased
                         * ->cll_holds can be enqueued, and they cannot be
                         * freed.
                         */
                        LBUG();
                }
        } while (result == CLO_REPEAT);
        if (result < 0)
                cl_lock_error(env, lock, result);
        RETURN(result ?: lock->cll_error);
}
EXPORT_SYMBOL(cl_enqueue_try);
/**
 * Cancel the conflicting lock found during previous enqueue.
 *
 * \retval 0 conflicting lock has been canceled.
 * \retval -ve error code.
 */
int cl_lock_enqueue_wait(const struct lu_env *env,
                         struct cl_lock *lock,
                         int keep_mutex)
{
        struct cl_lock *conflict;
        int             rc = 0;
        ENTRY;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_QUEUING);
        LASSERT(lock->cll_conflict != NULL);

        conflict = lock->cll_conflict;
        lock->cll_conflict = NULL;

        cl_lock_mutex_put(env, lock);
        LASSERT(cl_lock_nr_mutexed(env) == 0);

        cl_lock_mutex_get(env, conflict);
        cl_lock_cancel(env, conflict);
        cl_lock_delete(env, conflict);

        while (conflict->cll_state != CLS_FREEING) {
                rc = cl_lock_state_wait(env, conflict);
                if (rc != 0)
                        break;
        }
        cl_lock_mutex_put(env, conflict);
        lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
        cl_lock_put(env, conflict);

        if (keep_mutex)
                cl_lock_mutex_get(env, lock);

        LASSERT(rc <= 0);
        RETURN(rc);
}
EXPORT_SYMBOL(cl_lock_enqueue_wait);
static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
                             struct cl_io *io, __u32 enqflags)
{
        int result;

        ENTRY;

        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_holds > 0);

        cl_lock_user_add(env, lock);
        do {
                result = cl_enqueue_try(env, lock, io, enqflags);
                if (result == CLO_WAIT) {
                        if (lock->cll_conflict != NULL)
                                result = cl_lock_enqueue_wait(env, lock, 1);
                        else
                                result = cl_lock_state_wait(env, lock);
                        if (result == 0)
                                continue;
                }
                break;
        } while (1);
        if (result != 0) {
                cl_lock_user_del(env, lock);
                cl_lock_error(env, lock, result);
        }
        LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
                     lock->cll_state == CLS_HELD));
        RETURN(result);
}

/**
 * Enqueues a lock.
 *
 * \pre current thread or io owns a hold on lock.
 *
 * \post ergo(result == 0, lock->users increased)
 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                         lock->cll_state == CLS_HELD)
 */
int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
               struct cl_io *io, __u32 enqflags)
{
        int result;

        ENTRY;

        cl_lock_lockdep_acquire(env, lock, enqflags);
        cl_lock_mutex_get(env, lock);
        result = cl_enqueue_locked(env, lock, io, enqflags);
        cl_lock_mutex_put(env, lock);
        if (result != 0)
                cl_lock_lockdep_release(env, lock);
        LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
                     lock->cll_state == CLS_HELD));
        RETURN(result);
}
EXPORT_SYMBOL(cl_enqueue);
/**
 * Tries to unlock a lock.
 *
 * This function is called repeatedly by cl_unuse() until either lock is
 * unlocked, or error occurs.
 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
 *
 * \pre  lock->cll_state == CLS_HELD
 *
 * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
 *
 * \see cl_unuse() cl_lock_operations::clo_unuse()
 * \see cl_lock_state::CLS_CACHED
 */
int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
{
        int                result;
        enum cl_lock_state state = CLS_NEW;

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);

        LASSERT(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED);
        if (lock->cll_users > 1) {
                cl_lock_user_del(env, lock);
                RETURN(0);
        }

        /*
         * New lock users (->cll_users) are not protecting unlocking
         * from proceeding. From this point, lock eventually reaches
         * CLS_CACHED, is reinitialized to CLS_NEW or fails into
         * CLS_FREEING state.
         */
        state = cl_lock_intransit(env, lock);

        result = cl_unuse_try_internal(env, lock);
        LASSERT(lock->cll_state == CLS_INTRANSIT);
        LASSERT(result != CLO_WAIT);
        cl_lock_user_del(env, lock);
        if (result == 0 || result == -ESTALE) {
                /*
                 * Return lock back to the cache. This is the only
                 * place where lock is moved into CLS_CACHED state.
                 *
                 * If one of ->clo_unuse() methods returned -ESTALE, lock
                 * cannot be placed into cache and has to be
                 * re-initialized. This happens e.g., when a sub-lock was
                 * canceled while unlocking was in progress.
                 */
                if (state == CLS_HELD && result == 0)
                        state = CLS_CACHED;
                else
                        state = CLS_NEW;
                cl_lock_extransit(env, lock, state);

                /*
                 * Hide -ESTALE error.
                 * If the lock is a glimpse lock with multiple stripes, one
                 * of its sublocks may have returned -ENAVAIL while the other
                 * sublocks are matched write locks. In this case we cannot
                 * set this lock to error, because otherwise some of its
                 * sublocks would never be canceled, and some dirty pages
                 * would never be written to the OSTs. -jay
                 */
                result = 0;
        } else {
                CERROR("result = %d, this is unlikely!\n", result);
                cl_lock_extransit(env, lock, state);
        }

        result = result ?: lock->cll_error;
        if (result < 0)
                cl_lock_error(env, lock, result);
        RETURN(result);
}
EXPORT_SYMBOL(cl_unuse_try);

static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        ENTRY;
        result = cl_unuse_try(env, lock);
        if (result)
                CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);

        EXIT;
}

/**
 * Unlocks a lock.
 */
void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
{
        ENTRY;
        cl_lock_mutex_get(env, lock);
        cl_unuse_locked(env, lock);
        cl_lock_mutex_put(env, lock);
        cl_lock_lockdep_release(env, lock);
        EXIT;
}
EXPORT_SYMBOL(cl_unuse);
/**
 * Tries to wait for a lock.
 *
 * This function is called repeatedly by cl_wait() until either lock is
 * granted, or error occurs. This function does not block waiting for network
 * communication to complete.
 *
 * \see cl_wait() cl_lock_operations::clo_wait()
 * \see cl_lock_state::CLS_HELD
 */
int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        int                         result;

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
        do {
                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(cl_lock_invariant(env, lock));
                LASSERT(lock->cll_state == CLS_ENQUEUED ||
                        lock->cll_state == CLS_HELD ||
                        lock->cll_state == CLS_INTRANSIT);
                LASSERT(lock->cll_users > 0);
                LASSERT(lock->cll_holds > 0);

                result = 0;
                if (lock->cll_error != 0)
                        break;

                if (cl_lock_is_intransit(lock)) {
                        result = CLO_WAIT;
                        break;
                }

                if (lock->cll_state == CLS_HELD)
                        /* nothing to do */
                        break;

                result = -ENOSYS;
                cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                        if (slice->cls_ops->clo_wait != NULL) {
                                result = slice->cls_ops->clo_wait(env, slice);
                                if (result != 0)
                                        break;
                        }
                }
                LASSERT(result != -ENOSYS);
                if (result == 0) {
                        LASSERT(lock->cll_state != CLS_INTRANSIT);
                        cl_lock_state_set(env, lock, CLS_HELD);
                }
        } while (result == CLO_REPEAT);
        RETURN(result ?: lock->cll_error);
}
EXPORT_SYMBOL(cl_wait_try);

/**
 * Waits until enqueued lock is granted.
 *
 * \pre current thread or io owns a hold on the lock
 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
 *                        lock->cll_state == CLS_HELD)
 *
 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
 */
int cl_wait(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        ENTRY;
        cl_lock_mutex_get(env, lock);

        LINVRNT(cl_lock_invariant(env, lock));
        LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
                 "Wrong state %d\n", lock->cll_state);
        LASSERT(lock->cll_holds > 0);

        do {
                result = cl_wait_try(env, lock);
                if (result == CLO_WAIT) {
                        result = cl_lock_state_wait(env, lock);
                        if (result == 0)
                                continue;
                }
                break;
        } while (1);
        if (result < 0) {
                cl_lock_user_del(env, lock);
                cl_lock_error(env, lock, result);
                cl_lock_lockdep_release(env, lock);
        }
        cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
        cl_lock_mutex_put(env, lock);
        LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
        RETURN(result);
}
EXPORT_SYMBOL(cl_wait);
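
/*
 * End-to-end sketch (illustrative; error handling elided, the "sample" scope
 * string and the surrounding env/io/need setup are assumed): the low-level
 * calls compose into the hold -> enqueue -> wait -> unuse -> release life
 * cycle that cl_lock_request() packages for most callers.
 *
 * \code
 *      struct cl_lock *lock;
 *
 *      lock = cl_lock_hold(env, io, need, "sample", cfs_current());
 *      if (!IS_ERR(lock)) {
 *              cl_enqueue(env, lock, io, need->cld_enq_flags);
 *              cl_wait(env, lock);                  // -> CLS_HELD
 *              // ... the extent is now protected ...
 *              cl_unuse(env, lock);                 // -> CLS_CACHED
 *              cl_lock_release(env, lock, "sample", cfs_current());
 *      }
 * \endcode
 */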
/**
 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
 * cost.
 */
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        unsigned long pound;
        unsigned long ounce;

        ENTRY;
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        pound = 0;
        cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_weigh != NULL) {
                        ounce = slice->cls_ops->clo_weigh(env, slice);
                        pound += ounce;
                        if (pound < ounce) /* over-weight^Wflow */
                                pound = ~0UL;
                }
        }
        RETURN(pound);
}
EXPORT_SYMBOL(cl_lock_weigh);
/**
 * Notifies layers that lock description changed.
 *
 * The server can grant client a lock different from one that was requested
 * (e.g., larger in extent). This method is called when actually granted lock
 * description becomes known to let layers accommodate the changed lock
 * description.
 *
 * \see cl_lock_operations::clo_modify()
 */
int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
                   const struct cl_lock_descr *desc)
{
        const struct cl_lock_slice *slice;
        struct cl_object           *obj = lock->cll_descr.cld_obj;
        struct cl_object_header    *hdr = cl_object_header(obj);
        int result;

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
        /* don't allow object to change */
        LASSERT(obj == desc->cld_obj);
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_modify != NULL) {
                        result = slice->cls_ops->clo_modify(env, slice, desc);
                        if (result != 0)
                                RETURN(result);
                }
        }
        CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
                      PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
        /*
         * Just replace description in place. Nothing more is needed for
         * now. If locks were indexed according to their extent and/or mode,
         * that index would have to be updated here.
         */
        cfs_spin_lock(&hdr->coh_lock_guard);
        lock->cll_descr = *desc;
        cfs_spin_unlock(&hdr->coh_lock_guard);
        RETURN(0);
}
EXPORT_SYMBOL(cl_lock_modify);
/**
 * Initializes lock closure with a given origin.
 *
 * \see cl_lock_closure
 */
void cl_lock_closure_init(const struct lu_env *env,
                          struct cl_lock_closure *closure,
                          struct cl_lock *origin, int wait)
{
        LINVRNT(cl_lock_is_mutexed(origin));
        LINVRNT(cl_lock_invariant(env, origin));

        CFS_INIT_LIST_HEAD(&closure->clc_list);
        closure->clc_origin = origin;
        closure->clc_wait   = wait;
        closure->clc_nr     = 0;
}
EXPORT_SYMBOL(cl_lock_closure_init);

/**
 * Builds a closure of \a lock.
 *
 * Building of a closure consists of adding initial lock (\a lock) into it,
 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
 * methods might call cl_lock_closure_build() recursively again, adding more
 * locks to the closure, etc.
 *
 * \see cl_lock_closure
 */
int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
                          struct cl_lock_closure *closure)
{
        const struct cl_lock_slice *slice;
        int result;

        ENTRY;
        LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
        LINVRNT(cl_lock_invariant(env, closure->clc_origin));

        result = cl_lock_enclosure(env, lock, closure);
        if (result == 0) {
                cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                        if (slice->cls_ops->clo_closure != NULL) {
                                result = slice->cls_ops->clo_closure(env, slice,
                                                                     closure);
                                if (result != 0)
                                        break;
                        }
                }
        }
        if (result != 0)
                cl_lock_disclosure(env, closure);
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_closure_build);
/**
 * Adds new lock to a closure.
 *
 * Try-locks \a lock and if succeeded, adds it to the closure (never more than
 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
 * until next try-lock is likely to succeed.
 */
int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
                      struct cl_lock_closure *closure)
{
        int result = 0;
        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
        if (!cl_lock_mutex_try(env, lock)) {
                /*
                 * If lock->cll_inclosure is not empty, lock is already in
                 * this closure.
                 */
                if (cfs_list_empty(&lock->cll_inclosure)) {
                        cl_lock_get_trust(lock);
                        lu_ref_add(&lock->cll_reference, "closure", closure);
                        cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
                        closure->clc_nr++;
                } else
                        cl_lock_mutex_put(env, lock);
                result = 0;
        } else {
                cl_lock_disclosure(env, closure);
                if (closure->clc_wait) {
                        cl_lock_get_trust(lock);
                        lu_ref_add(&lock->cll_reference, "closure-w", closure);
                        cl_lock_mutex_put(env, closure->clc_origin);

                        LASSERT(cl_lock_nr_mutexed(env) == 0);
                        cl_lock_mutex_get(env, lock);
                        cl_lock_mutex_put(env, lock);

                        cl_lock_mutex_get(env, closure->clc_origin);
                        lu_ref_del(&lock->cll_reference, "closure-w", closure);
                        cl_lock_put(env, lock);
                }
                result = CLO_REPEAT;
        }
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_enclosure);
/** Releases mutices of enclosed locks. */
void cl_lock_disclosure(const struct lu_env *env,
                        struct cl_lock_closure *closure)
{
        struct cl_lock *scan;
        struct cl_lock *temp;

        cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
        cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
                                     cll_inclosure) {
                cfs_list_del_init(&scan->cll_inclosure);
                cl_lock_mutex_put(env, scan);
                lu_ref_del(&scan->cll_reference, "closure", closure);
                cl_lock_put(env, scan);
                closure->clc_nr--;
        }
        LASSERT(closure->clc_nr == 0);
}
EXPORT_SYMBOL(cl_lock_disclosure);

/** Finalizes a closure. */
void cl_lock_closure_fini(struct cl_lock_closure *closure)
{
        LASSERT(closure->clc_nr == 0);
        LASSERT(cfs_list_empty(&closure->clc_list));
}
EXPORT_SYMBOL(cl_lock_closure_fini);
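
/*
 * Closure sketch (illustrative): a caller that must mutex a group of related
 * locks without dead-locking builds a closure around an already-mutexed
 * origin lock; on failure the closure machinery returns CLO_REPEAT and drops
 * the mutices itself.
 *
 * \code
 *      struct cl_lock_closure closure;
 *      int rc;
 *
 *      cl_lock_closure_init(env, &closure, origin, 1); // origin is mutexed
 *      rc = cl_lock_closure_build(env, lock, &closure);
 *      if (rc == 0) {
 *              // ... all enclosed locks are mutexed here ...
 *              cl_lock_disclosure(env, &closure);
 *      }
 *      cl_lock_closure_fini(&closure);
 * \endcode
 */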
/**
 * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
 * destroyed, then destroys the lock. If there are holds on the lock, its
 * destruction is postponed until all holds are released. This is called when
 * a decision is made to destroy the lock in the future. E.g., when a
 * blocking AST is received on it, or fatal communication error happens.
 *
 * Caller must have a reference on this lock to prevent a situation, when
 * deleted lock lingers in memory for indefinite time, because nobody calls
 * cl_lock_put() to finish it.
 *
 * \pre atomic_read(&lock->cll_ref) > 0
 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
 *           cl_lock_nr_mutexed(env) == 1)
 *      [i.e., if a top-lock is deleted, mutices of no other locks can be
 *      held, as deletion of sub-locks might require releasing a top-lock
 *      mutex]
 *
 * \see cl_lock_operations::clo_delete()
 * \see cl_lock::cll_holds
 */
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
                     cl_lock_nr_mutexed(env) == 1));

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
        if (lock->cll_holds == 0)
                cl_lock_delete0(env, lock);
        else
                lock->cll_flags |= CLF_DOOMED;
        EXIT;
}
EXPORT_SYMBOL(cl_lock_delete);
/**
 * Mark lock as irrecoverably failed, and mark it for destruction. This
 * happens when, e.g., server fails to grant a lock to us, or networking
 * time-out happens.
 *
 * \pre atomic_read(&lock->cll_ref) > 0
 *
 * \see clo_lock_delete()
 * \see cl_lock::cll_holds
 */
void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
        if (lock->cll_error == 0 && error != 0) {
                lock->cll_error = error;
                cl_lock_signal(env, lock);
                cl_lock_cancel(env, lock);
                cl_lock_delete(env, lock);
        }
        EXIT;
}
EXPORT_SYMBOL(cl_lock_error);

/**
 * Cancels this lock. Notifies layers
 * (bottom-to-top) that lock is being cancelled, then destroys the lock. If
 * there are holds on the lock, cancellation is postponed until
 * all holds are released.
 *
 * Cancellation notification is delivered to layers at most once.
 *
 * \see cl_lock_operations::clo_cancel()
 * \see cl_lock::cll_holds
 */
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
        if (lock->cll_holds == 0)
                cl_lock_cancel0(env, lock);
        else
                lock->cll_flags |= CLF_CANCELPEND;
        EXIT;
}
EXPORT_SYMBOL(cl_lock_cancel);
/**
 * Finds an existing lock covering given page and optionally different from a
 * given \a except lock.
 */
struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
                                struct cl_page *page, struct cl_lock *except,
                                int pending, int canceld)
{
        struct cl_object_header *head;
        struct cl_lock          *scan;
        struct cl_lock          *lock;
        struct cl_lock_descr    *need;

        ENTRY;

        head = cl_object_header(obj);
        need = &cl_env_info(env)->clt_descr;
        lock = NULL;

        need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
                                    * not PHANTOM */
        need->cld_start = need->cld_end = page->cp_index;
        need->cld_enq_flags = 0;

        cfs_spin_lock(&head->coh_lock_guard);
        /* It is fine to match any group lock since there could be only one
         * with a unique gid and it conflicts with all other lock modes too */
        cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
                if (scan != except &&
                    (scan->cll_descr.cld_mode == CLM_GROUP ||
                    cl_lock_ext_match(&scan->cll_descr, need)) &&
                    scan->cll_state >= CLS_HELD &&
                    scan->cll_state < CLS_FREEING &&
                    /*
                     * This check is racy as the lock can be canceled right
                     * after it is done, but this is fine, because page exists
                     * only after lock is granted.
                     */
                    (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
                    (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
                        /* Don't increase cs_hit here since this
                         * is just a helper function. */
                        cl_lock_get_trust(scan);
                        lock = scan;
                        break;
                }
        }
        cfs_spin_unlock(&head->coh_lock_guard);
        RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_at_page);
/**
 * Returns a list of pages protected (only) by a given lock.
 *
 * Scans an extent of page radix tree, corresponding to the \a lock and queues
 * all pages that are not protected by locks other than \a lock into \a queue.
 */
void cl_lock_page_list_fixup(const struct lu_env *env,
                             struct cl_io *io, struct cl_lock *lock,
                             struct cl_page_list *queue)
{
        struct cl_page      *page;
        struct cl_page      *temp;
        struct cl_page_list *plist = &cl_env_info(env)->clt_list;

        LINVRNT(cl_lock_invariant(env, lock));
        ENTRY;

        /* No need to fix for WRITE lock because it is exclusive. */
        if (lock->cll_descr.cld_mode >= CLM_WRITE)
                RETURN_EXIT;

        /* For those pages who are still covered by other PR locks, we should
         * not discard them otherwise a [0, EOF) PR lock will discard all
         * pages.
         */
        cl_page_list_init(plist);
        cl_page_list_for_each_safe(page, temp, queue) {
                pgoff_t               idx = page->cp_index;
                struct cl_lock       *found;
                struct cl_lock_descr *descr;

                /* The algorithm counts on the index-ascending page index. */
                LASSERT(ergo(&temp->cp_batch != &queue->pl_pages,
                             page->cp_index < temp->cp_index));

                found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
                                        page, lock, 0, 0);
                if (found == NULL)
                        continue;

                descr = &found->cll_descr;
                cfs_list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
                                                  cp_batch) {
                        idx = page->cp_index;
                        if (descr->cld_start > idx || descr->cld_end < idx)
                                break;
                        cl_page_list_move(plist, queue, page);
                }
                cl_lock_put(env, found);
        }

        /* The pages in plist are covered by other locks, don't handle them
         * this time.
         */
        cl_page_list_disown(env, io, plist);
        cl_page_list_fini(env, plist);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_page_list_fixup);
/**
 * Invalidate pages protected by the given lock, sending them out to the
 * server first, if necessary.
 *
 * This function does the following:
 *
 *     - collects a list of pages to be invalidated,
 *
 *     - unmaps them from the user virtual memory,
 *
 *     - sends dirty pages to the server,
 *
 *     - waits for transfer completion,
 *
 *     - discards pages, and throws them out of memory.
 *
 * If \a discard is set, pages are discarded without sending them to the
 * server.
 *
 * If error happens on any step, the process continues anyway (the reasoning
 * behind this being that lock cancellation cannot be delayed indefinitely).
 */
int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
                     int discard)
{
        struct cl_thread_info *info  = cl_env_info(env);
        struct cl_io          *io    = &info->clt_io;
        struct cl_2queue      *queue = &info->clt_queue;
        struct cl_lock_descr  *descr = &lock->cll_descr;
        struct lu_device_type *dtype;
        long                   page_count;
        pgoff_t                next_index;
        int                    res;
        int                    result;

        LINVRNT(cl_lock_invariant(env, lock));
        ENTRY;

        io->ci_obj = cl_object_top(descr->cld_obj);
        result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
        if (result != 0)
                GOTO(out, result);

        dtype = descr->cld_obj->co_lu.lo_dev->ld_type;
        next_index = descr->cld_start;
        do {
                const struct cl_page_slice *slice;

                cl_2queue_init(queue);
                res = cl_page_gang_lookup(env, descr->cld_obj, io,
                                          next_index, descr->cld_end,
                                          &queue->c2_qin);
                page_count = queue->c2_qin.pl_nr;
                if (page_count == 0)
                        break;

                /* cl_page_gang_lookup() uses subobj and sublock to look for
                 * covered pages, but @queue->c2_qin contains the list of top
                 * pages. We have to turn the page back to subpage so as to
                 * get `correct' next index. -jay */
                slice = cl_page_at(cl_page_list_last(&queue->c2_qin), dtype);
                next_index = slice->cpl_page->cp_index + 1;

                result = cl_page_list_unmap(env, io, &queue->c2_qin);
                if (!discard) {
                        long timeout = 600; /* 10 minutes. */
                        /* for debug purpose, if this request can't be
                         * finished in 10 minutes, we hope it can notify us.
                         */
                        result = cl_io_submit_sync(env, io, CRT_WRITE, queue,
                                                   CRP_CANCEL, timeout);
                        if (result)
                                CWARN("Writing %lu pages error: %d\n",
                                      page_count, result);
                }
                cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
                cl_2queue_discard(env, io, queue);
                cl_2queue_disown(env, io, queue);
                cl_2queue_fini(env, queue);

                if (next_index > descr->cld_end)
                        break;

                if (res == CLP_GANG_RESCHED)
                        cfs_cond_resched();
        } while (res != CLP_GANG_OKAY);
out:
        cl_io_fini(env, io);
        RETURN(result);
}
EXPORT_SYMBOL(cl_lock_page_out);
/**
 * Eliminate all locks for a given object.
 *
 * Caller has to guarantee that no lock is in active use.
 *
 * \param cancel when this is set, cl_locks_prune() cancels locks before
 *               destroying them.
 */
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
{
        struct cl_object_header *head;
        struct cl_lock          *lock;

        ENTRY;
        head = cl_object_header(obj);
        /*
         * If locks are destroyed without cancellation, all pages must be
         * already destroyed (as otherwise they will be left unprotected).
         */
        LASSERT(ergo(!cancel,
                     head->coh_tree.rnode == NULL && head->coh_pages == 0));

        cfs_spin_lock(&head->coh_lock_guard);
        while (!cfs_list_empty(&head->coh_locks)) {
                lock = container_of(head->coh_locks.next,
                                    struct cl_lock, cll_linkage);
                cl_lock_get_trust(lock);
                cfs_spin_unlock(&head->coh_lock_guard);
                lu_ref_add(&lock->cll_reference, "prune", cfs_current());
                cl_lock_mutex_get(env, lock);
                if (lock->cll_state < CLS_FREEING) {
                        LASSERT(lock->cll_holds == 0);
                        LASSERT(lock->cll_users == 0);
                        if (cancel)
                                cl_lock_cancel(env, lock);
                        cl_lock_delete(env, lock);
                }
                cl_lock_mutex_put(env, lock);
                lu_ref_del(&lock->cll_reference, "prune", cfs_current());
                cl_lock_put(env, lock);
                cfs_spin_lock(&head->coh_lock_guard);
        }
        cfs_spin_unlock(&head->coh_lock_guard);
        EXIT;
}
EXPORT_SYMBOL(cl_locks_prune);
static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
                                          const struct cl_io *io,
                                          const struct cl_lock_descr *need,
                                          const char *scope, const void *source)
{
        struct cl_lock *lock;

        ENTRY;

        while (1) {
                lock = cl_lock_find(env, io, need);
                if (IS_ERR(lock))
                        break;
                cl_lock_mutex_get(env, lock);
                if (lock->cll_state < CLS_FREEING &&
                    !(lock->cll_flags & CLF_CANCELLED)) {
                        cl_lock_hold_mod(env, lock, +1);
                        lu_ref_add(&lock->cll_holders, scope, source);
                        lu_ref_add(&lock->cll_reference, scope, source);
                        break;
                }
                cl_lock_mutex_put(env, lock);
                cl_lock_put(env, lock);
        }
        RETURN(lock);
}

/**
 * Returns a lock matching \a need description with a reference and a hold on
 * it.
 *
 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
 * guarantees that lock is not in the CLS_FREEING state on return.
 */
struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
                             const struct cl_lock_descr *need,
                             const char *scope, const void *source)
{
        struct cl_lock *lock;

        ENTRY;

        lock = cl_lock_hold_mutex(env, io, need, scope, source);
        if (!IS_ERR(lock))
                cl_lock_mutex_put(env, lock);
        RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_hold);
/**
 * Main high-level entry point of cl_lock interface that finds existing or
 * enqueues new lock matching given description.
 */
struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
                                const struct cl_lock_descr *need,
                                const char *scope, const void *source)
{
        struct cl_lock *lock;
        int             rc;
        __u32           enqflags = need->cld_enq_flags;

        ENTRY;
        do {
                lock = cl_lock_hold_mutex(env, io, need, scope, source);
                if (!IS_ERR(lock)) {
                        rc = cl_enqueue_locked(env, lock, io, enqflags);
                        if (rc == 0) {
                                if (cl_lock_fits_into(env, lock, need, io)) {
                                        cl_lock_mutex_put(env, lock);
                                        cl_lock_lockdep_acquire(env,
                                                                lock, enqflags);
                                        break;
                                }
                                cl_unuse_locked(env, lock);
                        }
                        cl_lock_trace(D_DLMTRACE, env, "enqueue failed", lock);
                        cl_lock_hold_release(env, lock, scope, source);
                        cl_lock_mutex_put(env, lock);
                        lu_ref_del(&lock->cll_reference, scope, source);
                        cl_lock_put(env, lock);
                        lock = ERR_PTR(rc);
                } else
                        rc = PTR_ERR(lock);
        } while (rc == 0);
        RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_request);
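
/*
 * Typical-caller sketch (illustrative; the "sample" scope string and the
 * env/io/need setup are assumed): most users go through cl_lock_request()
 * and get back a lock that is already enqueued and usable, dropping it with
 * cl_unuse() and cl_lock_release() when the IO completes.
 *
 * \code
 *      struct cl_lock *lock;
 *
 *      lock = cl_lock_request(env, io, need, "sample", cfs_current());
 *      if (!IS_ERR(lock)) {
 *              // ... perform IO under the lock ...
 *              cl_unuse(env, lock);
 *              cl_lock_release(env, lock, "sample", cfs_current());
 *      }
 * \endcode
 */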
/**
 * Adds a hold to a known lock.
 */
void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
                      const char *scope, const void *source)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_state != CLS_FREEING);

        ENTRY;
        cl_lock_hold_mod(env, lock, +1);
        cl_lock_get(lock);
        lu_ref_add(&lock->cll_holders, scope, source);
        lu_ref_add(&lock->cll_reference, scope, source);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_hold_add);

/**
 * Releases a hold and a reference on a lock, on which caller acquired a
 * hold.
 */
void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
                    const char *scope, const void *source)
{
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        cl_lock_hold_release(env, lock, scope, source);
        lu_ref_del(&lock->cll_reference, scope, source);
        cl_lock_put(env, lock);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_unhold);

/**
 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
 */
void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
                     const char *scope, const void *source)
{
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
        cl_lock_mutex_get(env, lock);
        cl_lock_hold_release(env, lock, scope, source);
        cl_lock_mutex_put(env, lock);
        lu_ref_del(&lock->cll_reference, scope, source);
        cl_lock_put(env, lock);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_release);
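
/*
 * Hold-accounting sketch (illustrative; "scan" is a made-up scope string):
 * cl_lock_hold_add()/cl_lock_unhold() are the pairing used under the lock
 * mutex, while cl_lock_hold()/cl_lock_release() are the unlocked one; every
 * hold carries its own scope/source pair for lu_ref debugging.
 *
 * \code
 *      cl_lock_mutex_get(env, lock);
 *      cl_lock_hold_add(env, lock, "scan", cfs_current());
 *      // ... manipulate the lock under its mutex ...
 *      cl_lock_unhold(env, lock, "scan", cfs_current()); // drops hold + ref
 *      cl_lock_mutex_put(env, lock);   // caller still owns its own reference
 * \endcode
 */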
void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));

        ENTRY;
        cl_lock_used_mod(env, lock, +1);
        EXIT;
}
EXPORT_SYMBOL(cl_lock_user_add);

int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(cl_lock_invariant(env, lock));
        LASSERT(lock->cll_users > 0);

        ENTRY;
        cl_lock_used_mod(env, lock, -1);
        RETURN(lock->cll_users == 0);
}
EXPORT_SYMBOL(cl_lock_user_del);
const char *cl_lock_mode_name(const enum cl_lock_mode mode)
{
        static const char *names[] = {
                [CLM_PHANTOM] = "P",
                [CLM_READ]    = "R",
                [CLM_WRITE]   = "W",
                [CLM_GROUP]   = "G"
        };
        if (0 <= mode && mode < ARRAY_SIZE(names))
                return names[mode];
        else
                return "U";
}
EXPORT_SYMBOL(cl_lock_mode_name);
/**
 * Prints human readable representation of a lock description.
 */
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
                         lu_printer_t printer,
                         const struct cl_lock_descr *descr)
{
        const struct lu_fid *fid;

        fid = lu_object_fid(&descr->cld_obj->co_lu);
        (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
}
EXPORT_SYMBOL(cl_lock_descr_print);
/**
 * Prints human readable representation of \a lock to the \a f.
 */
void cl_lock_print(const struct lu_env *env, void *cookie,
                   lu_printer_t printer, const struct cl_lock *lock)
{
        const struct cl_lock_slice *slice;
        (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
                   lock, cfs_atomic_read(&lock->cll_ref),
                   lock->cll_state, lock->cll_error, lock->cll_holds,
                   lock->cll_users, lock->cll_flags);
        cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
        (*printer)(env, cookie, " {\n");

        cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                (*printer)(env, cookie, "    %s@%p: ",
                           slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
                           slice);
                if (slice->cls_ops->clo_print != NULL)
                        slice->cls_ops->clo_print(env, cookie, printer, slice);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} lock@%p\n", lock);
}
EXPORT_SYMBOL(cl_lock_print);
int cl_lock_init(void)
{
        return lu_kmem_init(cl_lock_caches);
}

void cl_lock_fini(void)
{
        lu_kmem_fini(cl_lock_caches);
}