4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2013, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
39 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
42 #define DEBUG_SUBSYSTEM S_CLASS
44 #include <obd_class.h>
45 #include <obd_support.h>
46 #include <lustre_fid.h>
47 #include <libcfs/list.h>
48 #include <cl_object.h>
49 #include "cl_internal.h"
51 /** Lock class of cl_lock::cll_guard */
52 static struct lock_class_key cl_lock_guard_class;
53 static struct kmem_cache *cl_lock_kmem;
55 static struct lu_kmem_descr cl_lock_caches[] = {
57 .ckd_cache = &cl_lock_kmem,
58 .ckd_name = "cl_lock_kmem",
59 .ckd_size = sizeof (struct cl_lock)
66 #ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
67 #define CS_LOCK_INC(o, item) \
68 cfs_atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
69 #define CS_LOCK_DEC(o, item) \
70 cfs_atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
71 #define CS_LOCKSTATE_INC(o, state) \
72 cfs_atomic_inc(&cl_object_site(o)->cs_locks_state[state])
73 #define CS_LOCKSTATE_DEC(o, state) \
74 cfs_atomic_dec(&cl_object_site(o)->cs_locks_state[state])
75 #else
76 #define CS_LOCK_INC(o, item)
77 #define CS_LOCK_DEC(o, item)
78 #define CS_LOCKSTATE_INC(o, state)
79 #define CS_LOCKSTATE_DEC(o, state)
83 * Basic lock invariant that is maintained at all times. Caller either has a
84 * reference to \a lock, or somehow assures that \a lock cannot be freed.
86 * \see cl_lock_invariant()
88 static int cl_lock_invariant_trusted(const struct lu_env *env,
89 const struct cl_lock *lock)
91 return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
92 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
93 lock->cll_holds >= lock->cll_users &&
94 lock->cll_holds >= 0 &&
95 lock->cll_users >= 0 &&
100 * Stronger lock invariant, checking that caller has a reference on a lock.
102 * \see cl_lock_invariant_trusted()
104 static int cl_lock_invariant(const struct lu_env *env,
105 const struct cl_lock *lock)
109 result = cfs_atomic_read(&lock->cll_ref) > 0 &&
110 cl_lock_invariant_trusted(env, lock);
111 if (!result && env != NULL)
112 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
117 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
119 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
121 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
125 * Returns a set of counters for this lock, depending on its nesting level.
127 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
128 const struct cl_lock *lock)
130 struct cl_thread_info *info;
131 enum clt_nesting_level nesting;
133 info = cl_env_info(env);
134 nesting = cl_lock_nesting(lock);
135 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
136 return &info->clt_counters[nesting];
139 static void cl_lock_trace0(int level, const struct lu_env *env,
140 const char *prefix, const struct cl_lock *lock,
141 const char *func, const int line)
143 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
144 CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
145 "(%p/%d/%d) at %s():%d\n",
146 prefix, lock, cfs_atomic_read(&lock->cll_ref),
147 lock->cll_guarder, lock->cll_depth,
148 lock->cll_state, lock->cll_error, lock->cll_holds,
149 lock->cll_users, lock->cll_flags,
150 env, h->coh_nesting, cl_lock_nr_mutexed(env),
153 #define cl_lock_trace(level, env, prefix, lock) \
154 cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
156 #define RETIP ((unsigned long)__builtin_return_address(0))
158 #ifdef CONFIG_LOCKDEP
159 static struct lock_class_key cl_lock_key;
161 static void cl_lock_lockdep_init(struct cl_lock *lock)
163 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
166 static void cl_lock_lockdep_acquire(const struct lu_env *env,
167 struct cl_lock *lock, __u32 enqflags)
169 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
170 lock_map_acquire(&lock->dep_map);
173 static void cl_lock_lockdep_release(const struct lu_env *env,
174 struct cl_lock *lock)
176 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
177 lock_map_release(&lock->dep_map);
180 #else /* !CONFIG_LOCKDEP */
182 static void cl_lock_lockdep_init(struct cl_lock *lock)
184 static void cl_lock_lockdep_acquire(const struct lu_env *env,
185 struct cl_lock *lock, __u32 enqflags)
187 static void cl_lock_lockdep_release(const struct lu_env *env,
188 struct cl_lock *lock)
191 #endif /* !CONFIG_LOCKDEP */
194 * Adds lock slice to the compound lock.
196 * This is called by cl_object_operations::coo_lock_init() methods to add a
197 * per-layer state to the lock. New state is added at the end of
198 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
200 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
202 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
203 struct cl_object *obj,
204 const struct cl_lock_operations *ops)
207 slice->cls_lock = lock;
208 cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
209 slice->cls_obj = obj;
210 slice->cls_ops = ops;
213 EXPORT_SYMBOL(cl_lock_slice_add);
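/*
 * A minimal sketch of how a layer's cl_object_operations::coo_lock_init()
 * method typically uses cl_lock_slice_add(). Everything prefixed "mylayer_"
 * is hypothetical and only illustrates the call; the matching clo_fini()
 * method would free the slice again.
 *
 * \code
 *	static int mylayer_lock_init(const struct lu_env *env,
 *				     struct cl_object *obj,
 *				     struct cl_lock *lock,
 *				     const struct cl_io *io)
 *	{
 *		struct mylayer_lock *mls;   // embeds a struct cl_lock_slice mls_cl
 *
 *		OBD_ALLOC_PTR(mls);
 *		if (mls == NULL)
 *			return -ENOMEM;
 *		cl_lock_slice_add(lock, &mls->mls_cl, obj, &mylayer_lock_ops);
 *		return 0;
 *	}
 * \endcode
 */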
216 * Returns true iff a lock with the mode \a has provides at least the same
217 * guarantees as a lock with the mode \a need.
219 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
221 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
222 need == CLM_PHANTOM || need == CLM_GROUP);
223 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
224 has == CLM_PHANTOM || has == CLM_GROUP);
225 CLASSERT(CLM_PHANTOM < CLM_READ);
226 CLASSERT(CLM_READ < CLM_WRITE);
227 CLASSERT(CLM_WRITE < CLM_GROUP);
229 if (has != CLM_GROUP)
234 EXPORT_SYMBOL(cl_lock_mode_match);
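/*
 * Illustrative only (not part of the original code): how the mode ordering
 * asserted above translates into concrete compatibility checks.
 *
 * \code
 *	LASSERT(cl_lock_mode_match(CLM_WRITE, CLM_READ));  // write covers read
 *	LASSERT(!cl_lock_mode_match(CLM_READ, CLM_WRITE)); // read does not cover write
 *	LASSERT(cl_lock_mode_match(CLM_GROUP, CLM_GROUP)); // group only matches group
 *	LASSERT(!cl_lock_mode_match(CLM_GROUP, CLM_READ));
 * \endcode
 */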
237 * Returns true iff extent portions of lock descriptions match.
239 int cl_lock_ext_match(const struct cl_lock_descr *has,
240 const struct cl_lock_descr *need)
243 has->cld_start <= need->cld_start &&
244 has->cld_end >= need->cld_end &&
245 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
246 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
248 EXPORT_SYMBOL(cl_lock_ext_match);
251 * Returns true iff a lock with the description \a has provides at least the
252 * same guarantees as a lock with the description \a need.
254 int cl_lock_descr_match(const struct cl_lock_descr *has,
255 const struct cl_lock_descr *need)
258 cl_object_same(has->cld_obj, need->cld_obj) &&
259 cl_lock_ext_match(has, need);
261 EXPORT_SYMBOL(cl_lock_descr_match);
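/*
 * A hedged usage sketch (not from the original source; "obj" stands for any
 * valid cl_object the caller already holds, and the page indices are
 * arbitrary): a wide CLM_WRITE description satisfies a narrower CLM_READ
 * request on the same object.
 *
 * \code
 *	struct cl_lock_descr has = {
 *		.cld_obj   = obj,
 *		.cld_mode  = CLM_WRITE,
 *		.cld_start = 0,
 *		.cld_end   = 99,
 *	};
 *	struct cl_lock_descr need = {
 *		.cld_obj   = obj,
 *		.cld_mode  = CLM_READ,
 *		.cld_start = 10,
 *		.cld_end   = 20,
 *	};
 *
 *	LASSERT(cl_lock_descr_match(&has, &need));
 * \endcode
 */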
263 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
265 struct cl_object *obj = lock->cll_descr.cld_obj;
267 LINVRNT(!cl_lock_is_mutexed(lock));
270 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
272 while (!cfs_list_empty(&lock->cll_layers)) {
273 struct cl_lock_slice *slice;
275 slice = cfs_list_entry(lock->cll_layers.next,
276 struct cl_lock_slice, cls_linkage);
277 cfs_list_del_init(lock->cll_layers.next);
278 slice->cls_ops->clo_fini(env, slice);
280 CS_LOCK_DEC(obj, total);
281 CS_LOCKSTATE_DEC(obj, lock->cll_state);
282 lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
283 cl_object_put(env, obj);
284 lu_ref_fini(&lock->cll_reference);
285 lu_ref_fini(&lock->cll_holders);
286 mutex_destroy(&lock->cll_guard);
287 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
292 * Releases a reference on a lock.
294 * When last reference is released, lock is returned to the cache, unless it
295 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
298 * \see cl_object_put(), cl_page_put()
300 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
302 struct cl_object *obj;
304 LINVRNT(cl_lock_invariant(env, lock));
306 obj = lock->cll_descr.cld_obj;
307 LINVRNT(obj != NULL);
309 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
310 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
312 if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
313 if (lock->cll_state == CLS_FREEING) {
314 LASSERT(cfs_list_empty(&lock->cll_linkage));
315 cl_lock_free(env, lock);
317 CS_LOCK_DEC(obj, busy);
321 EXPORT_SYMBOL(cl_lock_put);
324 * Acquires an additional reference to a lock.
326 * This can be called only by a caller already possessing a reference to \a lock.
329 * \see cl_object_get(), cl_page_get()
331 void cl_lock_get(struct cl_lock *lock)
333 LINVRNT(cl_lock_invariant(NULL, lock));
334 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
335 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
336 cfs_atomic_inc(&lock->cll_ref);
338 EXPORT_SYMBOL(cl_lock_get);
341 * Acquires a reference to a lock.
343 * This is much like cl_lock_get(), except that this function can be used to
344 * acquire initial reference to the cached lock. Caller has to deal with all
345 * possible races. Use with care!
347 * \see cl_page_get_trust()
349 void cl_lock_get_trust(struct cl_lock *lock)
351 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
352 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
353 if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
354 CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
356 EXPORT_SYMBOL(cl_lock_get_trust);
359 * Helper function that destroys a lock that wasn't completely initialized.
361 * Other threads can acquire references to the top-lock through its
362 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
364 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
366 cl_lock_mutex_get(env, lock);
367 cl_lock_cancel(env, lock);
368 cl_lock_delete(env, lock);
369 cl_lock_mutex_put(env, lock);
370 cl_lock_put(env, lock);
373 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
374 struct cl_object *obj,
375 const struct cl_io *io,
376 const struct cl_lock_descr *descr)
378 struct cl_lock *lock;
379 struct lu_object_header *head;
382 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
384 cfs_atomic_set(&lock->cll_ref, 1);
385 lock->cll_descr = *descr;
386 lock->cll_state = CLS_NEW;
388 lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
390 CFS_INIT_LIST_HEAD(&lock->cll_layers);
391 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
392 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
393 lu_ref_init(&lock->cll_reference);
394 lu_ref_init(&lock->cll_holders);
395 mutex_init(&lock->cll_guard);
396 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
397 init_waitqueue_head(&lock->cll_wq);
398 head = obj->co_lu.lo_header;
399 CS_LOCKSTATE_INC(obj, CLS_NEW);
400 CS_LOCK_INC(obj, total);
401 CS_LOCK_INC(obj, create);
402 cl_lock_lockdep_init(lock);
403 cfs_list_for_each_entry(obj, &head->loh_layers,
407 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
409 cl_lock_finish(env, lock);
415 lock = ERR_PTR(-ENOMEM);
420 * Transfers the lock into the INTRANSIT state and returns the original state.
422 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
423 * \post state: CLS_INTRANSIT
426 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
427 struct cl_lock *lock)
429 enum cl_lock_state state = lock->cll_state;
431 LASSERT(cl_lock_is_mutexed(lock));
432 LASSERT(state != CLS_INTRANSIT);
433 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
434 "Malformed lock state %d.\n", state);
436 cl_lock_state_set(env, lock, CLS_INTRANSIT);
437 lock->cll_intransit_owner = current;
438 cl_lock_hold_add(env, lock, "intransit", current);
441 EXPORT_SYMBOL(cl_lock_intransit);
444 * Exits the INTRANSIT state and restores the lock to the given original state.
446 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
447 enum cl_lock_state state)
449 LASSERT(cl_lock_is_mutexed(lock));
450 LASSERT(lock->cll_state == CLS_INTRANSIT);
451 LASSERT(state != CLS_INTRANSIT);
452 LASSERT(lock->cll_intransit_owner == current);
454 lock->cll_intransit_owner = NULL;
455 cl_lock_state_set(env, lock, state);
456 cl_lock_unhold(env, lock, "intransit", current);
458 EXPORT_SYMBOL(cl_lock_extransit);
461 * Checks whether the lock is in the INTRANSIT state.
463 int cl_lock_is_intransit(struct cl_lock *lock)
465 LASSERT(cl_lock_is_mutexed(lock));
466 return lock->cll_state == CLS_INTRANSIT &&
467 lock->cll_intransit_owner != current;
469 EXPORT_SYMBOL(cl_lock_is_intransit);
471 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
472 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
473 * cover multiple stripes and can trigger cascading timeouts.
475 static int cl_lock_fits_into(const struct lu_env *env,
476 const struct cl_lock *lock,
477 const struct cl_lock_descr *need,
478 const struct cl_io *io)
480 const struct cl_lock_slice *slice;
482 LINVRNT(cl_lock_invariant_trusted(env, lock));
484 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
485 if (slice->cls_ops->clo_fits_into != NULL &&
486 !slice->cls_ops->clo_fits_into(env, slice, need, io))
492 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
493 struct cl_object *obj,
494 const struct cl_io *io,
495 const struct cl_lock_descr *need)
497 struct cl_lock *lock;
498 struct cl_object_header *head;
502 head = cl_object_header(obj);
503 LINVRNT(spin_is_locked(&head->coh_lock_guard));
504 CS_LOCK_INC(obj, lookup);
505 cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
508 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
509 lock->cll_state < CLS_FREEING &&
510 lock->cll_error == 0 &&
511 !(lock->cll_flags & CLF_CANCELLED) &&
512 cl_lock_fits_into(env, lock, need, io);
513 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
514 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
517 cl_lock_get_trust(lock);
518 CS_LOCK_INC(obj, hit);
526 * Returns a lock matching description \a need.
528 * This is the main entry point into the cl_lock caching interface. First, a
529 * cache (implemented as a per-object linked list) is consulted. If lock is
530 * found there, it is returned immediately. Otherwise new lock is allocated
531 * and returned. In any case, additional reference to lock is acquired.
533 * \see cl_object_find(), cl_page_find()
535 static struct cl_lock *cl_lock_find(const struct lu_env *env,
536 const struct cl_io *io,
537 const struct cl_lock_descr *need)
539 struct cl_object_header *head;
540 struct cl_object *obj;
541 struct cl_lock *lock;
546 head = cl_object_header(obj);
548 spin_lock(&head->coh_lock_guard);
549 lock = cl_lock_lookup(env, obj, io, need);
550 spin_unlock(&head->coh_lock_guard);
553 lock = cl_lock_alloc(env, obj, io, need);
555 struct cl_lock *ghost;
557 spin_lock(&head->coh_lock_guard);
558 ghost = cl_lock_lookup(env, obj, io, need);
560 cfs_list_add_tail(&lock->cll_linkage,
562 spin_unlock(&head->coh_lock_guard);
563 CS_LOCK_INC(obj, busy);
565 spin_unlock(&head->coh_lock_guard);
567 * Other threads can acquire references to the
568 * top-lock through its sub-locks. Hence, it
569 * cannot be cl_lock_free()-ed immediately.
571 cl_lock_finish(env, lock);
580 * Returns existing lock matching given description. This is similar to
581 * cl_lock_find() except that no new lock is created, and returned lock is
582 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
584 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
585 const struct cl_lock_descr *need,
586 const char *scope, const void *source)
588 struct cl_object_header *head;
589 struct cl_object *obj;
590 struct cl_lock *lock;
593 head = cl_object_header(obj);
596 spin_lock(&head->coh_lock_guard);
597 lock = cl_lock_lookup(env, obj, io, need);
598 spin_unlock(&head->coh_lock_guard);
602 cl_lock_mutex_get(env, lock);
603 if (lock->cll_state == CLS_INTRANSIT)
604 /* Don't care about the return value. */
605 cl_lock_state_wait(env, lock);
606 if (lock->cll_state == CLS_FREEING) {
607 cl_lock_mutex_put(env, lock);
608 cl_lock_put(env, lock);
611 } while (lock == NULL);
613 cl_lock_hold_add(env, lock, scope, source);
614 cl_lock_user_add(env, lock);
615 if (lock->cll_state == CLS_CACHED)
616 cl_use_try(env, lock, 1);
617 if (lock->cll_state == CLS_HELD) {
618 cl_lock_mutex_put(env, lock);
619 cl_lock_lockdep_acquire(env, lock, 0);
620 cl_lock_put(env, lock);
622 cl_unuse_try(env, lock);
623 cl_lock_unhold(env, lock, scope, source);
624 cl_lock_mutex_put(env, lock);
625 cl_lock_put(env, lock);
631 EXPORT_SYMBOL(cl_lock_peek);
634 * Returns a slice within a lock, corresponding to the given layer in the device stack.
639 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
640 const struct lu_device_type *dtype)
642 const struct cl_lock_slice *slice;
644 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
647 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
648 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
653 EXPORT_SYMBOL(cl_lock_at);
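/*
 * Hedged sketch: locating the slice a particular layer attached to a lock.
 * "mylayer_device_type" is a hypothetical lu_device_type; real callers pass
 * their own layer's device type.
 *
 * \code
 *	const struct cl_lock_slice *slice;
 *
 *	slice = cl_lock_at(lock, &mylayer_device_type);
 *	if (slice != NULL) {
 *		// this layer participates in the lock; cast to the layer's
 *		// private lock structure, e.g. with container_of()
 *	}
 * \endcode
 */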
655 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
657 struct cl_thread_counters *counters;
659 counters = cl_lock_counters(env, lock);
661 counters->ctc_nr_locks_locked++;
662 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
663 cl_lock_trace(D_TRACE, env, "got mutex", lock);
667 * Locks cl_lock object.
669 * This is used to manipulate cl_lock fields, and to serialize state
670 * transitions in the lock state machine.
672 * \post cl_lock_is_mutexed(lock)
674 * \see cl_lock_mutex_put()
676 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
678 LINVRNT(cl_lock_invariant(env, lock));
680 if (lock->cll_guarder == current) {
681 LINVRNT(cl_lock_is_mutexed(lock));
682 LINVRNT(lock->cll_depth > 0);
684 struct cl_object_header *hdr;
685 struct cl_thread_info *info;
688 LINVRNT(lock->cll_guarder != current);
689 hdr = cl_object_header(lock->cll_descr.cld_obj);
691 * Check that mutices are taken in the bottom-to-top order.
693 info = cl_env_info(env);
694 for (i = 0; i < hdr->coh_nesting; ++i)
695 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
696 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
697 lock->cll_guarder = current;
698 LINVRNT(lock->cll_depth == 0);
700 cl_lock_mutex_tail(env, lock);
702 EXPORT_SYMBOL(cl_lock_mutex_get);
705 * Try-locks cl_lock object.
707 * \retval 0 \a lock was successfully locked
709 * \retval -EBUSY \a lock cannot be locked right now
711 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
713 * \see cl_lock_mutex_get()
715 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
719 LINVRNT(cl_lock_invariant_trusted(env, lock));
723 if (lock->cll_guarder == current) {
724 LINVRNT(lock->cll_depth > 0);
725 cl_lock_mutex_tail(env, lock);
726 } else if (mutex_trylock(&lock->cll_guard)) {
727 LINVRNT(lock->cll_depth == 0);
728 lock->cll_guarder = current;
729 cl_lock_mutex_tail(env, lock);
734 EXPORT_SYMBOL(cl_lock_mutex_try);
737 * Unlocks cl_lock object.
739 * \pre cl_lock_is_mutexed(lock)
741 * \see cl_lock_mutex_get()
743 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
745 struct cl_thread_counters *counters;
747 LINVRNT(cl_lock_invariant(env, lock));
748 LINVRNT(cl_lock_is_mutexed(lock));
749 LINVRNT(lock->cll_guarder == current);
750 LINVRNT(lock->cll_depth > 0);
752 counters = cl_lock_counters(env, lock);
753 LINVRNT(counters->ctc_nr_locks_locked > 0);
755 cl_lock_trace(D_TRACE, env, "put mutex", lock);
756 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
757 counters->ctc_nr_locks_locked--;
758 if (--lock->cll_depth == 0) {
759 lock->cll_guarder = NULL;
760 mutex_unlock(&lock->cll_guard);
763 EXPORT_SYMBOL(cl_lock_mutex_put);
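/*
 * A hedged sketch of the usual mutex pairing: take the lock's mutex, inspect
 * (or change) its fields, then drop the mutex. "env" and "lock" are assumed
 * to be valid, with the caller holding a reference on the lock.
 *
 * \code
 *	cl_lock_mutex_get(env, lock);
 *	LASSERT(cl_lock_is_mutexed(lock));
 *	if (lock->cll_state == CLS_HELD)
 *		CDEBUG(D_DLMTRACE, "lock %p is granted\n", lock);
 *	cl_lock_mutex_put(env, lock);
 * \endcode
 */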
766 * Returns true iff lock's mutex is owned by the current thread.
768 int cl_lock_is_mutexed(struct cl_lock *lock)
770 return lock->cll_guarder == current;
772 EXPORT_SYMBOL(cl_lock_is_mutexed);
775 * Returns number of cl_lock mutices held by the current thread (environment).
777 int cl_lock_nr_mutexed(const struct lu_env *env)
779 struct cl_thread_info *info;
784 * NOTE: if summation across all nesting levels (currently 2) proves
785 * too expensive, a summary counter can be added to
786 * struct cl_thread_info.
788 info = cl_env_info(env);
789 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
790 locked += info->clt_counters[i].ctc_nr_locks_locked;
793 EXPORT_SYMBOL(cl_lock_nr_mutexed);
795 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
797 LINVRNT(cl_lock_is_mutexed(lock));
798 LINVRNT(cl_lock_invariant(env, lock));
800 if (!(lock->cll_flags & CLF_CANCELLED)) {
801 const struct cl_lock_slice *slice;
803 lock->cll_flags |= CLF_CANCELLED;
804 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
806 if (slice->cls_ops->clo_cancel != NULL)
807 slice->cls_ops->clo_cancel(env, slice);
813 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
815 struct cl_object_header *head;
816 const struct cl_lock_slice *slice;
818 LINVRNT(cl_lock_is_mutexed(lock));
819 LINVRNT(cl_lock_invariant(env, lock));
822 if (lock->cll_state < CLS_FREEING) {
823 LASSERT(lock->cll_state != CLS_INTRANSIT);
824 cl_lock_state_set(env, lock, CLS_FREEING);
826 head = cl_object_header(lock->cll_descr.cld_obj);
828 spin_lock(&head->coh_lock_guard);
829 cfs_list_del_init(&lock->cll_linkage);
830 spin_unlock(&head->coh_lock_guard);
833 * From now on, no new references to this lock can be acquired
834 * by cl_lock_lookup().
836 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
838 if (slice->cls_ops->clo_delete != NULL)
839 slice->cls_ops->clo_delete(env, slice);
842 * From now on, no new references to this lock can be acquired
843 * by layer-specific means (like a pointer from struct
844 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
847 * Lock will be finally freed in cl_lock_put() when last of
848 * existing references goes away.
855 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
856 * top-lock (nesting == 0) accounts for this modification in the per-thread
857 * debugging counters. Sub-lock holds can be released by a thread different
858 * from the one that acquired it.
860 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
863 struct cl_thread_counters *counters;
864 enum clt_nesting_level nesting;
866 lock->cll_holds += delta;
867 nesting = cl_lock_nesting(lock);
868 if (nesting == CNL_TOP) {
869 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
870 counters->ctc_nr_held += delta;
871 LASSERT(counters->ctc_nr_held >= 0);
876 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
877 * cl_lock_hold_mod() for the explanation of the debugging code.
879 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
882 struct cl_thread_counters *counters;
883 enum clt_nesting_level nesting;
885 lock->cll_users += delta;
886 nesting = cl_lock_nesting(lock);
887 if (nesting == CNL_TOP) {
888 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
889 counters->ctc_nr_used += delta;
890 LASSERT(counters->ctc_nr_used >= 0);
894 void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
895 const char *scope, const void *source)
897 LINVRNT(cl_lock_is_mutexed(lock));
898 LINVRNT(cl_lock_invariant(env, lock));
899 LASSERT(lock->cll_holds > 0);
902 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
903 lu_ref_del(&lock->cll_holders, scope, source);
904 cl_lock_hold_mod(env, lock, -1);
905 if (lock->cll_holds == 0) {
906 CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
907 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
908 lock->cll_descr.cld_mode == CLM_GROUP ||
909 lock->cll_state != CLS_CACHED)
911 * If lock is still phantom or grouplock when user is
912 * done with it---destroy the lock.
914 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
915 if (lock->cll_flags & CLF_CANCELPEND) {
916 lock->cll_flags &= ~CLF_CANCELPEND;
917 cl_lock_cancel0(env, lock);
919 if (lock->cll_flags & CLF_DOOMED) {
920 /* no longer doomed: it's dead... Jim. */
921 lock->cll_flags &= ~CLF_DOOMED;
922 cl_lock_delete0(env, lock);
927 EXPORT_SYMBOL(cl_lock_hold_release);
930 * Waits until lock state is changed.
932 * This function is called with cl_lock mutex locked, atomically releases
933 * mutex and goes to sleep, waiting for a lock state change (signaled by
934 * cl_lock_signal()), and re-acquires the mutex before return.
936 * This function is used to wait until lock state machine makes some progress
937 * and to emulate synchronous operations on top of an asynchronous lock interface.
940 * \retval -EINTR wait was interrupted
942 * \retval 0 wait wasn't interrupted
944 * \pre cl_lock_is_mutexed(lock)
946 * \see cl_lock_signal()
948 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
955 LINVRNT(cl_lock_is_mutexed(lock));
956 LINVRNT(cl_lock_invariant(env, lock));
957 LASSERT(lock->cll_depth == 1);
958 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
960 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
961 result = lock->cll_error;
963 /* To avoid being interrupted by the 'non-fatal' signals
964 * (SIGCHLD, for instance), we block them temporarily.
966 blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
968 init_waitqueue_entry_current(&waiter);
969 add_wait_queue(&lock->cll_wq, &waiter);
970 set_current_state(TASK_INTERRUPTIBLE);
971 cl_lock_mutex_put(env, lock);
973 LASSERT(cl_lock_nr_mutexed(env) == 0);
975 /* Returning ERESTARTSYS instead of EINTR so syscalls
976 * can be restarted if signals are pending here */
977 result = -ERESTARTSYS;
978 if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
979 waitq_wait(&waiter, TASK_INTERRUPTIBLE);
980 if (!cfs_signal_pending())
984 cl_lock_mutex_get(env, lock);
985 set_current_state(TASK_RUNNING);
986 remove_wait_queue(&lock->cll_wq, &waiter);
988 /* Restore old blocked signals */
989 cfs_restore_sigs(blocked);
993 EXPORT_SYMBOL(cl_lock_state_wait);
995 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
996 enum cl_lock_state state)
998 const struct cl_lock_slice *slice;
1001 LINVRNT(cl_lock_is_mutexed(lock));
1002 LINVRNT(cl_lock_invariant(env, lock));
1004 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
1005 if (slice->cls_ops->clo_state != NULL)
1006 slice->cls_ops->clo_state(env, slice, state);
1007 wake_up_all(&lock->cll_wq);
1012 * Notifies waiters that lock state changed.
1014 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1015 * layers about state change by calling cl_lock_operations::clo_state()
1018 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1021 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1022 cl_lock_state_signal(env, lock, lock->cll_state);
1025 EXPORT_SYMBOL(cl_lock_signal);
1028 * Changes lock state.
1030 * This function is invoked to notify layers that lock state changed, possibly
1031 * as a result of an asynchronous event such as call-back reception.
1033 * \post lock->cll_state == state
1035 * \see cl_lock_operations::clo_state()
1037 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1038 enum cl_lock_state state)
1041 LASSERT(lock->cll_state <= state ||
1042 (lock->cll_state == CLS_CACHED &&
1043 (state == CLS_HELD || /* lock found in cache */
1044 state == CLS_NEW || /* sub-lock canceled */
1045 state == CLS_INTRANSIT)) ||
1046 /* lock is in transit state */
1047 lock->cll_state == CLS_INTRANSIT);
1049 if (lock->cll_state != state) {
1050 CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
1051 CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
1053 cl_lock_state_signal(env, lock, state);
1054 lock->cll_state = state;
1058 EXPORT_SYMBOL(cl_lock_state_set);
1060 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1062 const struct cl_lock_slice *slice;
1068 LINVRNT(cl_lock_is_mutexed(lock));
1069 LINVRNT(cl_lock_invariant(env, lock));
1070 LASSERT(lock->cll_state == CLS_INTRANSIT);
1073 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1075 if (slice->cls_ops->clo_unuse != NULL) {
1076 result = slice->cls_ops->clo_unuse(env, slice);
1081 LASSERT(result != -ENOSYS);
1082 } while (result == CLO_REPEAT);
1088 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1089 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1090 * If @atomic is 1, a failed use is rolled back by unusing the lock, so that
1091 * the whole use operation stays atomic.
1093 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1095 const struct cl_lock_slice *slice;
1097 enum cl_lock_state state;
1100 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1102 LASSERT(lock->cll_state == CLS_CACHED);
1103 if (lock->cll_error)
1104 RETURN(lock->cll_error);
1107 state = cl_lock_intransit(env, lock);
1108 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1109 if (slice->cls_ops->clo_use != NULL) {
1110 result = slice->cls_ops->clo_use(env, slice);
1115 LASSERT(result != -ENOSYS);
1117 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1123 if (result == -ESTALE) {
1125 * ESTALE means the sublock is being
1126 * cancelled at this time; set the lock state
1127 * back to NEW here and ask the caller to repeat.
1130 result = CLO_REPEAT;
1133 /* @atomic means back-off-on-failure. */
1136 rc = cl_unuse_try_internal(env, lock);
1137 /* Vet the results. */
1138 if (rc < 0 && result > 0)
1143 cl_lock_extransit(env, lock, state);
1146 EXPORT_SYMBOL(cl_use_try);
1149 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1152 static int cl_enqueue_kick(const struct lu_env *env,
1153 struct cl_lock *lock,
1154 struct cl_io *io, __u32 flags)
1157 const struct cl_lock_slice *slice;
1161 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1162 if (slice->cls_ops->clo_enqueue != NULL) {
1163 result = slice->cls_ops->clo_enqueue(env,
1169 LASSERT(result != -ENOSYS);
1174 * Tries to enqueue a lock.
1176 * This function is called repeatedly by cl_enqueue() until either lock is
1177 * enqueued, or error occurs. This function does not block waiting for
1178 * network communication to complete.
1180 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1181 * lock->cll_state == CLS_HELD)
1183 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1184 * \see cl_lock_state::CLS_ENQUEUED
1186 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1187 struct cl_io *io, __u32 flags)
1192 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1194 LINVRNT(cl_lock_is_mutexed(lock));
1196 result = lock->cll_error;
1200 switch (lock->cll_state) {
1202 cl_lock_state_set(env, lock, CLS_QUEUING);
1206 result = cl_enqueue_kick(env, lock, io, flags);
1207 /* For AGL case, the cl_lock::cll_state may
1208 * become CLS_HELD already. */
1209 if (result == 0 && lock->cll_state == CLS_QUEUING)
1210 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1213 LASSERT(cl_lock_is_intransit(lock));
1217 /* yank lock from the cache. */
1218 result = cl_use_try(env, lock, 0);
1227 * impossible, only held locks with increased
1228 * ->cll_holds can be enqueued, and they cannot be
1233 } while (result == CLO_REPEAT);
1236 EXPORT_SYMBOL(cl_enqueue_try);
1239 * Cancels the conflicting lock found during a previous enqueue.
1241 * \retval 0 conflicting lock has been canceled.
1242 * \retval -ve error code.
1244 int cl_lock_enqueue_wait(const struct lu_env *env,
1245 struct cl_lock *lock,
1248 struct cl_lock *conflict;
1252 LASSERT(cl_lock_is_mutexed(lock));
1253 LASSERT(lock->cll_state == CLS_QUEUING);
1254 LASSERT(lock->cll_conflict != NULL);
1256 conflict = lock->cll_conflict;
1257 lock->cll_conflict = NULL;
1259 cl_lock_mutex_put(env, lock);
1260 LASSERT(cl_lock_nr_mutexed(env) == 0);
1262 cl_lock_mutex_get(env, conflict);
1263 cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
1264 cl_lock_cancel(env, conflict);
1265 cl_lock_delete(env, conflict);
1267 while (conflict->cll_state != CLS_FREEING) {
1268 rc = cl_lock_state_wait(env, conflict);
1272 cl_lock_mutex_put(env, conflict);
1273 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1274 cl_lock_put(env, conflict);
1277 cl_lock_mutex_get(env, lock);
1282 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1284 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1285 struct cl_io *io, __u32 enqflags)
1291 LINVRNT(cl_lock_is_mutexed(lock));
1292 LINVRNT(cl_lock_invariant(env, lock));
1293 LASSERT(lock->cll_holds > 0);
1295 cl_lock_user_add(env, lock);
1297 result = cl_enqueue_try(env, lock, io, enqflags);
1298 if (result == CLO_WAIT) {
1299 if (lock->cll_conflict != NULL)
1300 result = cl_lock_enqueue_wait(env, lock, 1);
1302 result = cl_lock_state_wait(env, lock);
1309 cl_unuse_try(env, lock);
1310 LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
1311 lock->cll_state == CLS_ENQUEUED ||
1312 lock->cll_state == CLS_HELD));
1319 * \pre current thread or io owns a hold on lock.
1321 * \post ergo(result == 0, lock->users increased)
1322 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1323 * lock->cll_state == CLS_HELD)
1325 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1326 struct cl_io *io, __u32 enqflags)
1332 cl_lock_lockdep_acquire(env, lock, enqflags);
1333 cl_lock_mutex_get(env, lock);
1334 result = cl_enqueue_locked(env, lock, io, enqflags);
1335 cl_lock_mutex_put(env, lock);
1337 cl_lock_lockdep_release(env, lock);
1338 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1339 lock->cll_state == CLS_HELD));
1342 EXPORT_SYMBOL(cl_enqueue);
1345 * Tries to unlock a lock.
1347 * This function is called to release the underlying resource:
1348 * 1. for a top-lock, the resource is the sub-locks it holds;
1349 * 2. for a sub-lock, the resource is the reference to the dlmlock.
1351 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1353 * \see cl_unuse() cl_lock_operations::clo_unuse()
1354 * \see cl_lock_state::CLS_CACHED
1356 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1359 enum cl_lock_state state = CLS_NEW;
1362 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1364 if (lock->cll_users > 1) {
1365 cl_lock_user_del(env, lock);
1369 /* Only a lock in CLS_HELD or CLS_ENQUEUED state can hold
1370 * underlying resources. */
1371 if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
1372 cl_lock_user_del(env, lock);
1377 * New lock users (->cll_users) do not prevent unlocking
1378 * from proceeding. From this point, the lock eventually reaches
1379 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1382 state = cl_lock_intransit(env, lock);
1384 result = cl_unuse_try_internal(env, lock);
1385 LASSERT(lock->cll_state == CLS_INTRANSIT);
1386 LASSERT(result != CLO_WAIT);
1387 cl_lock_user_del(env, lock);
1388 if (result == 0 || result == -ESTALE) {
1390 * Return lock back to the cache. This is the only
1391 * place where lock is moved into CLS_CACHED state.
1393 * If one of ->clo_unuse() methods returned -ESTALE, lock
1394 * cannot be placed into cache and has to be
1395 * re-initialized. This happens e.g., when a sub-lock was
1396 * canceled while unlocking was in progress.
1398 if (state == CLS_HELD && result == 0)
1402 cl_lock_extransit(env, lock, state);
1405 * Hide -ESTALE error.
1406 * Suppose the lock is a glimpse lock with multiple
1407 * stripes, one of its sub-locks returned -ENAVAIL,
1408 * and the other sub-locks matched write locks. In this case,
1409 * we can't set this lock to error, because otherwise some of
1410 * its sub-locks may not be canceled, and some dirty
1411 * pages would never be written to the OSTs. -jay
1415 CERROR("result = %d, this is unlikely!\n", result);
1417 cl_lock_extransit(env, lock, state);
1419 RETURN(result ?: lock->cll_error);
1421 EXPORT_SYMBOL(cl_unuse_try);
1423 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1428 result = cl_unuse_try(env, lock);
1430 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1438 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1441 cl_lock_mutex_get(env, lock);
1442 cl_unuse_locked(env, lock);
1443 cl_lock_mutex_put(env, lock);
1444 cl_lock_lockdep_release(env, lock);
1447 EXPORT_SYMBOL(cl_unuse);
1450 * Tries to wait for a lock.
1452 * This function is called repeatedly by cl_wait() until either lock is
1453 * granted, or error occurs. This function does not block waiting for network
1454 * communication to complete.
1456 * \see cl_wait() cl_lock_operations::clo_wait()
1457 * \see cl_lock_state::CLS_HELD
1459 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1461 const struct cl_lock_slice *slice;
1465 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1467 LINVRNT(cl_lock_is_mutexed(lock));
1468 LINVRNT(cl_lock_invariant(env, lock));
1469 LASSERTF(lock->cll_state == CLS_QUEUING ||
1470 lock->cll_state == CLS_ENQUEUED ||
1471 lock->cll_state == CLS_HELD ||
1472 lock->cll_state == CLS_INTRANSIT,
1473 "lock state: %d\n", lock->cll_state);
1474 LASSERT(lock->cll_users > 0);
1475 LASSERT(lock->cll_holds > 0);
1477 result = lock->cll_error;
1481 if (cl_lock_is_intransit(lock)) {
1486 if (lock->cll_state == CLS_HELD)
1491 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1492 if (slice->cls_ops->clo_wait != NULL) {
1493 result = slice->cls_ops->clo_wait(env, slice);
1498 LASSERT(result != -ENOSYS);
1500 LASSERT(lock->cll_state != CLS_INTRANSIT);
1501 cl_lock_state_set(env, lock, CLS_HELD);
1503 } while (result == CLO_REPEAT);
1506 EXPORT_SYMBOL(cl_wait_try);
1509 * Waits until enqueued lock is granted.
1511 * \pre current thread or io owns a hold on the lock
1512 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1513 * lock->cll_state == CLS_HELD)
1515 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1517 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1522 cl_lock_mutex_get(env, lock);
1524 LINVRNT(cl_lock_invariant(env, lock));
1525 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1526 "Wrong state %d \n", lock->cll_state);
1527 LASSERT(lock->cll_holds > 0);
1530 result = cl_wait_try(env, lock);
1531 if (result == CLO_WAIT) {
1532 result = cl_lock_state_wait(env, lock);
1539 cl_unuse_try(env, lock);
1540 cl_lock_lockdep_release(env, lock);
1542 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1543 cl_lock_mutex_put(env, lock);
1544 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1547 EXPORT_SYMBOL(cl_wait);
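/*
 * A hedged end-to-end sketch of the lifecycle described by the comments in
 * this file: hold, enqueue, wait until granted, use the extent, unuse,
 * release. Error handling is trimmed; "env", "io" and "need" are assumed to
 * be prepared by the caller, and "myscope"/"cookie" are arbitrary debugging
 * tags for lu_ref tracking.
 *
 * \code
 *	struct cl_lock *lock;
 *	int rc;
 *
 *	lock = cl_lock_hold(env, io, need, "myscope", cookie);
 *	if (IS_ERR(lock))
 *		return PTR_ERR(lock);
 *
 *	rc = cl_enqueue(env, lock, io, need->cld_enq_flags);
 *	if (rc == 0) {
 *		rc = cl_wait(env, lock);
 *		if (rc == 0) {
 *			// lock is in CLS_HELD state here; do I/O under it
 *			cl_unuse(env, lock);
 *		}
 *	}
 *	cl_lock_release(env, lock, "myscope", cookie);
 *	return rc;
 * \endcode
 */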
1550 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock weight.
1553 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1555 const struct cl_lock_slice *slice;
1556 unsigned long pound;
1557 unsigned long ounce;
1560 LINVRNT(cl_lock_is_mutexed(lock));
1561 LINVRNT(cl_lock_invariant(env, lock));
1564 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1565 if (slice->cls_ops->clo_weigh != NULL) {
1566 ounce = slice->cls_ops->clo_weigh(env, slice);
1568 if (pound < ounce) /* over-weight^Wflow */
1574 EXPORT_SYMBOL(cl_lock_weigh);
1577 * Notifies layers that lock description changed.
1579 * The server can grant the client a lock different from the one requested (e.g.,
1580 * larger in extent). This method is called when the actually granted lock
1581 * description becomes known, to let layers accommodate the changed lock description.
1584 * \see cl_lock_operations::clo_modify()
1586 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1587 const struct cl_lock_descr *desc)
1589 const struct cl_lock_slice *slice;
1590 struct cl_object *obj = lock->cll_descr.cld_obj;
1591 struct cl_object_header *hdr = cl_object_header(obj);
1595 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1596 /* don't allow object to change */
1597 LASSERT(obj == desc->cld_obj);
1598 LINVRNT(cl_lock_is_mutexed(lock));
1599 LINVRNT(cl_lock_invariant(env, lock));
1601 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1602 if (slice->cls_ops->clo_modify != NULL) {
1603 result = slice->cls_ops->clo_modify(env, slice, desc);
1608 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1609 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1611 * Just replace description in place. Nothing more is needed for
1612 * now. If locks were indexed according to their extent and/or mode,
1613 * that index would have to be updated here.
1615 spin_lock(&hdr->coh_lock_guard);
1616 lock->cll_descr = *desc;
1617 spin_unlock(&hdr->coh_lock_guard);
1620 EXPORT_SYMBOL(cl_lock_modify);
1623 * Initializes lock closure with a given origin.
1625 * \see cl_lock_closure
1627 void cl_lock_closure_init(const struct lu_env *env,
1628 struct cl_lock_closure *closure,
1629 struct cl_lock *origin, int wait)
1631 LINVRNT(cl_lock_is_mutexed(origin));
1632 LINVRNT(cl_lock_invariant(env, origin));
1634 CFS_INIT_LIST_HEAD(&closure->clc_list);
1635 closure->clc_origin = origin;
1636 closure->clc_wait = wait;
1637 closure->clc_nr = 0;
1639 EXPORT_SYMBOL(cl_lock_closure_init);
1642 * Builds a closure of \a lock.
1644 * Building of a closure consists of adding initial lock (\a lock) into it,
1645 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1646 * methods might call cl_lock_closure_build() recursively again, adding more
1647 * locks to the closure, etc.
1649 * \see cl_lock_closure
1651 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1652 struct cl_lock_closure *closure)
1654 const struct cl_lock_slice *slice;
1658 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1659 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1661 result = cl_lock_enclosure(env, lock, closure);
1663 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1664 if (slice->cls_ops->clo_closure != NULL) {
1665 result = slice->cls_ops->clo_closure(env, slice,
1673 cl_lock_disclosure(env, closure);
1676 EXPORT_SYMBOL(cl_lock_closure_build);
1679 * Adds new lock to a closure.
1681 * Try-locks \a lock and, if successful, adds it to the closure (never more than
1682 * once). If the try-lock fails, returns CLO_REPEAT, after optionally waiting
1683 * until the next try-lock is likely to succeed.
1685 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1686 struct cl_lock_closure *closure)
1690 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1691 if (!cl_lock_mutex_try(env, lock)) {
1693 * If lock->cll_inclosure is not empty, lock is already in
1696 if (cfs_list_empty(&lock->cll_inclosure)) {
1697 cl_lock_get_trust(lock);
1698 lu_ref_add(&lock->cll_reference, "closure", closure);
1699 cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1702 cl_lock_mutex_put(env, lock);
1705 cl_lock_disclosure(env, closure);
1706 if (closure->clc_wait) {
1707 cl_lock_get_trust(lock);
1708 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1709 cl_lock_mutex_put(env, closure->clc_origin);
1711 LASSERT(cl_lock_nr_mutexed(env) == 0);
1712 cl_lock_mutex_get(env, lock);
1713 cl_lock_mutex_put(env, lock);
1715 cl_lock_mutex_get(env, closure->clc_origin);
1716 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1717 cl_lock_put(env, lock);
1719 result = CLO_REPEAT;
1723 EXPORT_SYMBOL(cl_lock_enclosure);
1725 /** Releases mutices of enclosed locks. */
1726 void cl_lock_disclosure(const struct lu_env *env,
1727 struct cl_lock_closure *closure)
1729 struct cl_lock *scan;
1730 struct cl_lock *temp;
1732 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1733 cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1735 cfs_list_del_init(&scan->cll_inclosure);
1736 cl_lock_mutex_put(env, scan);
1737 lu_ref_del(&scan->cll_reference, "closure", closure);
1738 cl_lock_put(env, scan);
1741 LASSERT(closure->clc_nr == 0);
1743 EXPORT_SYMBOL(cl_lock_disclosure);
1745 /** Finalizes a closure. */
1746 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1748 LASSERT(closure->clc_nr == 0);
1749 LASSERT(cfs_list_empty(&closure->clc_list));
1751 EXPORT_SYMBOL(cl_lock_closure_fini);
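/*
 * A hedged sketch of the closure life cycle implied by the functions above:
 * initialize the closure around a mutexed origin lock, pull another lock into
 * it, then drop all mutices and finalize. "origin" and "other" are assumed to
 * be valid, referenced locks with origin's mutex already held; callers
 * typically retry the whole sequence when CLO_REPEAT is returned.
 *
 * \code
 *	struct cl_lock_closure closure;
 *	int rc;
 *
 *	cl_lock_closure_init(env, &closure, origin, 0);
 *	rc = cl_lock_closure_build(env, other, &closure);
 *	if (rc == 0) {
 *		// both origin and other are mutexed at this point
 *		cl_lock_disclosure(env, &closure);
 *	}
 *	// on failure cl_lock_closure_build() has already disclosed the closure
 *	cl_lock_closure_fini(&closure);
 * \endcode
 */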
1754 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1755 * destroyed, then destroys it. If there are holds on the lock, destruction is
1756 * postponed until all holds are released. This is called when a decision is
1757 * made to destroy the lock in the future, e.g., when a blocking AST is
1758 * received on it, or a fatal communication error happens.
1760 * Caller must have a reference on this lock to prevent a situation where the
1761 * deleted lock lingers in memory indefinitely, because nobody calls
1762 * cl_lock_put() to finish it.
1764 * \pre atomic_read(&lock->cll_ref) > 0
1765 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1766 * cl_lock_nr_mutexed(env) == 1)
1767 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1768 * held, as deletion of sub-locks might require releasing a top-lock
1771 * \see cl_lock_operations::clo_delete()
1772 * \see cl_lock::cll_holds
1774 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1776 LINVRNT(cl_lock_is_mutexed(lock));
1777 LINVRNT(cl_lock_invariant(env, lock));
1778 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1779 cl_lock_nr_mutexed(env) == 1));
1782 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1783 if (lock->cll_holds == 0)
1784 cl_lock_delete0(env, lock);
1786 lock->cll_flags |= CLF_DOOMED;
1789 EXPORT_SYMBOL(cl_lock_delete);
1792 * Marks the lock as irrecoverably failed and marks it for destruction. This
1793 * happens when, e.g., the server fails to grant a lock to us, or networking
1796 * \pre atomic_read(&lock->cll_ref) > 0
1798 * \see clo_lock_delete()
1799 * \see cl_lock::cll_holds
1801 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1803 LINVRNT(cl_lock_is_mutexed(lock));
1804 LINVRNT(cl_lock_invariant(env, lock));
1807 if (lock->cll_error == 0 && error != 0) {
1808 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1809 lock->cll_error = error;
1810 cl_lock_signal(env, lock);
1811 cl_lock_cancel(env, lock);
1812 cl_lock_delete(env, lock);
1816 EXPORT_SYMBOL(cl_lock_error);
1819 * Cancels this lock. Notifies layers
1820 * (bottom-to-top) that the lock is being cancelled, then destroys the lock. If
1821 * there are holds on the lock, cancellation is postponed until
1822 * all holds are released.
1824 * Cancellation notification is delivered to layers at most once.
1826 * \see cl_lock_operations::clo_cancel()
1827 * \see cl_lock::cll_holds
1829 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1831 LINVRNT(cl_lock_is_mutexed(lock));
1832 LINVRNT(cl_lock_invariant(env, lock));
1835 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1836 if (lock->cll_holds == 0)
1837 cl_lock_cancel0(env, lock);
1839 lock->cll_flags |= CLF_CANCELPEND;
1842 EXPORT_SYMBOL(cl_lock_cancel);
1845 * Finds an existing lock covering given index and optionally different from a
1846 * given \a except lock.
1848 struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
1849 struct cl_object *obj, pgoff_t index,
1850 struct cl_lock *except,
1851 int pending, int canceld)
1853 struct cl_object_header *head;
1854 struct cl_lock *scan;
1855 struct cl_lock *lock;
1856 struct cl_lock_descr *need;
1860 head = cl_object_header(obj);
1861 need = &cl_env_info(env)->clt_descr;
1864 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1866 need->cld_start = need->cld_end = index;
1867 need->cld_enq_flags = 0;
1869 spin_lock(&head->coh_lock_guard);
1870 /* It is fine to match any group lock since there can be only one
1871 * with a unique gid, and it conflicts with all other lock modes too */
1872 cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1873 if (scan != except &&
1874 (scan->cll_descr.cld_mode == CLM_GROUP ||
1875 cl_lock_ext_match(&scan->cll_descr, need)) &&
1876 scan->cll_state >= CLS_HELD &&
1877 scan->cll_state < CLS_FREEING &&
1879 * This check is racy as the lock can be canceled right
1880 * after it is done, but this is fine, because page exists
1883 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1884 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1885 /* Don't increase cs_hit here since this
1886 * is just a helper function. */
1887 cl_lock_get_trust(scan);
1892 spin_unlock(&head->coh_lock_guard);
1895 EXPORT_SYMBOL(cl_lock_at_pgoff);
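/*
 * Hedged sketch: the helper above takes a "trusted" reference on the lock it
 * returns, so the caller must drop it with cl_lock_put(). "env", "obj" and
 * "index" are assumed to be supplied by the caller.
 *
 * \code
 *	struct cl_lock *lock;
 *
 *	lock = cl_lock_at_pgoff(env, obj, index, NULL, 0, 0);
 *	if (lock != NULL) {
 *		// a granted, non-cancelled lock covers this page index
 *		cl_lock_put(env, lock);
 *	}
 * \endcode
 */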
1898 * Eliminates all locks for a given object.
1900 * Caller has to guarantee that no lock is in active use.
1902 * \param cancel when this is set, cl_locks_prune() cancels locks before
1905 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
1907 struct cl_object_header *head;
1908 struct cl_lock *lock;
1911 head = cl_object_header(obj);
1913 spin_lock(&head->coh_lock_guard);
1914 while (!cfs_list_empty(&head->coh_locks)) {
1915 lock = container_of(head->coh_locks.next,
1916 struct cl_lock, cll_linkage);
1917 cl_lock_get_trust(lock);
1918 spin_unlock(&head->coh_lock_guard);
1919 lu_ref_add(&lock->cll_reference, "prune", current);
1922 cl_lock_mutex_get(env, lock);
1923 if (lock->cll_state < CLS_FREEING) {
1924 LASSERT(lock->cll_users <= 1);
1925 if (unlikely(lock->cll_users == 1)) {
1926 struct l_wait_info lwi = { 0 };
1928 cl_lock_mutex_put(env, lock);
1929 l_wait_event(lock->cll_wq,
1930 lock->cll_users == 0,
1936 cl_lock_cancel(env, lock);
1937 cl_lock_delete(env, lock);
1939 cl_lock_mutex_put(env, lock);
1940 lu_ref_del(&lock->cll_reference, "prune", current);
1941 cl_lock_put(env, lock);
1942 spin_lock(&head->coh_lock_guard);
1944 spin_unlock(&head->coh_lock_guard);
1947 EXPORT_SYMBOL(cl_locks_prune);
1949 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
1950 const struct cl_io *io,
1951 const struct cl_lock_descr *need,
1952 const char *scope, const void *source)
1954 struct cl_lock *lock;
1959 lock = cl_lock_find(env, io, need);
1962 cl_lock_mutex_get(env, lock);
1963 if (lock->cll_state < CLS_FREEING &&
1964 !(lock->cll_flags & CLF_CANCELLED)) {
1965 cl_lock_hold_mod(env, lock, +1);
1966 lu_ref_add(&lock->cll_holders, scope, source);
1967 lu_ref_add(&lock->cll_reference, scope, source);
1970 cl_lock_mutex_put(env, lock);
1971 cl_lock_put(env, lock);
1977 * Returns a lock matching \a need description with a reference and a hold on it.
1980 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
1981 * guarantees that lock is not in the CLS_FREEING state on return.
1983 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
1984 const struct cl_lock_descr *need,
1985 const char *scope, const void *source)
1987 struct cl_lock *lock;
1991 lock = cl_lock_hold_mutex(env, io, need, scope, source);
1993 cl_lock_mutex_put(env, lock);
1996 EXPORT_SYMBOL(cl_lock_hold);
1999 * Main high-level entry point of the cl_lock interface that finds an existing
2000 * lock, or enqueues a new one, matching the given description.
2002 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2003 const struct cl_lock_descr *need,
2004 const char *scope, const void *source)
2006 struct cl_lock *lock;
2008 __u32 enqflags = need->cld_enq_flags;
2012 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2016 rc = cl_enqueue_locked(env, lock, io, enqflags);
2018 if (cl_lock_fits_into(env, lock, need, io)) {
2019 if (!(enqflags & CEF_AGL)) {
2020 cl_lock_mutex_put(env, lock);
2021 cl_lock_lockdep_acquire(env, lock,
2027 cl_unuse_locked(env, lock);
2029 cl_lock_trace(D_DLMTRACE, env,
2030 rc <= 0 ? "enqueue failed" : "agl succeed", lock);
2031 cl_lock_hold_release(env, lock, scope, source);
2032 cl_lock_mutex_put(env, lock);
2033 lu_ref_del(&lock->cll_reference, scope, source);
2034 cl_lock_put(env, lock);
2036 LASSERT(enqflags & CEF_AGL);
2038 } else if (rc != 0) {
2044 EXPORT_SYMBOL(cl_lock_request);
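/*
 * Hedged sketch of a plausible release sequence for a lock obtained with
 * cl_lock_request(), mirroring the error path above: the user reference is
 * dropped with cl_unuse() and the hold/reference pair with cl_lock_release().
 * "env", "io", "need", "scope" and "cookie" are assumed to come from the
 * caller; note that with CEF_AGL the function may legitimately return NULL.
 *
 * \code
 *	struct cl_lock *lock;
 *
 *	lock = cl_lock_request(env, io, need, scope, cookie);
 *	if (!IS_ERR(lock) && lock != NULL) {
 *		// ... I/O under the granted lock ...
 *		cl_unuse(env, lock);
 *		cl_lock_release(env, lock, scope, cookie);
 *	}
 * \endcode
 */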
2047 * Adds a hold to a known lock.
2049 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2050 const char *scope, const void *source)
2052 LINVRNT(cl_lock_is_mutexed(lock));
2053 LINVRNT(cl_lock_invariant(env, lock));
2054 LASSERT(lock->cll_state != CLS_FREEING);
2057 cl_lock_hold_mod(env, lock, +1);
2059 lu_ref_add(&lock->cll_holders, scope, source);
2060 lu_ref_add(&lock->cll_reference, scope, source);
2063 EXPORT_SYMBOL(cl_lock_hold_add);
2066 * Releases a hold and a reference on a lock on which the caller acquired a hold.
2069 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2070 const char *scope, const void *source)
2072 LINVRNT(cl_lock_invariant(env, lock));
2074 cl_lock_hold_release(env, lock, scope, source);
2075 lu_ref_del(&lock->cll_reference, scope, source);
2076 cl_lock_put(env, lock);
2079 EXPORT_SYMBOL(cl_lock_unhold);
2082 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2084 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2085 const char *scope, const void *source)
2087 LINVRNT(cl_lock_invariant(env, lock));
2089 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2090 cl_lock_mutex_get(env, lock);
2091 cl_lock_hold_release(env, lock, scope, source);
2092 cl_lock_mutex_put(env, lock);
2093 lu_ref_del(&lock->cll_reference, scope, source);
2094 cl_lock_put(env, lock);
2097 EXPORT_SYMBOL(cl_lock_release);
2099 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2101 LINVRNT(cl_lock_is_mutexed(lock));
2102 LINVRNT(cl_lock_invariant(env, lock));
2105 cl_lock_used_mod(env, lock, +1);
2108 EXPORT_SYMBOL(cl_lock_user_add);
2110 void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2112 LINVRNT(cl_lock_is_mutexed(lock));
2113 LINVRNT(cl_lock_invariant(env, lock));
2114 LASSERT(lock->cll_users > 0);
2117 cl_lock_used_mod(env, lock, -1);
2118 if (lock->cll_users == 0)
2119 wake_up_all(&lock->cll_wq);
2122 EXPORT_SYMBOL(cl_lock_user_del);
2124 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2126 static const char *names[] = {
2127 [CLM_PHANTOM] = "P",
2132 if (0 <= mode && mode < ARRAY_SIZE(names))
2137 EXPORT_SYMBOL(cl_lock_mode_name);
2140 * Prints a human-readable representation of a lock description.
2142 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2143 lu_printer_t printer,
2144 const struct cl_lock_descr *descr)
2146 const struct lu_fid *fid;
2148 fid = lu_object_fid(&descr->cld_obj->co_lu);
2149 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2151 EXPORT_SYMBOL(cl_lock_descr_print);
2154 * Prints a human-readable representation of \a lock to \a f.
2156 void cl_lock_print(const struct lu_env *env, void *cookie,
2157 lu_printer_t printer, const struct cl_lock *lock)
2159 const struct cl_lock_slice *slice;
2160 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2161 lock, cfs_atomic_read(&lock->cll_ref),
2162 lock->cll_state, lock->cll_error, lock->cll_holds,
2163 lock->cll_users, lock->cll_flags);
2164 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2165 (*printer)(env, cookie, " {\n");
2167 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2168 (*printer)(env, cookie, " %s@%p: ",
2169 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2171 if (slice->cls_ops->clo_print != NULL)
2172 slice->cls_ops->clo_print(env, cookie, printer, slice);
2173 (*printer)(env, cookie, "\n");
2175 (*printer)(env, cookie, "} lock@%p\n", lock);
2177 EXPORT_SYMBOL(cl_lock_print);
2179 int cl_lock_init(void)
2181 return lu_kmem_init(cl_lock_caches);
2184 void cl_lock_fini(void)
2186 lu_kmem_fini(cl_lock_caches);