1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 # define EXPORT_SYMTAB
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_fid.h>
49 #include <libcfs/list.h>
50 /* lu_time_global_{init,fini}() */
53 #include <cl_object.h>
54 #include "cl_internal.h"
56 /** Lock class of cl_lock::cll_guard */
57 static struct lock_class_key cl_lock_guard_class;
58 static cfs_mem_cache_t *cl_lock_kmem;
60 static struct lu_kmem_descr cl_lock_caches[] = {
62 .ckd_cache = &cl_lock_kmem,
63 .ckd_name = "cl_lock_kmem",
64 .ckd_size = sizeof (struct cl_lock)
72 * Basic lock invariant that is maintained at all times. Caller either has a
73 * reference to \a lock, or somehow assures that \a lock cannot be freed.
75 * \see cl_lock_invariant()
77 static int cl_lock_invariant_trusted(const struct lu_env *env,
78 const struct cl_lock *lock)
82 ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
83 atomic_read(&lock->cll_ref) >= lock->cll_holds &&
84 lock->cll_holds >= lock->cll_users &&
85 lock->cll_holds >= 0 &&
86 lock->cll_users >= 0 &&
91 * Stronger lock invariant, checking that caller has a reference on a lock.
93 * \see cl_lock_invariant_trusted()
95 static int cl_lock_invariant(const struct lu_env *env,
96 const struct cl_lock *lock)
100 result = atomic_read(&lock->cll_ref) > 0 &&
101 cl_lock_invariant_trusted(env, lock);
102 if (!result && env != NULL)
103 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
108 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
110 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
112 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
116 * Returns a set of counters for this lock, depending on a lock nesting.
118 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
119 const struct cl_lock *lock)
121 struct cl_thread_info *info;
122 enum clt_nesting_level nesting;
124 info = cl_env_info(env);
125 nesting = cl_lock_nesting(lock);
126 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
127 return &info->clt_counters[nesting];
130 #define RETIP ((unsigned long)__builtin_return_address(0))
132 #ifdef CONFIG_LOCKDEP
133 static struct lock_class_key cl_lock_key;
135 static void cl_lock_lockdep_init(struct cl_lock *lock)
137 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
140 static void cl_lock_lockdep_acquire(const struct lu_env *env,
141 struct cl_lock *lock, __u32 enqflags)
143 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
144 lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
145 /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
146 /* check: */ 2, RETIP);
149 static void cl_lock_lockdep_release(const struct lu_env *env,
150 struct cl_lock *lock)
152 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
153 lock_release(&lock->dep_map, 0, RETIP);
156 #else /* !CONFIG_LOCKDEP */
158 static void cl_lock_lockdep_init(struct cl_lock *lock)
160 static void cl_lock_lockdep_acquire(const struct lu_env *env,
161 struct cl_lock *lock, __u32 enqflags)
163 static void cl_lock_lockdep_release(const struct lu_env *env,
164 struct cl_lock *lock)
167 #endif /* !CONFIG_LOCKDEP */
170 * Adds lock slice to the compound lock.
172 * This is called by cl_object_operations::coo_lock_init() methods to add a
173 * per-layer state to the lock. New state is added at the end of
174 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
176 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
178 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
179 struct cl_object *obj,
180 const struct cl_lock_operations *ops)
183 slice->cls_lock = lock;
184 list_add_tail(&slice->cls_linkage, &lock->cll_layers);
185 slice->cls_obj = obj;
186 slice->cls_ops = ops;
189 EXPORT_SYMBOL(cl_lock_slice_add);
192 * Returns true iff a lock with the mode \a has provides at least the same
193 * guarantees as a lock with the mode \a need.
195 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
197 LINVRNT(need == CLM_READ || need == CLM_WRITE || need == CLM_PHANTOM);
198 LINVRNT(has == CLM_READ || has == CLM_WRITE || has == CLM_PHANTOM);
199 CLASSERT(CLM_PHANTOM < CLM_READ);
200 CLASSERT(CLM_READ < CLM_WRITE);
204 EXPORT_SYMBOL(cl_lock_mode_match);
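/*
 * Editorial usage sketch (not part of the original file). The CLASSERT()s
 * above pin the ordering CLM_PHANTOM < CLM_READ < CLM_WRITE, so mode
 * compatibility can be read off that ordering, e.g.:
 *
 *     cl_lock_mode_match(CLM_WRITE, CLM_READ);    // true: WRITE covers READ
 *     cl_lock_mode_match(CLM_READ, CLM_WRITE);    // false
 *     cl_lock_mode_match(CLM_READ, CLM_PHANTOM);  // true: a real lock covers
 *                                                 // a phantom request
 */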
207 * Returns true iff extent portions of lock descriptions match.
209 int cl_lock_ext_match(const struct cl_lock_descr *has,
210 const struct cl_lock_descr *need)
213 has->cld_start <= need->cld_start &&
214 has->cld_end >= need->cld_end &&
215 cl_lock_mode_match(has->cld_mode, need->cld_mode);
217 EXPORT_SYMBOL(cl_lock_ext_match);
220 * Returns true iff a lock with the description \a has provides at least the
221 * same guarantees as a lock with the description \a need.
223 int cl_lock_descr_match(const struct cl_lock_descr *has,
224 const struct cl_lock_descr *need)
227 cl_object_same(has->cld_obj, need->cld_obj) &&
228 cl_lock_ext_match(has, need);
230 EXPORT_SYMBOL(cl_lock_descr_match);
232 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
234 struct cl_object *obj = lock->cll_descr.cld_obj;
236 LASSERT(cl_is_lock(lock));
237 LINVRNT(!cl_lock_is_mutexed(lock));
241 while (!list_empty(&lock->cll_layers)) {
242 struct cl_lock_slice *slice;
244 slice = list_entry(lock->cll_layers.next, struct cl_lock_slice, cls_linkage);
246 list_del_init(lock->cll_layers.next);
247 slice->cls_ops->clo_fini(env, slice);
249 atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
250 atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
251 lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
252 cl_object_put(env, obj);
253 lu_ref_fini(&lock->cll_reference);
254 lu_ref_fini(&lock->cll_holders);
255 mutex_destroy(&lock->cll_guard);
256 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
261 * Releases a reference on a lock.
263 * When last reference is released, lock is returned to the cache, unless it
264 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed immediately.
267 * \see cl_object_put(), cl_page_put()
269 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
271 struct cl_object *obj;
272 struct cl_object_header *head;
273 struct cl_site *site;
275 LINVRNT(cl_lock_invariant(env, lock));
277 obj = lock->cll_descr.cld_obj;
278 LINVRNT(obj != NULL);
279 head = cl_object_header(obj);
280 site = cl_object_site(obj);
282 CDEBUG(D_DLMTRACE, "releasing reference: %d %p %lu\n",
283 atomic_read(&lock->cll_ref), lock, RETIP);
285 if (atomic_dec_and_test(&lock->cll_ref)) {
286 if (lock->cll_state == CLS_FREEING) {
287 LASSERT(list_empty(&lock->cll_linkage));
288 cl_lock_free(env, lock);
290 atomic_dec(&site->cs_locks.cs_busy);
294 EXPORT_SYMBOL(cl_lock_put);
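/*
 * Editorial usage sketch (not part of the original file): the basic
 * reference-counting pattern. A caller that already owns a reference can
 * pin the lock across a region where it might otherwise be released:
 *
 *     cl_lock_get(lock);              // take an extra reference
 *     ... examine lock->cll_descr ...
 *     cl_lock_put(env, lock);         // drop it; if this was the last
 *                                     // reference and the lock is in
 *                                     // CLS_FREEING, the lock is freed
 */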
297 * Acquires an additional reference to a lock.
299 * This can be called only by a caller already possessing a reference to \a lock.
302 * \see cl_object_get(), cl_page_get()
304 void cl_lock_get(struct cl_lock *lock)
306 LINVRNT(cl_lock_invariant(NULL, lock));
307 CDEBUG(D_DLMTRACE|D_TRACE, "acquiring reference: %d %p %lu\n",
308 atomic_read(&lock->cll_ref), lock, RETIP);
309 atomic_inc(&lock->cll_ref);
311 EXPORT_SYMBOL(cl_lock_get);
314 * Acquires a reference to a lock.
316 * This is much like cl_lock_get(), except that this function can be used to
317 * acquire initial reference to the cached lock. Caller has to deal with all
318 * possible races. Use with care!
320 * \see cl_page_get_trust()
322 void cl_lock_get_trust(struct cl_lock *lock)
324 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
326 LASSERT(cl_is_lock(lock));
327 CDEBUG(D_DLMTRACE|D_TRACE, "acquiring trusted reference: %d %p %lu\n",
328 atomic_read(&lock->cll_ref), lock, RETIP);
329 if (atomic_inc_return(&lock->cll_ref) == 1)
330 atomic_inc(&site->cs_locks.cs_busy);
332 EXPORT_SYMBOL(cl_lock_get_trust);
335 * Helper function destroying the lock that wasn't completely initialized.
337 * Other threads can acquire references to the top-lock through its
338 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
340 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
342 cl_lock_mutex_get(env, lock);
343 cl_lock_delete(env, lock);
344 cl_lock_mutex_put(env, lock);
345 cl_lock_put(env, lock);
348 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
349 struct cl_object *obj,
350 const struct cl_io *io,
351 const struct cl_lock_descr *descr)
353 struct cl_lock *lock;
354 struct lu_object_header *head;
355 struct cl_site *site = cl_object_site(obj);
358 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
360 atomic_set(&lock->cll_ref, 1);
361 lock->cll_descr = *descr;
362 lock->cll_state = CLS_NEW;
364 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
366 CFS_INIT_LIST_HEAD(&lock->cll_layers);
367 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
368 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
369 lu_ref_init(&lock->cll_reference);
370 lu_ref_init(&lock->cll_holders);
371 mutex_init(&lock->cll_guard);
372 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
373 cfs_waitq_init(&lock->cll_wq);
374 head = obj->co_lu.lo_header;
375 atomic_inc(&site->cs_locks_state[CLS_NEW]);
376 atomic_inc(&site->cs_locks.cs_total);
377 atomic_inc(&site->cs_locks.cs_created);
378 cl_lock_lockdep_init(lock);
379 list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
382 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
384 cl_lock_finish(env, lock);
390 lock = ERR_PTR(-ENOMEM);
395 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
396 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
397 * cover multiple stripes and can trigger cascading timeouts.
399 static int cl_lock_fits_into(const struct lu_env *env,
400 const struct cl_lock *lock,
401 const struct cl_lock_descr *need,
402 const struct cl_io *io)
404 const struct cl_lock_slice *slice;
406 LINVRNT(cl_lock_invariant_trusted(env, lock));
408 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
409 if (slice->cls_ops->clo_fits_into != NULL &&
410 !slice->cls_ops->clo_fits_into(env, slice, need, io))
416 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
417 struct cl_object *obj,
418 const struct cl_io *io,
419 const struct cl_lock_descr *need)
421 struct cl_lock *lock;
422 struct cl_object_header *head;
423 struct cl_site *site;
427 head = cl_object_header(obj);
428 site = cl_object_site(obj);
429 LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
430 atomic_inc(&site->cs_locks.cs_lookup);
431 list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
434 LASSERT(cl_is_lock(lock));
435 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
436 lock->cll_state < CLS_FREEING &&
437 !(lock->cll_flags & CLF_CANCELLED) &&
438 cl_lock_fits_into(env, lock, need, io);
439 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%i) need: "DDESCR": %d\n",
440 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
443 cl_lock_get_trust(lock);
444 /* move the lock to the LRU head */
445 list_move(&lock->cll_linkage, &head->coh_locks);
446 atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
454 * Returns a lock matching description \a need.
456 * This is the main entry point into the cl_lock caching interface. First, a
457 * cache (implemented as a per-object linked list) is consulted. If lock is
458 * found there, it is returned immediately. Otherwise new lock is allocated
459 * and returned. In any case, additional reference to lock is acquired.
461 * \see cl_object_find(), cl_page_find()
463 static struct cl_lock *cl_lock_find(const struct lu_env *env,
464 const struct cl_io *io,
465 const struct cl_lock_descr *need)
467 struct cl_object_header *head;
468 struct cl_object *obj;
469 struct cl_lock *lock;
470 struct cl_site *site;
475 head = cl_object_header(obj);
476 site = cl_object_site(obj);
478 spin_lock(&head->coh_lock_guard);
479 lock = cl_lock_lookup(env, obj, io, need);
480 spin_unlock(&head->coh_lock_guard);
483 lock = cl_lock_alloc(env, obj, io, need);
485 struct cl_lock *ghost;
487 spin_lock(&head->coh_lock_guard);
488 ghost = cl_lock_lookup(env, obj, io, need);
490 list_add(&lock->cll_linkage, &head->coh_locks);
491 spin_unlock(&head->coh_lock_guard);
492 atomic_inc(&site->cs_locks.cs_busy);
494 spin_unlock(&head->coh_lock_guard);
496 * Other threads can acquire references to the
497 * top-lock through its sub-locks. Hence, it
498 * cannot be cl_lock_free()-ed immediately.
500 cl_lock_finish(env, lock);
509 * Returns existing lock matching given description. This is similar to
510 * cl_lock_find() except that no new lock is created, and returned lock is
511 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
513 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
514 const struct cl_lock_descr *need,
515 const char *scope, const void *source)
517 struct cl_object_header *head;
518 struct cl_object *obj;
519 struct cl_lock *lock;
522 head = cl_object_header(obj);
524 spin_lock(&head->coh_lock_guard);
525 lock = cl_lock_lookup(env, obj, io, need);
526 spin_unlock(&head->coh_lock_guard);
531 cl_lock_mutex_get(env, lock);
532 if (lock->cll_state == CLS_CACHED)
533 cl_use_try(env, lock);
534 ok = lock->cll_state == CLS_HELD;
536 cl_lock_hold_add(env, lock, scope, source);
537 cl_lock_user_add(env, lock);
539 cl_lock_mutex_put(env, lock);
541 cl_lock_put(env, lock);
547 EXPORT_SYMBOL(cl_lock_peek);
550 * Returns a slice within a lock, corresponding to the given layer in the
555 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
556 const struct lu_device_type *dtype)
558 const struct cl_lock_slice *slice;
560 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
563 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
564 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
569 EXPORT_SYMBOL(cl_lock_at);
571 static void cl_lock_trace(struct cl_thread_counters *counters,
572 const char *prefix, const struct cl_lock *lock)
574 CDEBUG(D_DLMTRACE|D_TRACE, "%s: %i@%p %p %i %i\n", prefix,
575 atomic_read(&lock->cll_ref), lock, lock->cll_guarder,
576 lock->cll_depth, counters->ctc_nr_locks_locked);
579 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
581 struct cl_thread_counters *counters;
583 counters = cl_lock_counters(env, lock);
585 counters->ctc_nr_locks_locked++;
586 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
587 cl_lock_trace(counters, "got mutex", lock);
591 * Locks cl_lock object.
593 * This is used to manipulate cl_lock fields, and to serialize state
594 * transitions in the lock state machine.
596 * \post cl_lock_is_mutexed(lock)
598 * \see cl_lock_mutex_put()
600 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
602 LINVRNT(cl_lock_invariant(env, lock));
604 if (lock->cll_guarder == cfs_current()) {
605 LINVRNT(cl_lock_is_mutexed(lock));
606 LINVRNT(lock->cll_depth > 0);
608 struct cl_object_header *hdr;
609 struct cl_thread_info *info;
612 LINVRNT(lock->cll_guarder != cfs_current());
613 hdr = cl_object_header(lock->cll_descr.cld_obj);
615 * Check that mutices are taken in the bottom-to-top order.
617 info = cl_env_info(env);
618 for (i = 0; i < hdr->coh_nesting; ++i)
619 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
620 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
621 lock->cll_guarder = cfs_current();
622 LINVRNT(lock->cll_depth == 0);
624 cl_lock_mutex_tail(env, lock);
626 EXPORT_SYMBOL(cl_lock_mutex_get);
629 * Try-locks cl_lock object.
631 * \retval 0 \a lock was successfully locked
633 * \retval -EBUSY \a lock cannot be locked right now
635 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
637 * \see cl_lock_mutex_get()
639 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
643 LINVRNT(cl_lock_invariant_trusted(env, lock));
647 if (lock->cll_guarder == cfs_current()) {
648 LINVRNT(lock->cll_depth > 0);
649 cl_lock_mutex_tail(env, lock);
650 } else if (mutex_trylock(&lock->cll_guard)) {
651 LINVRNT(lock->cll_depth == 0);
652 lock->cll_guarder = cfs_current();
653 cl_lock_mutex_tail(env, lock);
658 EXPORT_SYMBOL(cl_lock_mutex_try);
661 * Unlocks cl_lock object.
663 * \pre cl_lock_is_mutexed(lock)
665 * \see cl_lock_mutex_get()
667 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
669 struct cl_thread_counters *counters;
671 LINVRNT(cl_lock_invariant(env, lock));
672 LINVRNT(cl_lock_is_mutexed(lock));
673 LINVRNT(lock->cll_guarder == cfs_current());
674 LINVRNT(lock->cll_depth > 0);
676 counters = cl_lock_counters(env, lock);
677 LINVRNT(counters->ctc_nr_locks_locked > 0);
679 cl_lock_trace(counters, "put mutex", lock);
680 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
681 counters->ctc_nr_locks_locked--;
682 if (--lock->cll_depth == 0) {
683 lock->cll_guarder = NULL;
684 mutex_unlock(&lock->cll_guard);
687 EXPORT_SYMBOL(cl_lock_mutex_put);
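/*
 * Editorial usage sketch (not part of the original file): per-lock state is
 * only manipulated under the lock mutex, which is recursive for the owning
 * thread (see cll_guarder/cll_depth above), so get/put calls must balance:
 *
 *     cl_lock_mutex_get(env, lock);
 *     LASSERT(cl_lock_is_mutexed(lock));
 *     ... inspect or change lock->cll_state, lock->cll_flags ...
 *     cl_lock_mutex_put(env, lock);
 */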
690 * Returns true iff lock's mutex is owned by the current thread.
692 int cl_lock_is_mutexed(struct cl_lock *lock)
694 return lock->cll_guarder == cfs_current();
696 EXPORT_SYMBOL(cl_lock_is_mutexed);
699 * Returns number of cl_lock mutices held by the current thread (environment).
701 int cl_lock_nr_mutexed(const struct lu_env *env)
703 struct cl_thread_info *info;
708 * NOTE: if summation across all nesting levels (currently 2) proves
709 * too expensive, a summary counter can be added to
710 * struct cl_thread_info.
712 info = cl_env_info(env);
713 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
714 locked += info->clt_counters[i].ctc_nr_locks_locked;
717 EXPORT_SYMBOL(cl_lock_nr_mutexed);
719 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
721 LINVRNT(cl_lock_is_mutexed(lock));
722 LINVRNT(cl_lock_invariant(env, lock));
724 if (!(lock->cll_flags & CLF_CANCELLED)) {
725 const struct cl_lock_slice *slice;
727 lock->cll_flags |= CLF_CANCELLED;
728 list_for_each_entry_reverse(slice, &lock->cll_layers,
730 if (slice->cls_ops->clo_cancel != NULL)
731 slice->cls_ops->clo_cancel(env, slice);
737 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
739 struct cl_object_header *head;
740 const struct cl_lock_slice *slice;
742 LINVRNT(cl_lock_is_mutexed(lock));
743 LINVRNT(cl_lock_invariant(env, lock));
746 if (lock->cll_state < CLS_FREEING) {
747 cl_lock_state_set(env, lock, CLS_FREEING);
749 head = cl_object_header(lock->cll_descr.cld_obj);
751 spin_lock(&head->coh_lock_guard);
752 list_del_init(&lock->cll_linkage);
754 * No locks, no pages. This property is only valid for bottom sub-locks,
755 * and the head->coh_nesting == 1 check assumes a two-level top-sub hierarchy.
758 LASSERT(ergo(head->coh_nesting == 1 &&
759 list_empty(&head->coh_locks), !head->coh_pages));
760 spin_unlock(&head->coh_lock_guard);
762 * From now on, no new references to this lock can be acquired
763 * by cl_lock_lookup().
765 list_for_each_entry_reverse(slice, &lock->cll_layers,
767 if (slice->cls_ops->clo_delete != NULL)
768 slice->cls_ops->clo_delete(env, slice);
771 * From now on, no new references to this lock can be acquired
772 * by layer-specific means (like a pointer from struct
773 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
776 * Lock will be finally freed in cl_lock_put() when last of
777 * existing references goes away.
784 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
785 * top-lock (nesting == 0) accounts for this modification in the per-thread
786 * debugging counters. Sub-lock holds can be released by a thread different
787 * from one that acquired it.
789 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
792 struct cl_thread_counters *counters;
793 enum clt_nesting_level nesting;
795 lock->cll_holds += delta;
796 nesting = cl_lock_nesting(lock);
797 if (nesting == CNL_TOP) {
798 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
799 counters->ctc_nr_held += delta;
800 LASSERT(counters->ctc_nr_held >= 0);
805 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
806 * cl_lock_hold_mod() for the explanation of the debugging code.
808 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
811 struct cl_thread_counters *counters;
812 enum clt_nesting_level nesting;
814 lock->cll_users += delta;
815 nesting = cl_lock_nesting(lock);
816 if (nesting == CNL_TOP) {
817 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
818 counters->ctc_nr_used += delta;
819 LASSERT(counters->ctc_nr_used >= 0);
823 static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
824 const char *scope, const void *source)
826 LINVRNT(cl_lock_is_mutexed(lock));
827 LINVRNT(cl_lock_invariant(env, lock));
828 LASSERT(lock->cll_holds > 0);
831 lu_ref_del(&lock->cll_holders, scope, source);
832 cl_lock_hold_mod(env, lock, -1);
833 if (lock->cll_holds == 0) {
834 if (lock->cll_descr.cld_mode == CLM_PHANTOM)
836 * If lock is still phantom when user is done with
837 * it---destroy the lock.
839 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
840 if (lock->cll_flags & CLF_CANCELPEND) {
841 lock->cll_flags &= ~CLF_CANCELPEND;
842 cl_lock_cancel0(env, lock);
844 if (lock->cll_flags & CLF_DOOMED) {
845 /* no longer doomed: it's dead... Jim. */
846 lock->cll_flags &= ~CLF_DOOMED;
847 cl_lock_delete0(env, lock);
855 * Waits until lock state is changed.
857 * This function is called with cl_lock mutex locked, atomically releases
858 * mutex and goes to sleep, waiting for a lock state change (signaled by
859 * cl_lock_signal()), and re-acquires the mutex before return.
861 * This function is used to wait until lock state machine makes some progress
862 * and to emulate synchronous operations on top of the asynchronous lock interface.
865 * \retval -EINTR wait was interrupted
867 * \retval 0 wait wasn't interrupted
869 * \pre cl_lock_is_mutexed(lock)
871 * \see cl_lock_signal()
873 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
875 cfs_waitlink_t waiter;
879 LINVRNT(cl_lock_is_mutexed(lock));
880 LINVRNT(cl_lock_invariant(env, lock));
881 LASSERT(lock->cll_depth == 1);
882 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
884 result = lock->cll_error;
885 if (result == 0 && !(lock->cll_flags & CLF_STATE)) {
886 cfs_waitlink_init(&waiter);
887 cfs_waitq_add(&lock->cll_wq, &waiter);
888 set_current_state(CFS_TASK_INTERRUPTIBLE);
889 cl_lock_mutex_put(env, lock);
891 LASSERT(cl_lock_nr_mutexed(env) == 0);
892 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
894 cl_lock_mutex_get(env, lock);
895 set_current_state(CFS_TASK_RUNNING);
896 cfs_waitq_del(&lock->cll_wq, &waiter);
897 result = cfs_signal_pending() ? -EINTR : 0;
899 lock->cll_flags &= ~CLF_STATE;
902 EXPORT_SYMBOL(cl_lock_state_wait);
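/*
 * Editorial sketch (not part of the original file) of the retry pattern the
 * synchronous wrappers below are built on: call an asynchronous *_try()
 * step, and whenever it returns CLO_WAIT, sleep in cl_lock_state_wait()
 * until the state machine moves, then retry. "some_try_step" stands for
 * cl_enqueue_try(), cl_unuse_try() or cl_wait_try().
 *
 *     do {
 *             result = some_try_step(env, lock);
 *             if (result == CLO_WAIT) {
 *                     result = cl_lock_state_wait(env, lock);
 *                     if (result == 0)
 *                             continue;   // state changed; retry the step
 *             }
 *             break;
 *     } while (1);
 */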
904 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
905 enum cl_lock_state state)
907 const struct cl_lock_slice *slice;
910 LINVRNT(cl_lock_is_mutexed(lock));
911 LINVRNT(cl_lock_invariant(env, lock));
913 list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
914 if (slice->cls_ops->clo_state != NULL)
915 slice->cls_ops->clo_state(env, slice, state);
916 lock->cll_flags |= CLF_STATE;
917 cfs_waitq_broadcast(&lock->cll_wq);
922 * Notifies waiters that lock state changed.
924 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
925 * layers about state change by calling cl_lock_operations::clo_state()
928 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
931 cl_lock_state_signal(env, lock, lock->cll_state);
934 EXPORT_SYMBOL(cl_lock_signal);
937 * Changes lock state.
939 * This function is invoked to notify layers that lock state changed, possibly
940 * as a result of an asynchronous event such as call-back reception.
942 * \post lock->cll_state == state
944 * \see cl_lock_operations::clo_state()
946 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
947 enum cl_lock_state state)
949 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
952 LASSERT(lock->cll_state <= state ||
953 (lock->cll_state == CLS_CACHED &&
954 (state == CLS_HELD || /* lock found in cache */
955 state == CLS_NEW /* sub-lock canceled */)) ||
956 /* sub-lock canceled during unlocking */
957 (lock->cll_state == CLS_UNLOCKING && state == CLS_NEW));
959 if (lock->cll_state != state) {
960 atomic_dec(&site->cs_locks_state[lock->cll_state]);
961 atomic_inc(&site->cs_locks_state[state]);
963 cl_lock_state_signal(env, lock, state);
964 lock->cll_state = state;
968 EXPORT_SYMBOL(cl_lock_state_set);
971 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
972 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
974 int cl_use_try(const struct lu_env *env, struct cl_lock *lock)
977 const struct cl_lock_slice *slice;
981 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
982 if (slice->cls_ops->clo_use != NULL) {
983 result = slice->cls_ops->clo_use(env, slice);
988 LASSERT(result != -ENOSYS);
990 cl_lock_state_set(env, lock, CLS_HELD);
993 EXPORT_SYMBOL(cl_use_try);
996 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
999 static int cl_enqueue_kick(const struct lu_env *env,
1000 struct cl_lock *lock,
1001 struct cl_io *io, __u32 flags)
1004 const struct cl_lock_slice *slice;
1008 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1009 if (slice->cls_ops->clo_enqueue != NULL) {
1010 result = slice->cls_ops->clo_enqueue(env,
1016 LASSERT(result != -ENOSYS);
1021 * Tries to enqueue a lock.
1023 * This function is called repeatedly by cl_enqueue() until either lock is
1024 * enqueued, or error occurs. This function does not block waiting for
1025 * networking communication to complete.
1027 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1028 * lock->cll_state == CLS_HELD)
1030 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1031 * \see cl_lock_state::CLS_ENQUEUED
1033 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1034 struct cl_io *io, __u32 flags)
1042 LINVRNT(cl_lock_is_mutexed(lock));
1044 if (lock->cll_error != 0)
1046 switch (lock->cll_state) {
1048 cl_lock_state_set(env, lock, CLS_QUEUING);
1052 result = cl_enqueue_kick(env, lock, io, flags);
1054 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1057 /* wait until unlocking finishes, and enqueue the lock afresh. */
1062 /* yank lock from the cache. */
1063 result = cl_use_try(env, lock);
1072 * impossible, only held locks with increased
1073 * ->cll_holds can be enqueued, and they cannot be
1078 } while (result == CLO_REPEAT);
1080 cl_lock_error(env, lock, result);
1081 RETURN(result ?: lock->cll_error);
1083 EXPORT_SYMBOL(cl_enqueue_try);
1085 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1086 struct cl_io *io, __u32 enqflags)
1092 LINVRNT(cl_lock_is_mutexed(lock));
1093 LINVRNT(cl_lock_invariant(env, lock));
1094 LASSERT(lock->cll_holds > 0);
1096 cl_lock_user_add(env, lock);
1098 result = cl_enqueue_try(env, lock, io, enqflags);
1099 if (result == CLO_WAIT) {
1100 result = cl_lock_state_wait(env, lock);
1107 cl_lock_user_del(env, lock);
1108 if (result != -EINTR)
1109 cl_lock_error(env, lock, result);
1111 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1112 lock->cll_state == CLS_HELD));
1119 * \pre current thread or io owns a hold on lock.
1121 * \post ergo(result == 0, lock->users increased)
1122 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1123 * lock->cll_state == CLS_HELD)
1125 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1126 struct cl_io *io, __u32 enqflags)
1132 cl_lock_lockdep_acquire(env, lock, enqflags);
1133 cl_lock_mutex_get(env, lock);
1134 result = cl_enqueue_locked(env, lock, io, enqflags);
1135 cl_lock_mutex_put(env, lock);
1137 cl_lock_lockdep_release(env, lock);
1138 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1139 lock->cll_state == CLS_HELD));
1142 EXPORT_SYMBOL(cl_enqueue);
1145 * Tries to unlock a lock.
1147 * This function is called repeatedly by cl_unuse() until either lock is
1148 * unlocked, or error occurs.
1150 * \pre lock->cll_state <= CLS_HELD || lock->cll_state == CLS_UNLOCKING
1152 * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
1154 * \see cl_unuse() cl_lock_operations::clo_unuse()
1155 * \see cl_lock_state::CLS_CACHED
1157 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1159 const struct cl_lock_slice *slice;
1163 if (lock->cll_state != CLS_UNLOCKING) {
1164 if (lock->cll_users > 1) {
1165 cl_lock_user_del(env, lock);
1169 * New lock users (->cll_users) are not protecting unlocking
1170 * from proceeding. From this point, lock eventually reaches
1171 * CLS_CACHED, is reinitialized to CLS_NEW, or fails into CLS_FREEING.
1174 cl_lock_state_set(env, lock, CLS_UNLOCKING);
1179 if (lock->cll_error != 0)
1182 LINVRNT(cl_lock_is_mutexed(lock));
1183 LINVRNT(cl_lock_invariant(env, lock));
1184 LASSERT(lock->cll_state == CLS_UNLOCKING);
1185 LASSERT(lock->cll_users > 0);
1186 LASSERT(lock->cll_holds > 0);
1189 list_for_each_entry_reverse(slice, &lock->cll_layers,
1191 if (slice->cls_ops->clo_unuse != NULL) {
1192 result = slice->cls_ops->clo_unuse(env, slice);
1197 LASSERT(result != -ENOSYS);
1198 } while (result == CLO_REPEAT);
1199 if (result != CLO_WAIT)
1201 * Once there is no more need to iterate ->clo_unuse() calls,
1202 * remove lock user. This is done even if unrecoverable error
1203 * happened during unlocking, because nothing else can be done.
1206 cl_lock_user_del(env, lock);
1207 if (result == 0 || result == -ESTALE) {
1208 enum cl_lock_state state;
1211 * Return lock back to the cache. This is the only
1212 * place where lock is moved into CLS_CACHED state.
1214 * If one of ->clo_unuse() methods returned -ESTALE, lock
1215 * cannot be placed into cache and has to be
1216 * re-initialized. This happens e.g., when a sub-lock was
1217 * canceled while unlocking was in progress.
1219 state = result == 0 ? CLS_CACHED : CLS_NEW;
1220 cl_lock_state_set(env, lock, state);
1223 * Hide -ESTALE error.
1224 * Suppose the lock is a glimpse lock over multiple
1225 * stripes, one of its sublocks returned -ENAVAIL, and the
1226 * other sublocks are matched write locks. In this case we
1227 * can't set this lock to error, because otherwise some of
1228 * its sublocks may not be canceled, and some dirty pages
1229 * would never be written to the OSTs. -jay
1233 result = result ?: lock->cll_error;
1235 cl_lock_error(env, lock, result);
1238 EXPORT_SYMBOL(cl_unuse_try);
1240 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1243 LASSERT(lock->cll_state <= CLS_HELD);
1247 result = cl_unuse_try(env, lock);
1248 if (result == CLO_WAIT) {
1249 result = cl_lock_state_wait(env, lock);
1261 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1264 cl_lock_mutex_get(env, lock);
1265 cl_unuse_locked(env, lock);
1266 cl_lock_mutex_put(env, lock);
1267 cl_lock_lockdep_release(env, lock);
1270 EXPORT_SYMBOL(cl_unuse);
1273 * Tries to wait for a lock.
1275 * This function is called repeatedly by cl_wait() until either lock is
1276 * granted, or error occurs. This function does not block waiting for network
1277 * communication to complete.
1279 * \see cl_wait() cl_lock_operations::clo_wait()
1280 * \see cl_lock_state::CLS_HELD
1282 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1284 const struct cl_lock_slice *slice;
1289 LINVRNT(cl_lock_is_mutexed(lock));
1290 LINVRNT(cl_lock_invariant(env, lock));
1291 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1292 lock->cll_state == CLS_HELD);
1293 LASSERT(lock->cll_users > 0);
1294 LASSERT(lock->cll_holds > 0);
1297 if (lock->cll_error != 0)
1299 if (lock->cll_state == CLS_HELD)
1304 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1305 if (slice->cls_ops->clo_wait != NULL) {
1306 result = slice->cls_ops->clo_wait(env, slice);
1311 LASSERT(result != -ENOSYS);
1313 cl_lock_state_set(env, lock, CLS_HELD);
1314 } while (result == CLO_REPEAT);
1315 RETURN(result ?: lock->cll_error);
1317 EXPORT_SYMBOL(cl_wait_try);
1320 * Waits until enqueued lock is granted.
1322 * \pre current thread or io owns a hold on the lock
1323 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1324 * lock->cll_state == CLS_HELD)
1326 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1328 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1333 cl_lock_mutex_get(env, lock);
1335 LINVRNT(cl_lock_invariant(env, lock));
1336 LASSERT(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD);
1337 LASSERT(lock->cll_holds > 0);
1340 result = cl_wait_try(env, lock);
1341 if (result == CLO_WAIT) {
1342 result = cl_lock_state_wait(env, lock);
1349 cl_lock_user_del(env, lock);
1350 if (result != -EINTR)
1351 cl_lock_error(env, lock, result);
1352 cl_lock_lockdep_release(env, lock);
1354 cl_lock_mutex_put(env, lock);
1355 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1358 EXPORT_SYMBOL(cl_wait);
1361 * Executes cl_lock_operations::clo_weigh() and sums the results to estimate lock weight.
1364 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1366 const struct cl_lock_slice *slice;
1367 unsigned long pound;
1368 unsigned long ounce;
1371 LINVRNT(cl_lock_is_mutexed(lock));
1372 LINVRNT(cl_lock_invariant(env, lock));
1375 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1376 if (slice->cls_ops->clo_weigh != NULL) {
1377 ounce = slice->cls_ops->clo_weigh(env, slice);
1379 if (pound < ounce) /* over-weight^Wflow */
1385 EXPORT_SYMBOL(cl_lock_weigh);
1388 * Notifies layers that lock description changed.
1390 * The server can grant the client a lock different from the one requested
1391 * (e.g., larger in extent). This method is called when the actually granted
1392 * lock description becomes known, to let layers accommodate the change.
1395 * \see cl_lock_operations::clo_modify()
1397 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1398 const struct cl_lock_descr *desc)
1400 const struct cl_lock_slice *slice;
1401 struct cl_object *obj = lock->cll_descr.cld_obj;
1402 struct cl_object_header *hdr = cl_object_header(obj);
1406 /* don't allow object to change */
1407 LASSERT(obj == desc->cld_obj);
1408 LINVRNT(cl_lock_is_mutexed(lock));
1409 LINVRNT(cl_lock_invariant(env, lock));
1411 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1412 if (slice->cls_ops->clo_modify != NULL) {
1413 result = slice->cls_ops->clo_modify(env, slice, desc);
1418 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1419 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1421 * Just replace description in place. Nothing more is needed for
1422 * now. If locks were indexed according to their extent and/or mode,
1423 * that index would have to be updated here.
1425 spin_lock(&hdr->coh_lock_guard);
1426 lock->cll_descr = *desc;
1427 spin_unlock(&hdr->coh_lock_guard);
1430 EXPORT_SYMBOL(cl_lock_modify);
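/*
 * Editorial usage sketch (not part of the original file): a layer that
 * learns the server granted a wider extent than requested could propagate
 * that, under the lock mutex, roughly as follows. The [0, CL_PAGE_EOF]
 * extent is just an example value, assuming the usual whole-file end
 * marker from cl_object.h:
 *
 *     struct cl_lock_descr granted = lock->cll_descr;
 *
 *     granted.cld_start = 0;
 *     granted.cld_end   = CL_PAGE_EOF;   // whole-file extent
 *     result = cl_lock_modify(env, lock, &granted);
 */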
1433 * Initializes lock closure with a given origin.
1435 * \see cl_lock_closure
1437 void cl_lock_closure_init(const struct lu_env *env,
1438 struct cl_lock_closure *closure,
1439 struct cl_lock *origin, int wait)
1441 LINVRNT(cl_lock_is_mutexed(origin));
1442 LINVRNT(cl_lock_invariant(env, origin));
1444 CFS_INIT_LIST_HEAD(&closure->clc_list);
1445 closure->clc_origin = origin;
1446 closure->clc_wait = wait;
1447 closure->clc_nr = 0;
1449 EXPORT_SYMBOL(cl_lock_closure_init);
1452 * Builds a closure of \a lock.
1454 * Building of a closure consists of adding initial lock (\a lock) into it,
1455 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1456 * methods might call cl_lock_closure_build() recursively again, adding more
1457 * locks to the closure, etc.
1459 * \see cl_lock_closure
1461 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1462 struct cl_lock_closure *closure)
1464 const struct cl_lock_slice *slice;
1468 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1469 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1471 result = cl_lock_enclosure(env, lock, closure);
1473 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1474 if (slice->cls_ops->clo_closure != NULL) {
1475 result = slice->cls_ops->clo_closure(env, slice,
1483 cl_lock_disclosure(env, closure);
1486 EXPORT_SYMBOL(cl_lock_closure_build);
1489 * Adds new lock to a closure.
1491 * Try-locks \a lock and, if successful, adds it to the closure (never more
1492 * than once). If the try-lock fails, returns CLO_REPEAT, after optionally
1493 * waiting until the next try-lock is likely to succeed.
1495 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1496 struct cl_lock_closure *closure)
1500 if (!cl_lock_mutex_try(env, lock)) {
1502 * If lock->cll_inclosure is not empty, the lock is already in this closure.
1505 if (list_empty(&lock->cll_inclosure)) {
1506 cl_lock_get_trust(lock);
1507 lu_ref_add(&lock->cll_reference, "closure", closure);
1508 list_add(&lock->cll_inclosure, &closure->clc_list);
1511 cl_lock_mutex_put(env, lock);
1514 cl_lock_disclosure(env, closure);
1515 if (closure->clc_wait) {
1516 cl_lock_get_trust(lock);
1517 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1518 cl_lock_mutex_put(env, closure->clc_origin);
1520 LASSERT(cl_lock_nr_mutexed(env) == 0);
1521 cl_lock_mutex_get(env, lock);
1522 cl_lock_mutex_put(env, lock);
1524 cl_lock_mutex_get(env, closure->clc_origin);
1525 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1526 cl_lock_put(env, lock);
1528 result = CLO_REPEAT;
1532 EXPORT_SYMBOL(cl_lock_enclosure);
1534 /** Releases mutices of enclosed locks. */
1535 void cl_lock_disclosure(const struct lu_env *env,
1536 struct cl_lock_closure *closure)
1538 struct cl_lock *scan;
1539 struct cl_lock *temp;
1541 list_for_each_entry_safe(scan, temp, &closure->clc_list, cll_inclosure){
1542 list_del_init(&scan->cll_inclosure);
1543 cl_lock_mutex_put(env, scan);
1544 lu_ref_del(&scan->cll_reference, "closure", closure);
1545 cl_lock_put(env, scan);
1548 LASSERT(closure->clc_nr == 0);
1550 EXPORT_SYMBOL(cl_lock_disclosure);
1552 /** Finalizes a closure. */
1553 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1555 LASSERT(closure->clc_nr == 0);
1556 LASSERT(list_empty(&closure->clc_list));
1558 EXPORT_SYMBOL(cl_lock_closure_fini);
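/*
 * Editorial usage sketch (not part of the original file): a full closure
 * round over a hypothetical "other" lock, with the "origin" lock mutex held
 * on entry as cl_lock_closure_init() requires:
 *
 *     struct cl_lock_closure closure;
 *     int result;
 *
 *     cl_lock_closure_init(env, &closure, origin, 1);   // wait flag (clc_wait)
 *     result = cl_lock_closure_build(env, other, &closure);
 *     if (result == 0) {
 *             ... every enclosed lock is mutexed at this point ...
 *             cl_lock_disclosure(env, &closure);         // drop those mutexes
 *     }
 *     cl_lock_closure_fini(&closure);
 */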
1561 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1562 * destroyed, then destroys the lock. If there are holds on the lock, its
1563 * destruction is postponed until all holds are released. This is called when
1564 * a decision is made to destroy the lock in the future, e.g., when a blocking
1565 * AST is received on it, or a fatal communication error happens.
1567 * Caller must have a reference on this lock to prevent a situation where the
1568 * deleted lock lingers in memory indefinitely because nobody calls
1569 * cl_lock_put() to finish it.
1571 * \pre atomic_read(&lock->cll_ref) > 0
1572 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1573 * cl_lock_nr_mutexed(env) == 1)
1574 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1575 * held, as deletion of sub-locks might require releasing a top-lock
1578 * \see cl_lock_operations::clo_delete()
1579 * \see cl_lock::cll_holds
1581 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1583 LINVRNT(cl_lock_is_mutexed(lock));
1584 LINVRNT(cl_lock_invariant(env, lock));
1585 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1586 cl_lock_nr_mutexed(env) == 1));
1589 if (lock->cll_holds == 0)
1590 cl_lock_delete0(env, lock);
1592 lock->cll_flags |= CLF_DOOMED;
1595 EXPORT_SYMBOL(cl_lock_delete);
1598 * Marks lock as irrecoverably failed, and marks it for destruction. This
1599 * happens when, e.g., the server fails to grant a lock to us, or networking fails.
1602 * \pre atomic_read(&lock->cll_ref) > 0
1603 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1604 * cl_lock_nr_mutexed(env) == 1)
1605 * [i.e., if a top-lock failed, mutices of no other locks can be held, as
1606 * failing sub-locks might require releasing a top-lock mutex]
1608 * \see clo_lock_delete()
1609 * \see cl_lock::cll_holds
1611 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1613 LINVRNT(cl_lock_is_mutexed(lock));
1614 LINVRNT(cl_lock_invariant(env, lock));
1617 if (lock->cll_error == 0 && error != 0) {
1618 lock->cll_error = error;
1619 cl_lock_signal(env, lock);
1620 cl_lock_cancel(env, lock);
1621 cl_lock_delete(env, lock);
1625 EXPORT_SYMBOL(cl_lock_error);
1628 * Cancels this lock. Notifies layers
1629 * (bottom-to-top) that the lock is being cancelled, then destroys the lock.
1630 * If there are holds on the lock, cancellation is postponed until
1631 * all holds are released.
1633 * Cancellation notification is delivered to layers at most once.
1635 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1636 * cl_lock_nr_mutexed(env) == 1)
1637 * [i.e., if a top-lock is canceled, mutices of no other locks can be
1638 * held, as cancellation of sub-locks might require releasing a top-lock
1641 * \see cl_lock_operations::clo_cancel()
1642 * \see cl_lock::cll_holds
1644 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1646 LINVRNT(cl_lock_is_mutexed(lock));
1647 LINVRNT(cl_lock_invariant(env, lock));
1648 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1649 cl_lock_nr_mutexed(env) == 1));
1652 if (lock->cll_holds == 0)
1653 cl_lock_cancel0(env, lock);
1655 lock->cll_flags |= CLF_CANCELPEND;
1658 EXPORT_SYMBOL(cl_lock_cancel);
1661 * Finds an existing lock covering given page and optionally different from a
1662 * given \a except lock.
1664 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
1665 struct cl_page *page, struct cl_lock *except,
1666 int pending, int canceld)
1668 struct cl_object_header *head;
1669 struct cl_lock *scan;
1670 struct cl_lock *lock;
1671 struct cl_lock_descr *need;
1675 head = cl_object_header(obj);
1676 need = &cl_env_info(env)->clt_descr;
1679 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but not PHANTOM. */
1681 need->cld_start = need->cld_end = page->cp_index;
1683 spin_lock(&head->coh_lock_guard);
1684 list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1685 if (scan != except &&
1686 cl_lock_ext_match(&scan->cll_descr, need) &&
1687 scan->cll_state < CLS_FREEING &&
1689 * This check is racy as the lock can be canceled right
1690 * after it is done, but this is fine, because the page exists anyway.
1693 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1694 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1695 /* Don't increase cs_hit here since this
1696 * is just a helper function. */
1697 cl_lock_get_trust(scan);
1702 spin_unlock(&head->coh_lock_guard);
1705 EXPORT_SYMBOL(cl_lock_at_page);
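/*
 * Editorial usage sketch (not part of the original file): the returned lock
 * carries a trusted reference (cl_lock_get_trust() above), so the caller
 * must drop it with cl_lock_put() when done:
 *
 *     lock = cl_lock_at_page(env, obj, page, NULL, 1, 0);
 *     if (lock != NULL) {
 *             ... the page is covered by "lock" ...
 *             cl_lock_put(env, lock);
 *     }
 */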
1708 * Returns a list of pages protected (only) by a given lock.
1710 * Scans an extent of page radix tree, corresponding to the \a lock and queues
1711 * all pages that are not protected by locks other than \a lock into \a queue.
1713 void cl_lock_page_list_fixup(const struct lu_env *env,
1714 struct cl_io *io, struct cl_lock *lock,
1715 struct cl_page_list *queue)
1717 struct cl_page *page;
1718 struct cl_page *temp;
1719 struct cl_page_list *plist = &cl_env_info(env)->clt_list;
1721 LINVRNT(cl_lock_invariant(env, lock));
1724 /* Now we have a list of cl_pages under the \a lock, and we need
1725 * to check whether some of the pages are covered by another ldlm lock.
1726 * If this is the case, they don't need to be written out this time.
1728 * For example, we have A:[0,200] & B:[100,300] PW locks on the client;
1729 * now the latter is to be canceled. This means another client is
1730 * reading/writing [200,300], since A won't be canceled. Actually
1731 * we just need to write the pages covered by [200,300]. This is safe,
1732 * since [100,200] is still protected by lock A.
1735 cl_page_list_init(plist);
1736 cl_page_list_for_each_safe(page, temp, queue) {
1737 pgoff_t idx = page->cp_index;
1738 struct cl_lock *found;
1739 struct cl_lock_descr *descr;
1741 /* The algorithm relies on pages being in ascending index order. */
1742 LASSERT(ergo(&temp->cp_batch != &queue->pl_pages,
1743 page->cp_index < temp->cp_index));
1745 found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1750 descr = &found->cll_descr;
1751 list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
1753 idx = page->cp_index;
1754 if (descr->cld_start > idx || descr->cld_end < idx)
1756 cl_page_list_move(plist, queue, page);
1758 cl_lock_put(env, found);
1761 /* The pages in plist are covered by other locks; don't handle them this time. */
1765 cl_page_list_disown(env, io, plist);
1766 cl_page_list_fini(env, plist);
1769 EXPORT_SYMBOL(cl_lock_page_list_fixup);
1772 * Invalidate pages protected by the given lock, sending them out to the
1773 * server first, if necessary.
1775 * This function does the following:
1777 * - collects a list of pages to be invalidated,
1779 * - unmaps them from the user virtual memory,
1781 * - sends dirty pages to the server,
1783 * - waits for transfer completion,
1785 * - discards pages, and throws them out of memory.
1787 * If \a discard is set, pages are discarded without sending them to the server.
1790 * If error happens on any step, the process continues anyway (the reasoning
1791 * behind this being that lock cancellation cannot be delayed indefinitely).
1793 int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
1796 struct cl_thread_info *info = cl_env_info(env);
1797 struct cl_io *io = &info->clt_io;
1798 struct cl_2queue *queue = &info->clt_queue;
1799 struct cl_lock_descr *descr = &lock->cll_descr;
1804 LINVRNT(cl_lock_invariant(env, lock));
1807 io->ci_obj = cl_object_top(descr->cld_obj);
1808 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1811 cl_2queue_init(queue);
1812 cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
1813 descr->cld_end, &queue->c2_qin);
1814 if (queue->c2_qin.pl_nr > 0) {
1815 result = cl_page_list_unmap(env, io, &queue->c2_qin);
1817 rc0 = cl_io_submit_rw(env, io, CRT_WRITE,
1819 rc1 = cl_page_list_own(env, io,
1821 result = result ?: rc0 ?: rc1;
1823 cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
1824 cl_2queue_discard(env, io, queue);
1825 cl_2queue_disown(env, io, queue);
1827 cl_2queue_fini(env, queue);
1829 cl_io_fini(env, io);
1832 EXPORT_SYMBOL(cl_lock_page_out);
1835 * Eliminate all locks for a given object.
1837 * Caller has to guarantee that no lock is in active use.
1839 * \param cancel when this is set, cl_locks_prune() cancels locks before destroying them.
1842 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
1844 struct cl_object_header *head;
1845 struct cl_lock *lock;
1848 head = cl_object_header(obj);
1850 * If locks are destroyed without cancellation, all pages must be
1851 * already destroyed (as otherwise they will be left unprotected).
1853 LASSERT(ergo(!cancel,
1854 head->coh_tree.rnode == NULL && head->coh_pages == 0));
1856 spin_lock(&head->coh_lock_guard);
1857 while (!list_empty(&head->coh_locks)) {
1858 lock = container_of(head->coh_locks.next,
1859 struct cl_lock, cll_linkage);
1860 cl_lock_get_trust(lock);
1861 spin_unlock(&head->coh_lock_guard);
1862 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
1863 cl_lock_mutex_get(env, lock);
1864 if (lock->cll_state < CLS_FREEING) {
1865 LASSERT(lock->cll_holds == 0);
1866 LASSERT(lock->cll_users == 0);
1868 cl_lock_cancel(env, lock);
1869 cl_lock_delete(env, lock);
1871 cl_lock_mutex_put(env, lock);
1872 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
1873 cl_lock_put(env, lock);
1874 spin_lock(&head->coh_lock_guard);
1876 spin_unlock(&head->coh_lock_guard);
1879 EXPORT_SYMBOL(cl_locks_prune);
1882 * Returns true if \a addr is an address of an allocated cl_lock. Used in
1883 * assertions. This check is optimistically imprecise, i.e., it occasionally
1884 * returns true for the incorrect addresses, but if it returns false, then the
1885 * address is guaranteed to be incorrect. (Should be named cl_lockp().)
1889 int cl_is_lock(const void *addr)
1891 return cfs_mem_is_in_cache(addr, cl_lock_kmem);
1893 EXPORT_SYMBOL(cl_is_lock);
1895 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
1896 const struct cl_io *io,
1897 const struct cl_lock_descr *need,
1898 const char *scope, const void *source)
1900 struct cl_lock *lock;
1905 lock = cl_lock_find(env, io, need);
1908 cl_lock_mutex_get(env, lock);
1909 if (lock->cll_state < CLS_FREEING) {
1910 cl_lock_hold_mod(env, lock, +1);
1911 lu_ref_add(&lock->cll_holders, scope, source);
1912 lu_ref_add(&lock->cll_reference, scope, source);
1915 cl_lock_mutex_put(env, lock);
1916 cl_lock_put(env, lock);
1922 * Returns a lock matching the \a need description, with a reference and a hold on it.
1925 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
1926 * guarantees that lock is not in the CLS_FREEING state on return.
1928 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
1929 const struct cl_lock_descr *need,
1930 const char *scope, const void *source)
1932 struct cl_lock *lock;
1936 lock = cl_lock_hold_mutex(env, io, need, scope, source);
1938 cl_lock_mutex_put(env, lock);
1941 EXPORT_SYMBOL(cl_lock_hold);
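/*
 * Editorial usage sketch (not part of the original file): a hold keeps the
 * lock out of CLS_FREEING and is paired with cl_lock_release(); the scope
 * string and source pointer only feed the lu_ref debugging machinery
 * ("my_scope" below is an arbitrary label, "need" and "io" are assumed to
 * be set up by the caller):
 *
 *     lock = cl_lock_hold(env, io, need, "my_scope", io);
 *     if (!IS_ERR(lock)) {
 *             ... the lock cannot reach CLS_FREEING while held ...
 *             cl_lock_release(env, lock, "my_scope", io);
 *     }
 */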
1944 * Main high-level entry point of cl_lock interface that finds existing or
1945 * enqueues new lock matching given description.
1947 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
1948 const struct cl_lock_descr *need,
1950 const char *scope, const void *source)
1952 struct cl_lock *lock;
1953 const struct lu_fid *fid;
1959 fid = lu_object_fid(&io->ci_obj->co_lu);
1962 warn = iter >= 16 && IS_PO2(iter);
1963 CDEBUG(warn ? D_WARNING : D_DLMTRACE,
1964 DDESCR"@"DFID" %i %08x `%s'\n",
1965 PDESCR(need), PFID(fid), iter, enqflags, scope);
1966 lock = cl_lock_hold_mutex(env, io, need, scope, source);
1967 if (!IS_ERR(lock)) {
1968 rc = cl_enqueue_locked(env, lock, io, enqflags);
1970 if (cl_lock_fits_into(env, lock, need, io)) {
1971 cl_lock_mutex_put(env, lock);
1972 cl_lock_lockdep_acquire(env, lock, enqflags);
1976 CL_LOCK_DEBUG(D_WARNING, env, lock,
1977 "got (see bug 17665)\n");
1978 cl_unuse_locked(env, lock);
1980 cl_lock_hold_release(env, lock, scope, source);
1981 cl_lock_mutex_put(env, lock);
1982 lu_ref_del(&lock->cll_reference, scope, source);
1983 cl_lock_put(env, lock);
1991 EXPORT_SYMBOL(cl_lock_request);
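/*
 * Editorial usage sketch (not part of the original file): the typical
 * top-level sequence a caller would go through, with error handling reduced
 * to the bare minimum ("descr", "io" and "enqflags" are assumed to be set
 * up by the caller, "caller" is an arbitrary lu_ref scope label):
 *
 *     lock = cl_lock_request(env, io, descr, enqflags, "caller", io);
 *     if (!IS_ERR(lock)) {
 *             rc = cl_wait(env, lock);          // wait for the grant
 *             if (rc == 0) {
 *                     ... perform io under the lock ...
 *                     cl_unuse(env, lock);      // return lock to the cache
 *             }
 *             cl_lock_release(env, lock, "caller", io);
 *     }
 */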
1994 * Adds a hold to a known lock.
1996 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
1997 const char *scope, const void *source)
1999 LINVRNT(cl_lock_is_mutexed(lock));
2000 LINVRNT(cl_lock_invariant(env, lock));
2001 LASSERT(lock->cll_state != CLS_FREEING);
2004 cl_lock_hold_mod(env, lock, +1);
2006 lu_ref_add(&lock->cll_holders, scope, source);
2007 lu_ref_add(&lock->cll_reference, scope, source);
2010 EXPORT_SYMBOL(cl_lock_hold_add);
2013 * Releases a hold and a reference on a lock, on which caller acquired a
2016 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2017 const char *scope, const void *source)
2019 LINVRNT(cl_lock_invariant(env, lock));
2021 cl_lock_hold_release(env, lock, scope, source);
2022 lu_ref_del(&lock->cll_reference, scope, source);
2023 cl_lock_put(env, lock);
2026 EXPORT_SYMBOL(cl_lock_unhold);
2029 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2031 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2032 const char *scope, const void *source)
2034 LINVRNT(cl_lock_invariant(env, lock));
2036 cl_lock_mutex_get(env, lock);
2037 cl_lock_hold_release(env, lock, scope, source);
2038 cl_lock_mutex_put(env, lock);
2039 lu_ref_del(&lock->cll_reference, scope, source);
2040 cl_lock_put(env, lock);
2043 EXPORT_SYMBOL(cl_lock_release);
2045 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2047 LINVRNT(cl_lock_is_mutexed(lock));
2048 LINVRNT(cl_lock_invariant(env, lock));
2051 cl_lock_used_mod(env, lock, +1);
2054 EXPORT_SYMBOL(cl_lock_user_add);
2056 int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2058 LINVRNT(cl_lock_is_mutexed(lock));
2059 LINVRNT(cl_lock_invariant(env, lock));
2060 LASSERT(lock->cll_users > 0);
2063 cl_lock_used_mod(env, lock, -1);
2064 RETURN(lock->cll_users == 0);
2066 EXPORT_SYMBOL(cl_lock_user_del);
2069 * Checks if the modes of two locks are compatible.
2071 * This returns true iff en-queuing \a lock2 won't cause cancellation of \a
2072 * lock1 even when these locks overlap.
2074 int cl_lock_compatible(const struct cl_lock *lock1, const struct cl_lock *lock2)
2076 enum cl_lock_mode mode1;
2077 enum cl_lock_mode mode2;
2080 mode1 = lock1->cll_descr.cld_mode;
2081 mode2 = lock2->cll_descr.cld_mode;
2082 RETURN(mode2 == CLM_PHANTOM ||
2083 (mode1 == CLM_READ && mode2 == CLM_READ));
2085 EXPORT_SYMBOL(cl_lock_compatible);
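/*
 * Editorial note (not part of the original file): with the rule above, the
 * compatibility matrix is
 *
 *                        lock2: PHANTOM  READ  WRITE
 *     lock1 = PHANTOM              yes    no     no
 *     lock1 = READ                 yes    yes    no
 *     lock1 = WRITE                yes    no     no
 *
 * i.e. a PHANTOM request never conflicts, and READ/READ is the only other
 * combination that does not force cancellation of lock1.
 */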
2087 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2089 static const char *names[] = {
2090 [CLM_PHANTOM] = "PHANTOM",
2091 [CLM_READ] = "READ",
2092 [CLM_WRITE] = "WRITE"
2094 if (0 <= mode && mode < ARRAY_SIZE(names))
2099 EXPORT_SYMBOL(cl_lock_mode_name);
2102 * Prints human readable representation of a lock description.
2104 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2105 lu_printer_t printer,
2106 const struct cl_lock_descr *descr)
2108 const struct lu_fid *fid;
2110 fid = lu_object_fid(&descr->cld_obj->co_lu);
2111 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2113 EXPORT_SYMBOL(cl_lock_descr_print);
2116 * Prints human readable representation of \a lock to the \a f.
2118 void cl_lock_print(const struct lu_env *env, void *cookie,
2119 lu_printer_t printer, const struct cl_lock *lock)
2121 const struct cl_lock_slice *slice;
2122 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2123 lock, atomic_read(&lock->cll_ref),
2124 lock->cll_state, lock->cll_error, lock->cll_holds,
2125 lock->cll_users, lock->cll_flags);
2126 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2127 (*printer)(env, cookie, " {\n");
2129 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2130 (*printer)(env, cookie, " %s@%p: ",
2131 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2133 if (slice->cls_ops->clo_print != NULL)
2134 slice->cls_ops->clo_print(env, cookie, printer, slice);
2135 (*printer)(env, cookie, "\n");
2137 (*printer)(env, cookie, "} lock@%p\n", lock);
2139 EXPORT_SYMBOL(cl_lock_print);
2141 int cl_lock_init(void)
2143 return lu_kmem_init(cl_lock_caches);
2146 void cl_lock_fini(void)
2148 lu_kmem_fini(cl_lock_caches);