1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 # define EXPORT_SYMTAB
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_fid.h>
49 #include <libcfs/list.h>
50 /* lu_time_global_{init,fini}() */
53 #include <cl_object.h>
54 #include "cl_internal.h"
56 /** Lock class of cl_lock::cll_guard */
57 static struct lock_class_key cl_lock_guard_class;
58 static cfs_mem_cache_t *cl_lock_kmem;
60 static struct lu_kmem_descr cl_lock_caches[] = {
62 .ckd_cache = &cl_lock_kmem,
63 .ckd_name = "cl_lock_kmem",
64 .ckd_size = sizeof (struct cl_lock)
72 * Basic lock invariant that is maintained at all times. Caller either has a
73 * reference to \a lock, or somehow assures that \a lock cannot be freed.
75 * \see cl_lock_invariant()
77 static int cl_lock_invariant_trusted(const struct lu_env *env,
78 const struct cl_lock *lock)
82 ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
83 atomic_read(&lock->cll_ref) >= lock->cll_holds &&
84 lock->cll_holds >= lock->cll_users &&
85 lock->cll_holds >= 0 &&
86 lock->cll_users >= 0 &&
91 * Stronger lock invariant, checking that caller has a reference on a lock.
93 * \see cl_lock_invariant_trusted()
95 static int cl_lock_invariant(const struct lu_env *env,
96 const struct cl_lock *lock)
100 result = atomic_read(&lock->cll_ref) > 0 &&
101 cl_lock_invariant_trusted(env, lock);
102 if (!result && env != NULL)
103 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
108 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
110 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
112 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
116 * Returns a set of counters for this lock, depending on a lock nesting.
118 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
119 const struct cl_lock *lock)
121 struct cl_thread_info *info;
122 enum clt_nesting_level nesting;
124 info = cl_env_info(env);
125 nesting = cl_lock_nesting(lock);
126 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
127 return &info->clt_counters[nesting];
130 #define RETIP ((unsigned long)__builtin_return_address(0))
132 #ifdef CONFIG_LOCKDEP
133 static struct lock_class_key cl_lock_key;
135 static void cl_lock_lockdep_init(struct cl_lock *lock)
137 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
140 static void cl_lock_lockdep_acquire(const struct lu_env *env,
141 struct cl_lock *lock, __u32 enqflags)
143 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
144 lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
145 /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
146 /* check: */ 2, RETIP);
149 static void cl_lock_lockdep_release(const struct lu_env *env,
150 struct cl_lock *lock)
152 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
153 lock_release(&lock->dep_map, 0, RETIP);
156 #else /* !CONFIG_LOCKDEP */
158 static void cl_lock_lockdep_init(struct cl_lock *lock)
160 static void cl_lock_lockdep_acquire(const struct lu_env *env,
161 struct cl_lock *lock, __u32 enqflags)
163 static void cl_lock_lockdep_release(const struct lu_env *env,
164 struct cl_lock *lock)
167 #endif /* !CONFIG_LOCKDEP */
170 * Adds lock slice to the compound lock.
172 * This is called by cl_object_operations::coo_lock_init() methods to add a
173 * per-layer state to the lock. New state is added at the end of
174 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
176 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
178 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
179 struct cl_object *obj,
180 const struct cl_lock_operations *ops)
183 slice->cls_lock = lock;
184 list_add_tail(&slice->cls_linkage, &lock->cll_layers);
185 slice->cls_obj = obj;
186 slice->cls_ops = ops;
189 EXPORT_SYMBOL(cl_lock_slice_add);
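/*
 * A minimal sketch of how a layer's cl_object_operations::coo_lock_init()
 * method is expected to call cl_lock_slice_add(). The layer ("foo"), its
 * slice type, kmem cache and operation vector below are hypothetical and
 * used only for illustration:
 *
 * \code
 * static int foo_lock_init(const struct lu_env *env, struct cl_object *obj,
 *                          struct cl_lock *lock, const struct cl_io *io)
 * {
 *         struct foo_lock *fl;
 *
 *         OBD_SLAB_ALLOC_PTR_GFP(fl, foo_lock_kmem, CFS_ALLOC_IO);
 *         if (fl == NULL)
 *                 return -ENOMEM;
 *         cl_lock_slice_add(lock, &fl->fol_cl, obj, &foo_lock_ops);
 *         return 0;
 * }
 * \endcode
 */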
192 * Returns true iff a lock with the mode \a has provides at least the same
193 * guarantees as a lock with the mode \a need.
195 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
197 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
198 need == CLM_PHANTOM || need == CLM_GROUP);
199 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
200 has == CLM_PHANTOM || has == CLM_GROUP);
201 CLASSERT(CLM_PHANTOM < CLM_READ);
202 CLASSERT(CLM_READ < CLM_WRITE);
203 CLASSERT(CLM_WRITE < CLM_GROUP);
205 if (has != CLM_GROUP)
210 EXPORT_SYMBOL(cl_lock_mode_match);
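/*
 * Illustration of the rule documented above, relying on the mode ordering
 * asserted by the CLASSERT()s: a stronger mode satisfies a weaker request,
 * while a CLM_GROUP lock matches only a group request, e.g.:
 *
 *     cl_lock_mode_match(CLM_WRITE, CLM_READ)  == 1
 *     cl_lock_mode_match(CLM_READ,  CLM_WRITE) == 0
 *     cl_lock_mode_match(CLM_GROUP, CLM_READ)  == 0
 */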
213 * Returns true iff extent portions of lock descriptions match.
215 int cl_lock_ext_match(const struct cl_lock_descr *has,
216 const struct cl_lock_descr *need)
219 has->cld_start <= need->cld_start &&
220 has->cld_end >= need->cld_end &&
221 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
222 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
224 EXPORT_SYMBOL(cl_lock_ext_match);
227 * Returns true iff a lock with the description \a has provides at least the
228 * same guarantees as a lock with the description \a need.
230 int cl_lock_descr_match(const struct cl_lock_descr *has,
231 const struct cl_lock_descr *need)
234 cl_object_same(has->cld_obj, need->cld_obj) &&
235 cl_lock_ext_match(has, need);
237 EXPORT_SYMBOL(cl_lock_descr_match);
239 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
241 struct cl_object *obj = lock->cll_descr.cld_obj;
243 LASSERT(cl_is_lock(lock));
244 LINVRNT(!cl_lock_is_mutexed(lock));
248 while (!list_empty(&lock->cll_layers)) {
249 struct cl_lock_slice *slice;
251 slice = list_entry(lock->cll_layers.next, struct cl_lock_slice,
253 list_del_init(lock->cll_layers.next);
254 slice->cls_ops->clo_fini(env, slice);
256 atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
257 atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
258 lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
259 cl_object_put(env, obj);
260 lu_ref_fini(&lock->cll_reference);
261 lu_ref_fini(&lock->cll_holders);
262 mutex_destroy(&lock->cll_guard);
263 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
268 * Releases a reference on a lock.
270 * When last reference is released, lock is returned to the cache, unless it
271 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
274 * \see cl_object_put(), cl_page_put()
276 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
278 struct cl_object *obj;
279 struct cl_object_header *head;
280 struct cl_site *site;
282 LINVRNT(cl_lock_invariant(env, lock));
284 obj = lock->cll_descr.cld_obj;
285 LINVRNT(obj != NULL);
286 head = cl_object_header(obj);
287 site = cl_object_site(obj);
289 CDEBUG(D_DLMTRACE, "releasing reference: %d %p %lu\n",
290 atomic_read(&lock->cll_ref), lock, RETIP);
292 if (atomic_dec_and_test(&lock->cll_ref)) {
293 if (lock->cll_state == CLS_FREEING) {
294 LASSERT(list_empty(&lock->cll_linkage));
295 cl_lock_free(env, lock);
297 atomic_dec(&site->cs_locks.cs_busy);
301 EXPORT_SYMBOL(cl_lock_put);
304 * Acquires an additional reference to a lock.
306 * This can be called only by caller already possessing a reference to \a
309 * \see cl_object_get(), cl_page_get()
311 void cl_lock_get(struct cl_lock *lock)
313 LINVRNT(cl_lock_invariant(NULL, lock));
314 CDEBUG(D_DLMTRACE|D_TRACE, "acquiring reference: %d %p %lu\n",
315 atomic_read(&lock->cll_ref), lock, RETIP);
316 atomic_inc(&lock->cll_ref);
318 EXPORT_SYMBOL(cl_lock_get);
321 * Acquires a reference to a lock.
323 * This is much like cl_lock_get(), except that this function can be used to
324 * acquire initial reference to the cached lock. Caller has to deal with all
325 * possible races. Use with care!
327 * \see cl_page_get_trust()
329 void cl_lock_get_trust(struct cl_lock *lock)
331 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
333 LASSERT(cl_is_lock(lock));
334 CDEBUG(D_DLMTRACE|D_TRACE, "acquiring trusted reference: %d %p %lu\n",
335 atomic_read(&lock->cll_ref), lock, RETIP);
336 if (atomic_inc_return(&lock->cll_ref) == 1)
337 atomic_inc(&site->cs_locks.cs_busy);
339 EXPORT_SYMBOL(cl_lock_get_trust);
342 * Helper function destroying a lock that wasn't completely initialized.
344 * Other threads can acquire references to the top-lock through its
345 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
347 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
349 cl_lock_mutex_get(env, lock);
350 cl_lock_delete(env, lock);
351 cl_lock_mutex_put(env, lock);
352 cl_lock_put(env, lock);
355 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
356 struct cl_object *obj,
357 const struct cl_io *io,
358 const struct cl_lock_descr *descr)
360 struct cl_lock *lock;
361 struct lu_object_header *head;
362 struct cl_site *site = cl_object_site(obj);
365 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
367 atomic_set(&lock->cll_ref, 1);
368 lock->cll_descr = *descr;
369 lock->cll_state = CLS_NEW;
371 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
373 CFS_INIT_LIST_HEAD(&lock->cll_layers);
374 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
375 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
376 lu_ref_init(&lock->cll_reference);
377 lu_ref_init(&lock->cll_holders);
378 mutex_init(&lock->cll_guard);
379 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
380 cfs_waitq_init(&lock->cll_wq);
381 head = obj->co_lu.lo_header;
382 atomic_inc(&site->cs_locks_state[CLS_NEW]);
383 atomic_inc(&site->cs_locks.cs_total);
384 atomic_inc(&site->cs_locks.cs_created);
385 cl_lock_lockdep_init(lock);
386 list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
389 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
391 cl_lock_finish(env, lock);
397 lock = ERR_PTR(-ENOMEM);
402 * Transfer the lock into INTRANSIT state and return the original state.
404 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
405 * \post state: CLS_INTRANSIT
408 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
409 struct cl_lock *lock)
411 enum cl_lock_state state = lock->cll_state;
413 LASSERT(cl_lock_is_mutexed(lock));
414 LASSERT(state != CLS_INTRANSIT);
415 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
416 "Malformed lock state %d.\n", state);
418 cl_lock_state_set(env, lock, CLS_INTRANSIT);
419 lock->cll_intransit_owner = cfs_current();
420 cl_lock_hold_add(env, lock, "intransit", cfs_current());
423 EXPORT_SYMBOL(cl_lock_intransit);
426 * Exit the intransit state and restore the lock state to the original state
428 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
429 enum cl_lock_state state)
431 LASSERT(cl_lock_is_mutexed(lock));
432 LASSERT(lock->cll_state == CLS_INTRANSIT);
433 LASSERT(state != CLS_INTRANSIT);
434 LASSERT(lock->cll_intransit_owner == cfs_current());
436 lock->cll_intransit_owner = NULL;
437 cl_lock_state_set(env, lock, state);
438 cl_lock_unhold(env, lock, "intransit", cfs_current());
440 EXPORT_SYMBOL(cl_lock_extransit);
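/*
 * Sketch of the typical bracketing of a blocking state transition with the
 * INTRANSIT state, as done by cl_use_try() and cl_unuse_try() below:
 *
 * \code
 *     enum cl_lock_state state;
 *
 *     cl_lock_mutex_get(env, lock);
 *     state = cl_lock_intransit(env, lock);
 *
 *     ... do the potentially blocking work; other threads that observe
 *     ... CLS_INTRANSIT back off or wait in cl_lock_state_wait()
 *
 *     cl_lock_extransit(env, lock, new_state);
 *     cl_lock_mutex_put(env, lock);
 * \endcode
 *
 * new_state is either the state saved above or a different state reflecting
 * the outcome of the transition.
 */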
443 * Checks whether the lock is in the INTRANSIT state with the transition driven by another thread.
445 int cl_lock_is_intransit(struct cl_lock *lock)
447 LASSERT(cl_lock_is_mutexed(lock));
448 return lock->cll_state == CLS_INTRANSIT &&
449 lock->cll_intransit_owner != cfs_current();
451 EXPORT_SYMBOL(cl_lock_is_intransit);
453 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
454 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
455 * cover multiple stripes and can trigger cascading timeouts.
457 static int cl_lock_fits_into(const struct lu_env *env,
458 const struct cl_lock *lock,
459 const struct cl_lock_descr *need,
460 const struct cl_io *io)
462 const struct cl_lock_slice *slice;
464 LINVRNT(cl_lock_invariant_trusted(env, lock));
466 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
467 if (slice->cls_ops->clo_fits_into != NULL &&
468 !slice->cls_ops->clo_fits_into(env, slice, need, io))
474 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
475 struct cl_object *obj,
476 const struct cl_io *io,
477 const struct cl_lock_descr *need)
479 struct cl_lock *lock;
480 struct cl_object_header *head;
481 struct cl_site *site;
485 head = cl_object_header(obj);
486 site = cl_object_site(obj);
487 LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
488 atomic_inc(&site->cs_locks.cs_lookup);
489 list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
492 LASSERT(cl_is_lock(lock));
493 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
494 lock->cll_state < CLS_FREEING &&
495 !(lock->cll_flags & CLF_CANCELLED) &&
496 cl_lock_fits_into(env, lock, need, io);
497 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%i) need: "DDESCR": %d\n",
498 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
501 cl_lock_get_trust(lock);
502 /* move the lock to the LRU head */
503 list_move(&lock->cll_linkage, &head->coh_locks);
504 atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
512 * Returns a lock matching description \a need.
514 * This is the main entry point into the cl_lock caching interface. First, a
515 * cache (implemented as a per-object linked list) is consulted. If lock is
516 * found there, it is returned immediately. Otherwise new lock is allocated
517 * and returned. In any case, additional reference to lock is acquired.
519 * \see cl_object_find(), cl_page_find()
521 static struct cl_lock *cl_lock_find(const struct lu_env *env,
522 const struct cl_io *io,
523 const struct cl_lock_descr *need)
525 struct cl_object_header *head;
526 struct cl_object *obj;
527 struct cl_lock *lock;
528 struct cl_site *site;
533 head = cl_object_header(obj);
534 site = cl_object_site(obj);
536 spin_lock(&head->coh_lock_guard);
537 lock = cl_lock_lookup(env, obj, io, need);
538 spin_unlock(&head->coh_lock_guard);
541 lock = cl_lock_alloc(env, obj, io, need);
543 struct cl_lock *ghost;
545 spin_lock(&head->coh_lock_guard);
546 ghost = cl_lock_lookup(env, obj, io, need);
548 list_add(&lock->cll_linkage, &head->coh_locks);
549 spin_unlock(&head->coh_lock_guard);
550 atomic_inc(&site->cs_locks.cs_busy);
552 spin_unlock(&head->coh_lock_guard);
554 * Other threads can acquire references to the
555 * top-lock through its sub-locks. Hence, it
556 * cannot be cl_lock_free()-ed immediately.
558 cl_lock_finish(env, lock);
567 * Returns existing lock matching given description. This is similar to
568 * cl_lock_find() except that no new lock is created, and returned lock is
569 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
571 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
572 const struct cl_lock_descr *need,
573 const char *scope, const void *source)
575 struct cl_object_header *head;
576 struct cl_object *obj;
577 struct cl_lock *lock;
581 head = cl_object_header(obj);
583 spin_lock(&head->coh_lock_guard);
584 lock = cl_lock_lookup(env, obj, io, need);
585 spin_unlock(&head->coh_lock_guard);
590 cl_lock_mutex_get(env, lock);
591 if (lock->cll_state == CLS_INTRANSIT)
592 cl_lock_state_wait(env, lock); /* Don't care return value. */
593 if (lock->cll_state == CLS_CACHED) {
595 result = cl_use_try(env, lock, 1);
597 cl_lock_error(env, lock, result);
599 ok = lock->cll_state == CLS_HELD;
601 cl_lock_hold_add(env, lock, scope, source);
602 cl_lock_user_add(env, lock);
603 cl_lock_put(env, lock);
605 cl_lock_mutex_put(env, lock);
607 cl_lock_put(env, lock);
613 EXPORT_SYMBOL(cl_lock_peek);
616 * Returns a slice within a lock, corresponding to the given layer in the
621 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
622 const struct lu_device_type *dtype)
624 const struct cl_lock_slice *slice;
626 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
629 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
630 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
635 EXPORT_SYMBOL(cl_lock_at);
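/*
 * Example (sketch): a layer can find its own slice in a compound lock by the
 * device type it is registered with. The "foo" names below are hypothetical:
 *
 * \code
 *     const struct cl_lock_slice *slice;
 *     struct foo_lock *fl;
 *
 *     slice = cl_lock_at(lock, &foo_device_type);
 *     if (slice != NULL)
 *             fl = container_of(slice, struct foo_lock, fol_cl);
 * \endcode
 */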
637 static void cl_lock_trace(struct cl_thread_counters *counters,
638 const char *prefix, const struct cl_lock *lock)
640 CDEBUG(D_DLMTRACE|D_TRACE, "%s: %i@%p %p %i %i\n", prefix,
641 atomic_read(&lock->cll_ref), lock, lock->cll_guarder,
642 lock->cll_depth, counters->ctc_nr_locks_locked);
645 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
647 struct cl_thread_counters *counters;
649 counters = cl_lock_counters(env, lock);
651 counters->ctc_nr_locks_locked++;
652 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
653 cl_lock_trace(counters, "got mutex", lock);
657 * Locks cl_lock object.
659 * This is used to manipulate cl_lock fields, and to serialize state
660 * transitions in the lock state machine.
662 * \post cl_lock_is_mutexed(lock)
664 * \see cl_lock_mutex_put()
666 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
668 LINVRNT(cl_lock_invariant(env, lock));
670 if (lock->cll_guarder == cfs_current()) {
671 LINVRNT(cl_lock_is_mutexed(lock));
672 LINVRNT(lock->cll_depth > 0);
674 struct cl_object_header *hdr;
675 struct cl_thread_info *info;
678 LINVRNT(lock->cll_guarder != cfs_current());
679 hdr = cl_object_header(lock->cll_descr.cld_obj);
681 * Check that mutices are taken in the bottom-to-top order.
683 info = cl_env_info(env);
684 for (i = 0; i < hdr->coh_nesting; ++i)
685 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
686 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
687 lock->cll_guarder = cfs_current();
688 LINVRNT(lock->cll_depth == 0);
690 cl_lock_mutex_tail(env, lock);
692 EXPORT_SYMBOL(cl_lock_mutex_get);
695 * Try-locks cl_lock object.
697 * \retval 0 \a lock was successfully locked
699 * \retval -EBUSY \a lock cannot be locked right now
701 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
703 * \see cl_lock_mutex_get()
705 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
709 LINVRNT(cl_lock_invariant_trusted(env, lock));
713 if (lock->cll_guarder == cfs_current()) {
714 LINVRNT(lock->cll_depth > 0);
715 cl_lock_mutex_tail(env, lock);
716 } else if (mutex_trylock(&lock->cll_guard)) {
717 LINVRNT(lock->cll_depth == 0);
718 lock->cll_guarder = cfs_current();
719 cl_lock_mutex_tail(env, lock);
724 EXPORT_SYMBOL(cl_lock_mutex_try);
727 * Unlocks cl_lock object.
729 * \pre cl_lock_is_mutexed(lock)
731 * \see cl_lock_mutex_get()
733 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
735 struct cl_thread_counters *counters;
737 LINVRNT(cl_lock_invariant(env, lock));
738 LINVRNT(cl_lock_is_mutexed(lock));
739 LINVRNT(lock->cll_guarder == cfs_current());
740 LINVRNT(lock->cll_depth > 0);
742 counters = cl_lock_counters(env, lock);
743 LINVRNT(counters->ctc_nr_locks_locked > 0);
745 cl_lock_trace(counters, "put mutex", lock);
746 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
747 counters->ctc_nr_locks_locked--;
748 if (--lock->cll_depth == 0) {
749 lock->cll_guarder = NULL;
750 mutex_unlock(&lock->cll_guard);
753 EXPORT_SYMBOL(cl_lock_mutex_put);
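/*
 * The lock mutex nests in the current thread (see cll_depth above), so the
 * usual pattern for manipulating lock state is simply (sketch):
 *
 * \code
 *     cl_lock_mutex_get(env, lock);
 *     ... examine or update cll_state, cll_holds, cll_users, cll_flags ...
 *     cl_lock_mutex_put(env, lock);
 * \endcode
 *
 * cl_lock_is_mutexed(), defined below, can be used in assertions to check
 * that the current thread owns the mutex.
 */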
756 * Returns true iff lock's mutex is owned by the current thread.
758 int cl_lock_is_mutexed(struct cl_lock *lock)
760 return lock->cll_guarder == cfs_current();
762 EXPORT_SYMBOL(cl_lock_is_mutexed);
765 * Returns number of cl_lock mutices held by the current thread (environment).
767 int cl_lock_nr_mutexed(const struct lu_env *env)
769 struct cl_thread_info *info;
774 * NOTE: if summation across all nesting levels (currently 2) proves
775 * too expensive, a summary counter can be added to
776 * struct cl_thread_info.
778 info = cl_env_info(env);
779 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
780 locked += info->clt_counters[i].ctc_nr_locks_locked;
783 EXPORT_SYMBOL(cl_lock_nr_mutexed);
785 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
787 LINVRNT(cl_lock_is_mutexed(lock));
788 LINVRNT(cl_lock_invariant(env, lock));
790 if (!(lock->cll_flags & CLF_CANCELLED)) {
791 const struct cl_lock_slice *slice;
793 lock->cll_flags |= CLF_CANCELLED;
794 list_for_each_entry_reverse(slice, &lock->cll_layers,
796 if (slice->cls_ops->clo_cancel != NULL)
797 slice->cls_ops->clo_cancel(env, slice);
803 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
805 struct cl_object_header *head;
806 const struct cl_lock_slice *slice;
808 LINVRNT(cl_lock_is_mutexed(lock));
809 LINVRNT(cl_lock_invariant(env, lock));
812 if (lock->cll_state < CLS_FREEING) {
813 cl_lock_state_set(env, lock, CLS_FREEING);
815 head = cl_object_header(lock->cll_descr.cld_obj);
817 spin_lock(&head->coh_lock_guard);
818 list_del_init(&lock->cll_linkage);
820 spin_unlock(&head->coh_lock_guard);
822 * From now on, no new references to this lock can be acquired
823 * by cl_lock_lookup().
825 list_for_each_entry_reverse(slice, &lock->cll_layers,
827 if (slice->cls_ops->clo_delete != NULL)
828 slice->cls_ops->clo_delete(env, slice);
831 * From now on, no new references to this lock can be acquired
832 * by layer-specific means (like a pointer from struct
833 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
836 * Lock will be finally freed in cl_lock_put() when last of
837 * existing references goes away.
844 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
845 * top-lock (nesting == 0) accounts for this modification in the per-thread
846 * debugging counters. Sub-lock holds can be released by a thread different
847 * from one that acquired it.
849 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
852 struct cl_thread_counters *counters;
853 enum clt_nesting_level nesting;
855 lock->cll_holds += delta;
856 nesting = cl_lock_nesting(lock);
857 if (nesting == CNL_TOP) {
858 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
859 counters->ctc_nr_held += delta;
860 LASSERT(counters->ctc_nr_held >= 0);
865 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
866 * cl_lock_hold_mod() for the explanation of the debugging code.
868 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
871 struct cl_thread_counters *counters;
872 enum clt_nesting_level nesting;
874 lock->cll_users += delta;
875 nesting = cl_lock_nesting(lock);
876 if (nesting == CNL_TOP) {
877 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
878 counters->ctc_nr_used += delta;
879 LASSERT(counters->ctc_nr_used >= 0);
883 static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
884 const char *scope, const void *source)
886 LINVRNT(cl_lock_is_mutexed(lock));
887 LINVRNT(cl_lock_invariant(env, lock));
888 LASSERT(lock->cll_holds > 0);
891 lu_ref_del(&lock->cll_holders, scope, source);
892 cl_lock_hold_mod(env, lock, -1);
893 if (lock->cll_holds == 0) {
894 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
895 lock->cll_descr.cld_mode == CLM_GROUP)
897 * If the lock is still a phantom or group lock when the user
898 * is done with it, destroy the lock.
900 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
901 if (lock->cll_flags & CLF_CANCELPEND) {
902 lock->cll_flags &= ~CLF_CANCELPEND;
903 cl_lock_cancel0(env, lock);
905 if (lock->cll_flags & CLF_DOOMED) {
906 /* no longer doomed: it's dead... Jim. */
907 lock->cll_flags &= ~CLF_DOOMED;
908 cl_lock_delete0(env, lock);
916 * Waits until lock state is changed.
918 * This function is called with cl_lock mutex locked, atomically releases
919 * mutex and goes to sleep, waiting for a lock state change (signaled by
920 * cl_lock_signal()), and re-acquires the mutex before return.
922 * This function is used to wait until lock state machine makes some progress
923 * and to emulate synchronous operations on top of asynchronous lock
926 * \retval -EINTR wait was interrupted
928 * \retval 0 wait wasn't interrupted
930 * \pre cl_lock_is_mutexed(lock)
932 * \see cl_lock_signal()
934 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
936 cfs_waitlink_t waiter;
940 LINVRNT(cl_lock_is_mutexed(lock));
941 LINVRNT(cl_lock_invariant(env, lock));
942 LASSERT(lock->cll_depth == 1);
943 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
945 result = lock->cll_error;
947 cfs_waitlink_init(&waiter);
948 cfs_waitq_add(&lock->cll_wq, &waiter);
949 set_current_state(CFS_TASK_INTERRUPTIBLE);
950 cl_lock_mutex_put(env, lock);
952 LASSERT(cl_lock_nr_mutexed(env) == 0);
953 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
955 cl_lock_mutex_get(env, lock);
956 set_current_state(CFS_TASK_RUNNING);
957 cfs_waitq_del(&lock->cll_wq, &waiter);
958 result = cfs_signal_pending() ? -EINTR : 0;
962 EXPORT_SYMBOL(cl_lock_state_wait);
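/*
 * cl_lock_state_wait() is what turns the asynchronous ..._try() steps of the
 * lock state machine into synchronous operations. The canonical retry loop,
 * used by cl_enqueue_locked(), cl_unuse_locked() and cl_wait() below, looks
 * roughly like this (sketch; cl_foo_try() stands for any ..._try() step):
 *
 * \code
 *     do {
 *             result = cl_foo_try(env, lock);
 *             if (result == CLO_WAIT) {
 *                     result = cl_lock_state_wait(env, lock);
 *                     if (result == 0)
 *                             continue;
 *             }
 *             break;
 *     } while (1);
 * \endcode
 */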
964 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
965 enum cl_lock_state state)
967 const struct cl_lock_slice *slice;
970 LINVRNT(cl_lock_is_mutexed(lock));
971 LINVRNT(cl_lock_invariant(env, lock));
973 list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
974 if (slice->cls_ops->clo_state != NULL)
975 slice->cls_ops->clo_state(env, slice, state);
976 cfs_waitq_broadcast(&lock->cll_wq);
981 * Notifies waiters that lock state changed.
983 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
984 * layers about state change by calling cl_lock_operations::clo_state()
987 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
990 cl_lock_state_signal(env, lock, lock->cll_state);
993 EXPORT_SYMBOL(cl_lock_signal);
996 * Changes lock state.
998 * This function is invoked to notify layers that lock state changed, possibly
999 * as a result of an asynchronous event such as call-back reception.
1001 * \post lock->cll_state == state
1003 * \see cl_lock_operations::clo_state()
1005 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1006 enum cl_lock_state state)
1008 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
1011 LASSERT(lock->cll_state <= state ||
1012 (lock->cll_state == CLS_CACHED &&
1013 (state == CLS_HELD || /* lock found in cache */
1014 state == CLS_NEW || /* sub-lock canceled */
1015 state == CLS_INTRANSIT)) ||
1016 /* lock is in transit state */
1017 lock->cll_state == CLS_INTRANSIT);
1019 if (lock->cll_state != state) {
1020 atomic_dec(&site->cs_locks_state[lock->cll_state]);
1021 atomic_inc(&site->cs_locks_state[state]);
1023 cl_lock_state_signal(env, lock, state);
1024 lock->cll_state = state;
1028 EXPORT_SYMBOL(cl_lock_state_set);
1030 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1032 const struct cl_lock_slice *slice;
1038 if (lock->cll_error != 0)
1041 LINVRNT(cl_lock_is_mutexed(lock));
1042 LINVRNT(cl_lock_invariant(env, lock));
1043 LASSERT(lock->cll_state == CLS_INTRANSIT);
1044 LASSERT(lock->cll_users > 0);
1045 LASSERT(lock->cll_holds > 0);
1048 list_for_each_entry_reverse(slice, &lock->cll_layers,
1050 if (slice->cls_ops->clo_unuse != NULL) {
1051 result = slice->cls_ops->clo_unuse(env, slice);
1056 LASSERT(result != -ENOSYS);
1057 } while (result == CLO_REPEAT);
1059 return result ?: lock->cll_error;
1063 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1064 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1065 * If @atomic is 1, the lock must be unused on failure to restore it to its
1066 * previous state, keeping the use process atomic.
1068 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1070 const struct cl_lock_slice *slice;
1072 enum cl_lock_state state;
1077 state = cl_lock_intransit(env, lock);
1078 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1079 if (slice->cls_ops->clo_use != NULL) {
1080 result = slice->cls_ops->clo_use(env, slice);
1085 LASSERT(result != -ENOSYS);
1087 LASSERT(lock->cll_state == CLS_INTRANSIT);
1092 if (result == -ESTALE) {
1094 * ESTALE means a sublock is being cancelled
1095 * at this moment; set the lock state to
1096 * NEW and ask the caller to repeat.
1099 result = CLO_REPEAT;
1102 /* @atomic means back-off-on-failure. */
1107 rc = cl_unuse_try_internal(env, lock);
1111 rc = cl_lock_state_wait(env, lock);
1116 /* Vet the results. */
1117 if (rc < 0 && result > 0)
1122 cl_lock_extransit(env, lock, state);
1125 EXPORT_SYMBOL(cl_use_try);
1128 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1131 static int cl_enqueue_kick(const struct lu_env *env,
1132 struct cl_lock *lock,
1133 struct cl_io *io, __u32 flags)
1136 const struct cl_lock_slice *slice;
1140 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1141 if (slice->cls_ops->clo_enqueue != NULL) {
1142 result = slice->cls_ops->clo_enqueue(env,
1148 LASSERT(result != -ENOSYS);
1153 * Tries to enqueue a lock.
1155 * This function is called repeatedly by cl_enqueue() until either lock is
1156 * enqueued, or error occurs. This function does not block waiting for
1157 * networking communication to complete.
1159 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1160 * lock->cll_state == CLS_HELD)
1162 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1163 * \see cl_lock_state::CLS_ENQUEUED
1165 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1166 struct cl_io *io, __u32 flags)
1174 LINVRNT(cl_lock_is_mutexed(lock));
1176 if (lock->cll_error != 0)
1178 switch (lock->cll_state) {
1180 cl_lock_state_set(env, lock, CLS_QUEUING);
1184 result = cl_enqueue_kick(env, lock, io, flags);
1186 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1189 LASSERT(cl_lock_is_intransit(lock));
1193 /* yank lock from the cache. */
1194 result = cl_use_try(env, lock, 0);
1203 * impossible, only held locks with increased
1204 * ->cll_holds can be enqueued, and they cannot be
1209 } while (result == CLO_REPEAT);
1211 cl_lock_error(env, lock, result);
1212 RETURN(result ?: lock->cll_error);
1214 EXPORT_SYMBOL(cl_enqueue_try);
1216 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1217 struct cl_io *io, __u32 enqflags)
1223 LINVRNT(cl_lock_is_mutexed(lock));
1224 LINVRNT(cl_lock_invariant(env, lock));
1225 LASSERT(lock->cll_holds > 0);
1227 cl_lock_user_add(env, lock);
1229 result = cl_enqueue_try(env, lock, io, enqflags);
1230 if (result == CLO_WAIT) {
1231 result = cl_lock_state_wait(env, lock);
1238 cl_lock_user_del(env, lock);
1239 if (result != -EINTR)
1240 cl_lock_error(env, lock, result);
1242 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1243 lock->cll_state == CLS_HELD));
1250 * \pre current thread or io owns a hold on lock.
1252 * \post ergo(result == 0, lock->users increased)
1253 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1254 * lock->cll_state == CLS_HELD)
1256 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1257 struct cl_io *io, __u32 enqflags)
1263 cl_lock_lockdep_acquire(env, lock, enqflags);
1264 cl_lock_mutex_get(env, lock);
1265 result = cl_enqueue_locked(env, lock, io, enqflags);
1266 cl_lock_mutex_put(env, lock);
1268 cl_lock_lockdep_release(env, lock);
1269 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1270 lock->cll_state == CLS_HELD));
1273 EXPORT_SYMBOL(cl_enqueue);
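/*
 * For reference, the life-cycle of a lock as seen by a typical caller is
 * roughly the following (sketch only; error handling is omitted and the
 * "scope"/source arguments are placeholders):
 *
 * \code
 *     lock = cl_lock_hold(env, io, need, "scope", source);
 *     rc = cl_enqueue(env, lock, io, enqflags);
 *     rc = cl_wait(env, lock);
 *     ... the extent described by need is now protected ...
 *     cl_unuse(env, lock);
 *     cl_lock_release(env, lock, "scope", source);
 * \endcode
 *
 * Most callers go through cl_lock_request() below, which combines lookup and
 * enqueue and retries until the resulting lock fits the io.
 */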
1276 * Tries to unlock a lock.
1278 * This function is called repeatedly by cl_unuse() until either lock is
1279 * unlocked, or error occurs.
1281 * \pre lock->cll_state <= CLS_HELD || cl_lock_is_intransit(lock)
1283 * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
1285 * \see cl_unuse() cl_lock_operations::clo_unuse()
1286 * \see cl_lock_state::CLS_CACHED
1288 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1291 enum cl_lock_state state = CLS_NEW;
1294 if (lock->cll_state != CLS_INTRANSIT) {
1295 if (lock->cll_users > 1) {
1296 cl_lock_user_del(env, lock);
1300 * New lock users (->cll_users) do not prevent unlocking
1301 * from proceeding. From this point, lock eventually reaches
1302 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1305 state = cl_lock_intransit(env, lock);
1308 result = cl_unuse_try_internal(env, lock);
1309 LASSERT(lock->cll_state == CLS_INTRANSIT);
1310 if (result != CLO_WAIT)
1312 * Once there is no more need to iterate ->clo_unuse() calls,
1313 * remove the lock user. This is done even if an unrecoverable error
1314 * happened during unlocking, because nothing else can be
1317 cl_lock_user_del(env, lock);
1318 if (result == 0 || result == -ESTALE) {
1320 * Return lock back to the cache. This is the only
1321 * place where lock is moved into CLS_CACHED state.
1323 * If one of ->clo_unuse() methods returned -ESTALE, lock
1324 * cannot be placed into cache and has to be
1325 * re-initialized. This happens e.g., when a sub-lock was
1326 * canceled while unlocking was in progress.
1328 state = result == 0 ? CLS_CACHED : CLS_NEW;
1329 cl_lock_extransit(env, lock, state);
1332 * Hide the -ESTALE error.
1333 * Suppose the lock is a glimpse lock with multiple
1334 * stripes, one of its sublocks returned -ENAVAIL,
1335 * and the other sublocks matched existing write locks. In this
1336 * case we can't set this lock to error, because otherwise some of
1337 * its sublocks may not be canceled, and some dirty
1338 * pages would never be written to the OSTs. -jay
1342 CWARN("result = %d, this is unlikely!\n", result);
1343 cl_lock_extransit(env, lock, state);
1346 result = result ?: lock->cll_error;
1348 cl_lock_error(env, lock, result);
1351 EXPORT_SYMBOL(cl_unuse_try);
1353 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1356 LASSERT(lock->cll_state <= CLS_HELD);
1360 result = cl_unuse_try(env, lock);
1361 if (result == CLO_WAIT) {
1362 result = cl_lock_state_wait(env, lock);
1374 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1377 cl_lock_mutex_get(env, lock);
1378 cl_unuse_locked(env, lock);
1379 cl_lock_mutex_put(env, lock);
1380 cl_lock_lockdep_release(env, lock);
1383 EXPORT_SYMBOL(cl_unuse);
1386 * Tries to wait for a lock.
1388 * This function is called repeatedly by cl_wait() until either lock is
1389 * granted, or error occurs. This function does not block waiting for network
1390 * communication to complete.
1392 * \see cl_wait() cl_lock_operations::clo_wait()
1393 * \see cl_lock_state::CLS_HELD
1395 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1397 const struct cl_lock_slice *slice;
1402 LINVRNT(cl_lock_is_mutexed(lock));
1403 LINVRNT(cl_lock_invariant(env, lock));
1404 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1405 lock->cll_state == CLS_HELD ||
1406 lock->cll_state == CLS_INTRANSIT);
1407 LASSERT(lock->cll_users > 0);
1408 LASSERT(lock->cll_holds > 0);
1411 if (lock->cll_error != 0)
1414 if (cl_lock_is_intransit(lock)) {
1419 if (lock->cll_state == CLS_HELD)
1424 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1425 if (slice->cls_ops->clo_wait != NULL) {
1426 result = slice->cls_ops->clo_wait(env, slice);
1431 LASSERT(result != -ENOSYS);
1433 cl_lock_state_set(env, lock, CLS_HELD);
1434 } while (result == CLO_REPEAT);
1435 RETURN(result ?: lock->cll_error);
1437 EXPORT_SYMBOL(cl_wait_try);
1440 * Waits until enqueued lock is granted.
1442 * \pre current thread or io owns a hold on the lock
1443 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1444 * lock->cll_state == CLS_HELD)
1446 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1448 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1453 cl_lock_mutex_get(env, lock);
1455 LINVRNT(cl_lock_invariant(env, lock));
1456 LASSERT(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD);
1457 LASSERT(lock->cll_holds > 0);
1460 result = cl_wait_try(env, lock);
1461 if (result == CLO_WAIT) {
1462 result = cl_lock_state_wait(env, lock);
1469 cl_lock_user_del(env, lock);
1470 if (result != -EINTR)
1471 cl_lock_error(env, lock, result);
1472 cl_lock_lockdep_release(env, lock);
1474 cl_lock_mutex_put(env, lock);
1475 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1478 EXPORT_SYMBOL(cl_wait);
1481 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1484 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1486 const struct cl_lock_slice *slice;
1487 unsigned long pound;
1488 unsigned long ounce;
1491 LINVRNT(cl_lock_is_mutexed(lock));
1492 LINVRNT(cl_lock_invariant(env, lock));
1495 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1496 if (slice->cls_ops->clo_weigh != NULL) {
1497 ounce = slice->cls_ops->clo_weigh(env, slice);
1499 if (pound < ounce) /* over-weight^Wflow */
1505 EXPORT_SYMBOL(cl_lock_weigh);
1508 * Notifies layers that lock description changed.
1510 * The server can grant the client a lock different from the one requested
1511 * (e.g., larger in extent). This method is called when the actually granted
1512 * lock description becomes known, to let layers accommodate the changed lock
1515 * \see cl_lock_operations::clo_modify()
1517 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1518 const struct cl_lock_descr *desc)
1520 const struct cl_lock_slice *slice;
1521 struct cl_object *obj = lock->cll_descr.cld_obj;
1522 struct cl_object_header *hdr = cl_object_header(obj);
1526 /* don't allow object to change */
1527 LASSERT(obj == desc->cld_obj);
1528 LINVRNT(cl_lock_is_mutexed(lock));
1529 LINVRNT(cl_lock_invariant(env, lock));
1531 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1532 if (slice->cls_ops->clo_modify != NULL) {
1533 result = slice->cls_ops->clo_modify(env, slice, desc);
1538 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1539 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1541 * Just replace description in place. Nothing more is needed for
1542 * now. If locks were indexed according to their extent and/or mode,
1543 * that index would have to be updated here.
1545 spin_lock(&hdr->coh_lock_guard);
1546 lock->cll_descr = *desc;
1547 spin_unlock(&hdr->coh_lock_guard);
1550 EXPORT_SYMBOL(cl_lock_modify);
1553 * Initializes lock closure with a given origin.
1555 * \see cl_lock_closure
1557 void cl_lock_closure_init(const struct lu_env *env,
1558 struct cl_lock_closure *closure,
1559 struct cl_lock *origin, int wait)
1561 LINVRNT(cl_lock_is_mutexed(origin));
1562 LINVRNT(cl_lock_invariant(env, origin));
1564 CFS_INIT_LIST_HEAD(&closure->clc_list);
1565 closure->clc_origin = origin;
1566 closure->clc_wait = wait;
1567 closure->clc_nr = 0;
1569 EXPORT_SYMBOL(cl_lock_closure_init);
1572 * Builds a closure of \a lock.
1574 * Building of a closure consists of adding initial lock (\a lock) into it,
1575 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1576 * methods might call cl_lock_closure_build() recursively again, adding more
1577 * locks to the closure, etc.
1579 * \see cl_lock_closure
1581 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1582 struct cl_lock_closure *closure)
1584 const struct cl_lock_slice *slice;
1588 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1589 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1591 result = cl_lock_enclosure(env, lock, closure);
1593 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1594 if (slice->cls_ops->clo_closure != NULL) {
1595 result = slice->cls_ops->clo_closure(env, slice,
1603 cl_lock_disclosure(env, closure);
1606 EXPORT_SYMBOL(cl_lock_closure_build);
1609 * Adds new lock to a closure.
1611 * Try-locks \a lock and, if successful, adds it to the closure (never more
1612 * than once). If the try-lock fails, returns CLO_REPEAT, after optionally
1613 * waiting until the next try-lock is likely to succeed.
1615 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1616 struct cl_lock_closure *closure)
1620 if (!cl_lock_mutex_try(env, lock)) {
1622 * If lock->cll_inclosure is not empty, lock is already in
1625 if (list_empty(&lock->cll_inclosure)) {
1626 cl_lock_get_trust(lock);
1627 lu_ref_add(&lock->cll_reference, "closure", closure);
1628 list_add(&lock->cll_inclosure, &closure->clc_list);
1631 cl_lock_mutex_put(env, lock);
1634 cl_lock_disclosure(env, closure);
1635 if (closure->clc_wait) {
1636 cl_lock_get_trust(lock);
1637 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1638 cl_lock_mutex_put(env, closure->clc_origin);
1640 LASSERT(cl_lock_nr_mutexed(env) == 0);
1641 cl_lock_mutex_get(env, lock);
1642 cl_lock_mutex_put(env, lock);
1644 cl_lock_mutex_get(env, closure->clc_origin);
1645 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1646 cl_lock_put(env, lock);
1648 result = CLO_REPEAT;
1652 EXPORT_SYMBOL(cl_lock_enclosure);
1654 /** Releases mutices of enclosed locks. */
1655 void cl_lock_disclosure(const struct lu_env *env,
1656 struct cl_lock_closure *closure)
1658 struct cl_lock *scan;
1659 struct cl_lock *temp;
1661 list_for_each_entry_safe(scan, temp, &closure->clc_list, cll_inclosure){
1662 list_del_init(&scan->cll_inclosure);
1663 cl_lock_mutex_put(env, scan);
1664 lu_ref_del(&scan->cll_reference, "closure", closure);
1665 cl_lock_put(env, scan);
1668 LASSERT(closure->clc_nr == 0);
1670 EXPORT_SYMBOL(cl_lock_disclosure);
1672 /** Finalizes a closure. */
1673 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1675 LASSERT(closure->clc_nr == 0);
1676 LASSERT(list_empty(&closure->clc_list));
1678 EXPORT_SYMBOL(cl_lock_closure_fini);
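/*
 * A closure is typically used as follows (sketch; the caller is assumed to
 * already own the mutex of \a origin):
 *
 * \code
 *     cl_lock_closure_init(env, closure, origin, wait);
 *     rc = cl_lock_closure_build(env, lock, closure);
 *     if (rc == 0) {
 *             ... every lock on closure->clc_list is mutexed; operate on them
 *     }
 *     cl_lock_disclosure(env, closure);
 *     cl_lock_closure_fini(closure);
 * \endcode
 *
 * cl_lock_disclosure() on an empty closure is harmless, so it is safe to call
 * it unconditionally before finalizing the closure.
 */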
1681 * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
1682 * destroyed, then destroys the lock. If there are holds on the lock, postpones
1683 * destruction until all holds are released. This is called when a decision is
1684 * made to destroy the lock in the future. E.g., when a blocking AST is
1685 * received on it, or fatal communication error happens.
1687 * Caller must have a reference on this lock to prevent a situation when the
1688 * deleted lock lingers in memory for an indefinite time, because nobody calls
1689 * cl_lock_put() to finish it.
1691 * \pre atomic_read(&lock->cll_ref) > 0
1692 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1693 * cl_lock_nr_mutexed(env) == 1)
1694 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1695 * held, as deletion of sub-locks might require releasing a top-lock
1698 * \see cl_lock_operations::clo_delete()
1699 * \see cl_lock::cll_holds
1701 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1703 LINVRNT(cl_lock_is_mutexed(lock));
1704 LINVRNT(cl_lock_invariant(env, lock));
1705 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1706 cl_lock_nr_mutexed(env) == 1));
1709 if (lock->cll_holds == 0)
1710 cl_lock_delete0(env, lock);
1712 lock->cll_flags |= CLF_DOOMED;
1715 EXPORT_SYMBOL(cl_lock_delete);
1718 * Marks lock as irrecoverably failed, and marks it for destruction. This
1719 * happens when, e.g., server fails to grant a lock to us, or networking
1722 * \pre atomic_read(&lock->cll_ref) > 0
1724 * \see clo_lock_delete()
1725 * \see cl_lock::cll_holds
1727 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1729 LINVRNT(cl_lock_is_mutexed(lock));
1730 LINVRNT(cl_lock_invariant(env, lock));
1733 if (lock->cll_error == 0 && error != 0) {
1734 lock->cll_error = error;
1735 cl_lock_signal(env, lock);
1736 cl_lock_cancel(env, lock);
1737 cl_lock_delete(env, lock);
1741 EXPORT_SYMBOL(cl_lock_error);
1744 * Cancels this lock. Notifies layers
1745 * (bottom-to-top) that lock is being cancelled, then destroys the lock. If
1746 * there are holds on the lock, postpones cancellation until
1747 * all holds are released.
1749 * Cancellation notification is delivered to layers at most once.
1751 * \see cl_lock_operations::clo_cancel()
1752 * \see cl_lock::cll_holds
1754 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1756 LINVRNT(cl_lock_is_mutexed(lock));
1757 LINVRNT(cl_lock_invariant(env, lock));
1760 if (lock->cll_holds == 0)
1761 cl_lock_cancel0(env, lock);
1763 lock->cll_flags |= CLF_CANCELPEND;
1766 EXPORT_SYMBOL(cl_lock_cancel);
1769 * Finds an existing lock covering given page and optionally different from a
1770 * given \a except lock.
1772 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
1773 struct cl_page *page, struct cl_lock *except,
1774 int pending, int canceld)
1776 struct cl_object_header *head;
1777 struct cl_lock *scan;
1778 struct cl_lock *lock;
1779 struct cl_lock_descr *need;
1783 head = cl_object_header(obj);
1784 need = &cl_env_info(env)->clt_descr;
1787 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1789 need->cld_start = need->cld_end = page->cp_index;
1791 spin_lock(&head->coh_lock_guard);
1792 list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1793 if (scan != except &&
1794 cl_lock_ext_match(&scan->cll_descr, need) &&
1795 scan->cll_state >= CLS_HELD &&
1796 scan->cll_state < CLS_FREEING &&
1798 * This check is racy as the lock can be canceled right
1799 * after it is done, but this is fine, because page exists
1802 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1803 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1804 /* Don't increase cs_hit here since this
1805 * is just a helper function. */
1806 cl_lock_get_trust(scan);
1811 spin_unlock(&head->coh_lock_guard);
1814 EXPORT_SYMBOL(cl_lock_at_page);
1817 * Returns a list of pages protected (only) by a given lock.
1819 * Scans an extent of the page radix tree corresponding to \a lock and queues
1820 * all pages that are not protected by locks other than \a lock into \a queue.
1822 void cl_lock_page_list_fixup(const struct lu_env *env,
1823 struct cl_io *io, struct cl_lock *lock,
1824 struct cl_page_list *queue)
1826 struct cl_page *page;
1827 struct cl_page *temp;
1828 struct cl_page_list *plist = &cl_env_info(env)->clt_list;
1830 LINVRNT(cl_lock_invariant(env, lock));
1833 /* Now that we have a list of cl_pages under the \a lock, we need
1834 * to check whether some of the pages are covered by another ldlm lock.
1835 * If this is the case, they don't need to be written out this time.
1837 * For example, the client holds PW locks A:[0,200] & B:[100,300], and
1838 * the latter is to be canceled; this means another client is
1839 * reading/writing [200,300], since A won't be canceled. So
1840 * we only need to write the pages covered by [200,300]. This is safe,
1841 * since [100,200] is still protected by lock A.
1844 cl_page_list_init(plist);
1845 cl_page_list_for_each_safe(page, temp, queue) {
1846 pgoff_t idx = page->cp_index;
1847 struct cl_lock *found;
1848 struct cl_lock_descr *descr;
1850 /* The algorithm counts on the index-ascending page index. */
1851 LASSERT(ergo(&temp->cp_batch != &queue->pl_pages,
1852 page->cp_index < temp->cp_index));
1854 found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1859 descr = &found->cll_descr;
1860 list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
1862 idx = page->cp_index;
1863 if (descr->cld_start > idx || descr->cld_end < idx)
1865 cl_page_list_move(plist, queue, page);
1867 cl_lock_put(env, found);
1870 /* The pages in plist are covered by other locks, don't handle them
1874 cl_page_list_disown(env, io, plist);
1875 cl_page_list_fini(env, plist);
1878 EXPORT_SYMBOL(cl_lock_page_list_fixup);
1881 * Invalidate pages protected by the given lock, sending them out to the
1882 * server first, if necessary.
1884 * This function does the following:
1886 * - collects a list of pages to be invalidated,
1888 * - unmaps them from the user virtual memory,
1890 * - sends dirty pages to the server,
1892 * - waits for transfer completion,
1894 * - discards pages, and throws them out of memory.
1896 * If \a discard is set, pages are discarded without sending them to the
1899 * If an error happens at any step, the process continues anyway (the reasoning
1900 * behind this being that lock cancellation cannot be delayed indefinitely).
1902 int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
1905 struct cl_thread_info *info = cl_env_info(env);
1906 struct cl_io *io = &info->clt_io;
1907 struct cl_2queue *queue = &info->clt_queue;
1908 struct cl_lock_descr *descr = &lock->cll_descr;
1912 LINVRNT(cl_lock_invariant(env, lock));
1915 io->ci_obj = cl_object_top(descr->cld_obj);
1916 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1921 cl_2queue_init(queue);
1922 cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
1923 descr->cld_end, &queue->c2_qin, nonblock);
1924 page_count = queue->c2_qin.pl_nr;
1925 if (page_count > 0) {
1926 result = cl_page_list_unmap(env, io, &queue->c2_qin);
1928 long timeout = 600; /* 10 minutes. */
1929 /* for debug purpose, if this request can't be
1930 * finished in 10 minutes, we hope it can
1933 result = cl_io_submit_sync(env, io, CRT_WRITE,
1937 CWARN("Writing %lu pages error: %d\n",
1938 page_count, result);
1940 cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
1941 cl_2queue_discard(env, io, queue);
1942 cl_2queue_disown(env, io, queue);
1944 cl_2queue_fini(env, queue);
1951 cl_io_fini(env, io);
1954 EXPORT_SYMBOL(cl_lock_page_out);
1957 * Eliminate all locks for a given object.
1959 * Caller has to guarantee that no lock is in active use.
1961 * \param cancel when this is set, cl_locks_prune() cancels locks before
1964 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
1966 struct cl_object_header *head;
1967 struct cl_lock *lock;
1970 head = cl_object_header(obj);
1972 * If locks are destroyed without cancellation, all pages must be
1973 * already destroyed (as otherwise they will be left unprotected).
1975 LASSERT(ergo(!cancel,
1976 head->coh_tree.rnode == NULL && head->coh_pages == 0));
1978 spin_lock(&head->coh_lock_guard);
1979 while (!list_empty(&head->coh_locks)) {
1980 lock = container_of(head->coh_locks.next,
1981 struct cl_lock, cll_linkage);
1982 cl_lock_get_trust(lock);
1983 spin_unlock(&head->coh_lock_guard);
1984 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
1985 cl_lock_mutex_get(env, lock);
1986 if (lock->cll_state < CLS_FREEING) {
1987 LASSERT(lock->cll_holds == 0);
1988 LASSERT(lock->cll_users == 0);
1990 cl_lock_cancel(env, lock);
1991 cl_lock_delete(env, lock);
1993 cl_lock_mutex_put(env, lock);
1994 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
1995 cl_lock_put(env, lock);
1996 spin_lock(&head->coh_lock_guard);
1998 spin_unlock(&head->coh_lock_guard);
2001 EXPORT_SYMBOL(cl_locks_prune);
2004 * Returns true if \a addr is an address of an allocated cl_lock. Used in
2005 * assertions. This check is optimistically imprecise, i.e., it occasionally
2006 * returns true for incorrect addresses, but if it returns false, then the
2007 * address is guaranteed to be incorrect. (Should be named cl_lockp().)
2011 int cl_is_lock(const void *addr)
2013 return cfs_mem_is_in_cache(addr, cl_lock_kmem);
2015 EXPORT_SYMBOL(cl_is_lock);
2017 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2018 const struct cl_io *io,
2019 const struct cl_lock_descr *need,
2020 const char *scope, const void *source)
2022 struct cl_lock *lock;
2027 lock = cl_lock_find(env, io, need);
2030 cl_lock_mutex_get(env, lock);
2031 if (lock->cll_state < CLS_FREEING) {
2032 cl_lock_hold_mod(env, lock, +1);
2033 lu_ref_add(&lock->cll_holders, scope, source);
2034 lu_ref_add(&lock->cll_reference, scope, source);
2037 cl_lock_mutex_put(env, lock);
2038 cl_lock_put(env, lock);
2044 * Returns a lock matching \a need description with a reference and a hold on
2047 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2048 * guarantees that lock is not in the CLS_FREEING state on return.
2050 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2051 const struct cl_lock_descr *need,
2052 const char *scope, const void *source)
2054 struct cl_lock *lock;
2058 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2060 cl_lock_mutex_put(env, lock);
2063 EXPORT_SYMBOL(cl_lock_hold);
2066 * Main high-level entry point of the cl_lock interface that finds an existing
2067 * lock or enqueues a new lock matching the given description.
2069 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2070 const struct cl_lock_descr *need,
2072 const char *scope, const void *source)
2074 struct cl_lock *lock;
2075 const struct lu_fid *fid;
2081 fid = lu_object_fid(&io->ci_obj->co_lu);
2084 warn = iter >= 16 && IS_PO2(iter);
2085 CDEBUG(warn ? D_WARNING : D_DLMTRACE,
2086 DDESCR"@"DFID" %i %08x `%s'\n",
2087 PDESCR(need), PFID(fid), iter, enqflags, scope);
2088 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2089 if (!IS_ERR(lock)) {
2090 rc = cl_enqueue_locked(env, lock, io, enqflags);
2092 if (cl_lock_fits_into(env, lock, need, io)) {
2093 cl_lock_mutex_put(env, lock);
2094 cl_lock_lockdep_acquire(env,
2098 CL_LOCK_DEBUG(D_WARNING, env, lock,
2099 "got (see bug 17665)\n");
2100 cl_unuse_locked(env, lock);
2102 cl_lock_hold_release(env, lock, scope, source);
2103 cl_lock_mutex_put(env, lock);
2104 lu_ref_del(&lock->cll_reference, scope, source);
2105 cl_lock_put(env, lock);
2113 EXPORT_SYMBOL(cl_lock_request);
2116 * Adds a hold to a known lock.
2118 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2119 const char *scope, const void *source)
2121 LINVRNT(cl_lock_is_mutexed(lock));
2122 LINVRNT(cl_lock_invariant(env, lock));
2123 LASSERT(lock->cll_state != CLS_FREEING);
2126 cl_lock_hold_mod(env, lock, +1);
2128 lu_ref_add(&lock->cll_holders, scope, source);
2129 lu_ref_add(&lock->cll_reference, scope, source);
2132 EXPORT_SYMBOL(cl_lock_hold_add);
2135 * Releases a hold and a reference on a lock, on which caller acquired a
2138 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2139 const char *scope, const void *source)
2141 LINVRNT(cl_lock_invariant(env, lock));
2143 cl_lock_hold_release(env, lock, scope, source);
2144 lu_ref_del(&lock->cll_reference, scope, source);
2145 cl_lock_put(env, lock);
2148 EXPORT_SYMBOL(cl_lock_unhold);
2151 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2153 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2154 const char *scope, const void *source)
2156 LINVRNT(cl_lock_invariant(env, lock));
2158 cl_lock_mutex_get(env, lock);
2159 cl_lock_hold_release(env, lock, scope, source);
2160 cl_lock_mutex_put(env, lock);
2161 lu_ref_del(&lock->cll_reference, scope, source);
2162 cl_lock_put(env, lock);
2165 EXPORT_SYMBOL(cl_lock_release);
2167 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2169 LINVRNT(cl_lock_is_mutexed(lock));
2170 LINVRNT(cl_lock_invariant(env, lock));
2173 cl_lock_used_mod(env, lock, +1);
2176 EXPORT_SYMBOL(cl_lock_user_add);
2178 int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2180 LINVRNT(cl_lock_is_mutexed(lock));
2181 LINVRNT(cl_lock_invariant(env, lock));
2182 LASSERT(lock->cll_users > 0);
2185 cl_lock_used_mod(env, lock, -1);
2186 RETURN(lock->cll_users == 0);
2188 EXPORT_SYMBOL(cl_lock_user_del);
2191 * Checks whether the modes of two locks are compatible.
2193 * This returns true iff en-queuing \a lock2 won't cause cancellation of \a
2194 * lock1 even when these locks overlap.
2196 int cl_lock_compatible(const struct cl_lock *lock1, const struct cl_lock *lock2)
2198 enum cl_lock_mode mode1;
2199 enum cl_lock_mode mode2;
2202 mode1 = lock1->cll_descr.cld_mode;
2203 mode2 = lock2->cll_descr.cld_mode;
2204 RETURN(mode2 == CLM_PHANTOM ||
2205 (mode1 == CLM_READ && mode2 == CLM_READ));
2207 EXPORT_SYMBOL(cl_lock_compatible);
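/*
 * The rule above gives the following compatibility matrix (1 means enqueuing
 * lock2 does not cause cancellation of lock1):
 *
 *                     lock2:  PHANTOM  READ  WRITE  GROUP
 *     lock1: PHANTOM             1      0      0      0
 *            READ                1      1      0      0
 *            WRITE               1      0      0      0
 *            GROUP               1      0      0      0
 */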
2209 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2211 static const char *names[] = {
2212 [CLM_PHANTOM] = "PHANTOM",
2213 [CLM_READ] = "READ",
2214 [CLM_WRITE] = "WRITE",
2215 [CLM_GROUP] = "GROUP"
2217 if (0 <= mode && mode < ARRAY_SIZE(names))
2222 EXPORT_SYMBOL(cl_lock_mode_name);
2225 * Prints human readable representation of a lock description.
2227 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2228 lu_printer_t printer,
2229 const struct cl_lock_descr *descr)
2231 const struct lu_fid *fid;
2233 fid = lu_object_fid(&descr->cld_obj->co_lu);
2234 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2236 EXPORT_SYMBOL(cl_lock_descr_print);
2239 * Prints human readable representation of \a lock using \a printer.
2241 void cl_lock_print(const struct lu_env *env, void *cookie,
2242 lu_printer_t printer, const struct cl_lock *lock)
2244 const struct cl_lock_slice *slice;
2245 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2246 lock, atomic_read(&lock->cll_ref),
2247 lock->cll_state, lock->cll_error, lock->cll_holds,
2248 lock->cll_users, lock->cll_flags);
2249 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2250 (*printer)(env, cookie, " {\n");
2252 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2253 (*printer)(env, cookie, " %s@%p: ",
2254 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2256 if (slice->cls_ops->clo_print != NULL)
2257 slice->cls_ops->clo_print(env, cookie, printer, slice);
2258 (*printer)(env, cookie, "\n");
2260 (*printer)(env, cookie, "} lock@%p\n", lock);
2262 EXPORT_SYMBOL(cl_lock_print);
2264 int cl_lock_init(void)
2266 return lu_kmem_init(cl_lock_caches);
2269 void cl_lock_fini(void)
2271 lu_kmem_fini(cl_lock_caches);