1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 # define EXPORT_SYMTAB
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_fid.h>
49 #include <libcfs/list.h>
50 /* lu_time_global_{init,fini}() */
53 #include <cl_object.h>
54 #include "cl_internal.h"
56 /** Lock class of cl_lock::cll_guard */
57 static struct lock_class_key cl_lock_guard_class;
58 static cfs_mem_cache_t *cl_lock_kmem;
60 static struct lu_kmem_descr cl_lock_caches[] = {
62 .ckd_cache = &cl_lock_kmem,
63 .ckd_name = "cl_lock_kmem",
64 .ckd_size = sizeof (struct cl_lock)
72 * Basic lock invariant that is maintained at all times. Caller either has a
73 * reference to \a lock, or somehow assures that \a lock cannot be freed.
75 * \see cl_lock_invariant()
77 static int cl_lock_invariant_trusted(const struct lu_env *env,
78 const struct cl_lock *lock)
82 ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
83 atomic_read(&lock->cll_ref) >= lock->cll_holds &&
84 lock->cll_holds >= lock->cll_users &&
85 lock->cll_holds >= 0 &&
86 lock->cll_users >= 0 &&
91 * Stronger lock invariant, checking that caller has a reference on a lock.
93 * \see cl_lock_invariant_trusted()
95 static int cl_lock_invariant(const struct lu_env *env,
96 const struct cl_lock *lock)
100 result = atomic_read(&lock->cll_ref) > 0 &&
101 cl_lock_invariant_trusted(env, lock);
102 if (!result && env != NULL)
103 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
108 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
110 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
112 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
116 * Returns a set of counters for this lock, depending on a lock nesting.
118 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
119 const struct cl_lock *lock)
121 struct cl_thread_info *info;
122 enum clt_nesting_level nesting;
124 info = cl_env_info(env);
125 nesting = cl_lock_nesting(lock);
126 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
127 return &info->clt_counters[nesting];
130 static void cl_lock_trace0(int level, const struct lu_env *env,
131 const char *prefix, const struct cl_lock *lock,
132 const char *func, const int line)
134 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
135 CDEBUG(level, "%s: %p@(%i %p %i %d %d %d %d %lx)"
136 "(%p/%d/%i) at %s():%d\n",
138 atomic_read(&lock->cll_ref), lock->cll_guarder, lock->cll_depth,
139 lock->cll_state, lock->cll_error, lock->cll_holds,
140 lock->cll_users, lock->cll_flags,
141 env, h->coh_nesting, cl_lock_nr_mutexed(env),
144 #define cl_lock_trace(level, env, prefix, lock) \
145 cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
147 #define RETIP ((unsigned long)__builtin_return_address(0))
149 #ifdef CONFIG_LOCKDEP
150 static struct lock_class_key cl_lock_key;
152 static void cl_lock_lockdep_init(struct cl_lock *lock)
154 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
157 static void cl_lock_lockdep_acquire(const struct lu_env *env,
158 struct cl_lock *lock, __u32 enqflags)
160 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
161 lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
162 /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
163 /* check: */ 2, RETIP);
166 static void cl_lock_lockdep_release(const struct lu_env *env,
167 struct cl_lock *lock)
169 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
170 lock_release(&lock->dep_map, 0, RETIP);
173 #else /* !CONFIG_LOCKDEP */
175 static void cl_lock_lockdep_init(struct cl_lock *lock)
177 static void cl_lock_lockdep_acquire(const struct lu_env *env,
178 struct cl_lock *lock, __u32 enqflags)
180 static void cl_lock_lockdep_release(const struct lu_env *env,
181 struct cl_lock *lock)
184 #endif /* !CONFIG_LOCKDEP */
187 * Adds lock slice to the compound lock.
189 * This is called by cl_object_operations::coo_lock_init() methods to add a
190 * per-layer state to the lock. New state is added at the end of
191 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
193 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
195 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
196 struct cl_object *obj,
197 const struct cl_lock_operations *ops)
200 slice->cls_lock = lock;
201 list_add_tail(&slice->cls_linkage, &lock->cll_layers);
202 slice->cls_obj = obj;
203 slice->cls_ops = ops;
206 EXPORT_SYMBOL(cl_lock_slice_add);
209 * Returns true iff a lock with the mode \a has provides at least the same
210 * guarantees as a lock with the mode \a need.
212 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
214 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
215 need == CLM_PHANTOM || need == CLM_GROUP);
216 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
217 has == CLM_PHANTOM || has == CLM_GROUP);
218 CLASSERT(CLM_PHANTOM < CLM_READ);
219 CLASSERT(CLM_READ < CLM_WRITE);
220 CLASSERT(CLM_WRITE < CLM_GROUP);
222 if (has != CLM_GROUP)
227 EXPORT_SYMBOL(cl_lock_mode_match);
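/*
 * Illustration (added note, not part of the original code): given the mode
 * ordering asserted above, CLM_PHANTOM < CLM_READ < CLM_WRITE < CLM_GROUP,
 * a stronger non-group mode satisfies a weaker need, e.g.:
 *
 *     cl_lock_mode_match(CLM_WRITE, CLM_READ)   presumably returns 1
 *     cl_lock_mode_match(CLM_READ,  CLM_WRITE)  presumably returns 0
 *
 * Group locks are handled by the separate branch above and additionally
 * require matching gids (see cl_lock_ext_match() below).
 */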
230 * Returns true iff extent portions of lock descriptions match.
232 int cl_lock_ext_match(const struct cl_lock_descr *has,
233 const struct cl_lock_descr *need)
236 has->cld_start <= need->cld_start &&
237 has->cld_end >= need->cld_end &&
238 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
239 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
241 EXPORT_SYMBOL(cl_lock_ext_match);
244 * Returns true iff a lock with the description \a has provides at least the
245 * same guarantees as a lock with the description \a need.
247 int cl_lock_descr_match(const struct cl_lock_descr *has,
248 const struct cl_lock_descr *need)
251 cl_object_same(has->cld_obj, need->cld_obj) &&
252 cl_lock_ext_match(has, need);
254 EXPORT_SYMBOL(cl_lock_descr_match);
256 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
258 struct cl_object *obj = lock->cll_descr.cld_obj;
260 LASSERT(cl_is_lock(lock));
261 LINVRNT(!cl_lock_is_mutexed(lock));
264 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
266 while (!list_empty(&lock->cll_layers)) {
267 struct cl_lock_slice *slice;
269 slice = list_entry(lock->cll_layers.next, struct cl_lock_slice,
271 list_del_init(lock->cll_layers.next);
272 slice->cls_ops->clo_fini(env, slice);
274 atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
275 atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
276 lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
277 cl_object_put(env, obj);
278 lu_ref_fini(&lock->cll_reference);
279 lu_ref_fini(&lock->cll_holders);
280 mutex_destroy(&lock->cll_guard);
281 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
286 * Releases a reference on a lock.
288 * When last reference is released, lock is returned to the cache, unless it
289 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
292 * \see cl_object_put(), cl_page_put()
294 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
296 struct cl_object *obj;
297 struct cl_object_header *head;
298 struct cl_site *site;
300 LINVRNT(cl_lock_invariant(env, lock));
302 obj = lock->cll_descr.cld_obj;
303 LINVRNT(obj != NULL);
304 head = cl_object_header(obj);
305 site = cl_object_site(obj);
307 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
308 atomic_read(&lock->cll_ref), lock, RETIP);
310 if (atomic_dec_and_test(&lock->cll_ref)) {
311 if (lock->cll_state == CLS_FREEING) {
312 LASSERT(list_empty(&lock->cll_linkage));
313 cl_lock_free(env, lock);
315 atomic_dec(&site->cs_locks.cs_busy);
319 EXPORT_SYMBOL(cl_lock_put);
322 * Acquires an additional reference to a lock.
324 * This can be called only by caller already possessing a reference to \a
327 * \see cl_object_get(), cl_page_get()
329 void cl_lock_get(struct cl_lock *lock)
331 LINVRNT(cl_lock_invariant(NULL, lock));
332 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
333 atomic_read(&lock->cll_ref), lock, RETIP);
334 atomic_inc(&lock->cll_ref);
336 EXPORT_SYMBOL(cl_lock_get);
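/*
 * Usage sketch (added note, not part of the original code; the helper name
 * below is hypothetical): a caller that already owns a reference can pin the
 * lock across a hand-off to another context:
 *
 *     cl_lock_get(lock);                    extra reference for the hand-off
 *     hand_off_to_other_context(lock);      hypothetical helper
 *     ...
 *     cl_lock_put(env, lock);               drop it once the hand-off is done
 *
 * cl_lock_get_trust(), below, differs in that it can take the initial
 * reference to a cached lock, so the caller must itself guarantee that the
 * lock cannot disappear concurrently (e.g., by holding coh_lock_guard, as
 * cl_lock_lookup() does).
 */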
339 * Acquires a reference to a lock.
341 * This is much like cl_lock_get(), except that this function can be used to
342 * acquire initial reference to the cached lock. Caller has to deal with all
343 * possible races. Use with care!
345 * \see cl_page_get_trust()
347 void cl_lock_get_trust(struct cl_lock *lock)
349 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
351 LASSERT(cl_is_lock(lock));
352 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
353 atomic_read(&lock->cll_ref), lock, RETIP);
354 if (atomic_inc_return(&lock->cll_ref) == 1)
355 atomic_inc(&site->cs_locks.cs_busy);
357 EXPORT_SYMBOL(cl_lock_get_trust);
360 * Helper function destroying the lock that wasn't completely initialized.
362 * Other threads can acquire references to the top-lock through its
363 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
365 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
367 cl_lock_mutex_get(env, lock);
368 cl_lock_delete(env, lock);
369 cl_lock_mutex_put(env, lock);
370 cl_lock_put(env, lock);
373 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
374 struct cl_object *obj,
375 const struct cl_io *io,
376 const struct cl_lock_descr *descr)
378 struct cl_lock *lock;
379 struct lu_object_header *head;
380 struct cl_site *site = cl_object_site(obj);
383 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
385 atomic_set(&lock->cll_ref, 1);
386 lock->cll_descr = *descr;
387 lock->cll_state = CLS_NEW;
389 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
391 CFS_INIT_LIST_HEAD(&lock->cll_layers);
392 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
393 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
394 lu_ref_init(&lock->cll_reference);
395 lu_ref_init(&lock->cll_holders);
396 mutex_init(&lock->cll_guard);
397 lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
398 cfs_waitq_init(&lock->cll_wq);
399 head = obj->co_lu.lo_header;
400 atomic_inc(&site->cs_locks_state[CLS_NEW]);
401 atomic_inc(&site->cs_locks.cs_total);
402 atomic_inc(&site->cs_locks.cs_created);
403 cl_lock_lockdep_init(lock);
404 list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
407 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
409 cl_lock_finish(env, lock);
415 lock = ERR_PTR(-ENOMEM);
420 * Transfer the lock into INTRANSIT state and return the original state.
422 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
423 * \post state: CLS_INTRANSIT
426 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
427 struct cl_lock *lock)
429 enum cl_lock_state state = lock->cll_state;
431 LASSERT(cl_lock_is_mutexed(lock));
432 LASSERT(state != CLS_INTRANSIT);
433 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
434 "Malformed lock state %d.\n", state);
436 cl_lock_state_set(env, lock, CLS_INTRANSIT);
437 lock->cll_intransit_owner = cfs_current();
438 cl_lock_hold_add(env, lock, "intransit", cfs_current());
441 EXPORT_SYMBOL(cl_lock_intransit);
444 * Exits the INTRANSIT state and restores the lock state to the original state.
446 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
447 enum cl_lock_state state)
449 LASSERT(cl_lock_is_mutexed(lock));
450 LASSERT(lock->cll_state == CLS_INTRANSIT);
451 LASSERT(state != CLS_INTRANSIT);
452 LASSERT(lock->cll_intransit_owner == cfs_current());
454 lock->cll_intransit_owner = NULL;
455 cl_lock_state_set(env, lock, state);
456 cl_lock_unhold(env, lock, "intransit", cfs_current());
458 EXPORT_SYMBOL(cl_lock_extransit);
461 * Checks whether the lock is in the INTRANSIT state, held there by another thread.
463 int cl_lock_is_intransit(struct cl_lock *lock)
465 LASSERT(cl_lock_is_mutexed(lock));
466 return lock->cll_state == CLS_INTRANSIT &&
467 lock->cll_intransit_owner != cfs_current();
469 EXPORT_SYMBOL(cl_lock_is_intransit);
471 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
472 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
473 * cover multiple stripes and can trigger cascading timeouts.
475 static int cl_lock_fits_into(const struct lu_env *env,
476 const struct cl_lock *lock,
477 const struct cl_lock_descr *need,
478 const struct cl_io *io)
480 const struct cl_lock_slice *slice;
482 LINVRNT(cl_lock_invariant_trusted(env, lock));
484 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
485 if (slice->cls_ops->clo_fits_into != NULL &&
486 !slice->cls_ops->clo_fits_into(env, slice, need, io))
492 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
493 struct cl_object *obj,
494 const struct cl_io *io,
495 const struct cl_lock_descr *need)
497 struct cl_lock *lock;
498 struct cl_object_header *head;
499 struct cl_site *site;
503 head = cl_object_header(obj);
504 site = cl_object_site(obj);
505 LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
506 atomic_inc(&site->cs_locks.cs_lookup);
507 list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
510 LASSERT(cl_is_lock(lock));
511 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
512 lock->cll_state < CLS_FREEING &&
513 !(lock->cll_flags & CLF_CANCELLED) &&
514 cl_lock_fits_into(env, lock, need, io);
515 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%i) need: "DDESCR": %d\n",
516 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
519 cl_lock_get_trust(lock);
520 /* move the lock to the LRU head */
521 list_move(&lock->cll_linkage, &head->coh_locks);
522 atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
530 * Returns a lock matching description \a need.
532 * This is the main entry point into the cl_lock caching interface. First, a
533 * cache (implemented as a per-object linked list) is consulted. If lock is
534 * found there, it is returned immediately. Otherwise new lock is allocated
535 * and returned. In any case, additional reference to lock is acquired.
537 * \see cl_object_find(), cl_page_find()
539 static struct cl_lock *cl_lock_find(const struct lu_env *env,
540 const struct cl_io *io,
541 const struct cl_lock_descr *need)
543 struct cl_object_header *head;
544 struct cl_object *obj;
545 struct cl_lock *lock;
546 struct cl_site *site;
551 head = cl_object_header(obj);
552 site = cl_object_site(obj);
554 spin_lock(&head->coh_lock_guard);
555 lock = cl_lock_lookup(env, obj, io, need);
556 spin_unlock(&head->coh_lock_guard);
559 lock = cl_lock_alloc(env, obj, io, need);
561 struct cl_lock *ghost;
563 spin_lock(&head->coh_lock_guard);
564 ghost = cl_lock_lookup(env, obj, io, need);
566 list_add(&lock->cll_linkage, &head->coh_locks);
567 spin_unlock(&head->coh_lock_guard);
568 atomic_inc(&site->cs_locks.cs_busy);
570 spin_unlock(&head->coh_lock_guard);
572 * Other threads can acquire references to the
573 * top-lock through its sub-locks. Hence, it
574 * cannot be cl_lock_free()-ed immediately.
576 cl_lock_finish(env, lock);
585 * Returns existing lock matching given description. This is similar to
586 * cl_lock_find() except that no new lock is created, and returned lock is
587 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
589 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
590 const struct cl_lock_descr *need,
591 const char *scope, const void *source)
593 struct cl_object_header *head;
594 struct cl_object *obj;
595 struct cl_lock *lock;
599 head = cl_object_header(obj);
601 spin_lock(&head->coh_lock_guard);
602 lock = cl_lock_lookup(env, obj, io, need);
603 spin_unlock(&head->coh_lock_guard);
608 cl_lock_mutex_get(env, lock);
609 if (lock->cll_state == CLS_INTRANSIT)
610 cl_lock_state_wait(env, lock); /* Don't care return value. */
611 if (lock->cll_state == CLS_CACHED) {
613 result = cl_use_try(env, lock, 1);
615 cl_lock_error(env, lock, result);
617 ok = lock->cll_state == CLS_HELD;
619 cl_lock_hold_add(env, lock, scope, source);
620 cl_lock_user_add(env, lock);
621 cl_lock_put(env, lock);
623 cl_lock_mutex_put(env, lock);
625 cl_lock_put(env, lock);
631 EXPORT_SYMBOL(cl_lock_peek);
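/*
 * Usage sketch (added note, not part of the original code): cl_lock_peek()
 * is an opportunistic lookup; a NULL result simply means nothing suitable is
 * cached and the caller falls back to the full cl_lock_request() path:
 *
 *     lock = cl_lock_peek(env, io, need, "myscope", cfs_current());
 *     if (lock != NULL) {
 *             ... fast path: lock is already CLS_HELD ...
 *     } else {
 *             ... slow path: enqueue a new lock ...
 *     }
 *
 * The scope string and source pointer are only used for lu_ref tracking of
 * holds and references.
 */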
634 * Returns a slice within a lock, corresponding to the given layer in the device stack.
639 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
640 const struct lu_device_type *dtype)
642 const struct cl_lock_slice *slice;
644 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
647 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
648 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
653 EXPORT_SYMBOL(cl_lock_at);
655 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
657 struct cl_thread_counters *counters;
659 counters = cl_lock_counters(env, lock);
661 counters->ctc_nr_locks_locked++;
662 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
663 cl_lock_trace(D_TRACE, env, "got mutex", lock);
667 * Locks cl_lock object.
669 * This is used to manipulate cl_lock fields, and to serialize state
670 * transitions in the lock state machine.
672 * \post cl_lock_is_mutexed(lock)
674 * \see cl_lock_mutex_put()
676 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
678 LINVRNT(cl_lock_invariant(env, lock));
680 if (lock->cll_guarder == cfs_current()) {
681 LINVRNT(cl_lock_is_mutexed(lock));
682 LINVRNT(lock->cll_depth > 0);
684 struct cl_object_header *hdr;
685 struct cl_thread_info *info;
688 LINVRNT(lock->cll_guarder != cfs_current());
689 hdr = cl_object_header(lock->cll_descr.cld_obj);
691 * Check that mutices are taken in the bottom-to-top order.
693 info = cl_env_info(env);
694 for (i = 0; i < hdr->coh_nesting; ++i)
695 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
696 mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
697 lock->cll_guarder = cfs_current();
698 LINVRNT(lock->cll_depth == 0);
700 cl_lock_mutex_tail(env, lock);
702 EXPORT_SYMBOL(cl_lock_mutex_get);
705 * Try-locks cl_lock object.
707 * \retval 0 \a lock was successfully locked
709 * \retval -EBUSY \a lock cannot be locked right now
711 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
713 * \see cl_lock_mutex_get()
715 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
719 LINVRNT(cl_lock_invariant_trusted(env, lock));
723 if (lock->cll_guarder == cfs_current()) {
724 LINVRNT(lock->cll_depth > 0);
725 cl_lock_mutex_tail(env, lock);
726 } else if (mutex_trylock(&lock->cll_guard)) {
727 LINVRNT(lock->cll_depth == 0);
728 lock->cll_guarder = cfs_current();
729 cl_lock_mutex_tail(env, lock);
734 EXPORT_SYMBOL(cl_lock_mutex_try);
737 * Unlocks cl_lock object.
739 * \pre cl_lock_is_mutexed(lock)
741 * \see cl_lock_mutex_get()
743 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
745 struct cl_thread_counters *counters;
747 LINVRNT(cl_lock_invariant(env, lock));
748 LINVRNT(cl_lock_is_mutexed(lock));
749 LINVRNT(lock->cll_guarder == cfs_current());
750 LINVRNT(lock->cll_depth > 0);
752 counters = cl_lock_counters(env, lock);
753 LINVRNT(counters->ctc_nr_locks_locked > 0);
755 cl_lock_trace(D_TRACE, env, "put mutex", lock);
756 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
757 counters->ctc_nr_locks_locked--;
758 if (--lock->cll_depth == 0) {
759 lock->cll_guarder = NULL;
760 mutex_unlock(&lock->cll_guard);
763 EXPORT_SYMBOL(cl_lock_mutex_put);
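/*
 * Usage sketch (added note, not part of the original code):
 * cl_lock_mutex_get()/cl_lock_mutex_put() behave as a recursive mutex keyed
 * on the owning thread, so a typical state manipulation looks like:
 *
 *     cl_lock_mutex_get(env, lock);
 *     ... inspect or update cll_state, counters, flags ...
 *     cl_lock_mutex_put(env, lock);
 *
 * Nested cl_lock_mutex_get() calls from the owning thread only increment
 * cll_depth; the underlying cll_guard mutex is dropped when the outermost
 * cl_lock_mutex_put() brings cll_depth back to zero.
 */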
766 * Returns true iff lock's mutex is owned by the current thread.
768 int cl_lock_is_mutexed(struct cl_lock *lock)
770 return lock->cll_guarder == cfs_current();
772 EXPORT_SYMBOL(cl_lock_is_mutexed);
775 * Returns number of cl_lock mutices held by the current thread (environment).
777 int cl_lock_nr_mutexed(const struct lu_env *env)
779 struct cl_thread_info *info;
784 * NOTE: if summation across all nesting levels (currently 2) proves
785 * too expensive, a summary counter can be added to
786 * struct cl_thread_info.
788 info = cl_env_info(env);
789 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
790 locked += info->clt_counters[i].ctc_nr_locks_locked;
793 EXPORT_SYMBOL(cl_lock_nr_mutexed);
795 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
797 LINVRNT(cl_lock_is_mutexed(lock));
798 LINVRNT(cl_lock_invariant(env, lock));
800 if (!(lock->cll_flags & CLF_CANCELLED)) {
801 const struct cl_lock_slice *slice;
803 lock->cll_flags |= CLF_CANCELLED;
804 list_for_each_entry_reverse(slice, &lock->cll_layers,
806 if (slice->cls_ops->clo_cancel != NULL)
807 slice->cls_ops->clo_cancel(env, slice);
813 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
815 struct cl_object_header *head;
816 const struct cl_lock_slice *slice;
818 LINVRNT(cl_lock_is_mutexed(lock));
819 LINVRNT(cl_lock_invariant(env, lock));
822 if (lock->cll_state < CLS_FREEING) {
823 cl_lock_state_set(env, lock, CLS_FREEING);
825 head = cl_object_header(lock->cll_descr.cld_obj);
827 spin_lock(&head->coh_lock_guard);
828 list_del_init(&lock->cll_linkage);
830 spin_unlock(&head->coh_lock_guard);
832 * From now on, no new references to this lock can be acquired
833 * by cl_lock_lookup().
835 list_for_each_entry_reverse(slice, &lock->cll_layers,
837 if (slice->cls_ops->clo_delete != NULL)
838 slice->cls_ops->clo_delete(env, slice);
841 * From now on, no new references to this lock can be acquired
842 * by layer-specific means (like a pointer from struct
843 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
846 * Lock will be finally freed in cl_lock_put() when last of
847 * existing references goes away.
854 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
855 * top-lock (nesting == 0) accounts for this modification in the per-thread
856 * debugging counters. Sub-lock holds can be released by a thread different
857 * from one that acquired it.
859 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
862 struct cl_thread_counters *counters;
863 enum clt_nesting_level nesting;
865 lock->cll_holds += delta;
866 nesting = cl_lock_nesting(lock);
867 if (nesting == CNL_TOP) {
868 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
869 counters->ctc_nr_held += delta;
870 LASSERT(counters->ctc_nr_held >= 0);
875 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
876 * cl_lock_hold_mod() for the explanation of the debugging code.
878 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
881 struct cl_thread_counters *counters;
882 enum clt_nesting_level nesting;
884 lock->cll_users += delta;
885 nesting = cl_lock_nesting(lock);
886 if (nesting == CNL_TOP) {
887 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
888 counters->ctc_nr_used += delta;
889 LASSERT(counters->ctc_nr_used >= 0);
893 static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
894 const char *scope, const void *source)
896 LINVRNT(cl_lock_is_mutexed(lock));
897 LINVRNT(cl_lock_invariant(env, lock));
898 LASSERT(lock->cll_holds > 0);
901 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
902 lu_ref_del(&lock->cll_holders, scope, source);
903 cl_lock_hold_mod(env, lock, -1);
904 if (lock->cll_holds == 0) {
905 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
906 lock->cll_descr.cld_mode == CLM_GROUP)
908 * If lock is still phantom or grouplock when user is
909 * done with it---destroy the lock.
911 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
912 if (lock->cll_flags & CLF_CANCELPEND) {
913 lock->cll_flags &= ~CLF_CANCELPEND;
914 cl_lock_cancel0(env, lock);
916 if (lock->cll_flags & CLF_DOOMED) {
917 /* no longer doomed: it's dead... Jim. */
918 lock->cll_flags &= ~CLF_DOOMED;
919 cl_lock_delete0(env, lock);
927 * Waits until lock state is changed.
929 * This function is called with cl_lock mutex locked, atomically releases
930 * mutex and goes to sleep, waiting for a lock state change (signaled by
931 * cl_lock_signal()), and re-acquires the mutex before return.
933 * This function is used to wait until lock state machine makes some progress
934 * and to emulate synchronous operations on top of the asynchronous lock machine.
937 * \retval -EINTR wait was interrupted
939 * \retval 0 wait wasn't interrupted
941 * \pre cl_lock_is_mutexed(lock)
943 * \see cl_lock_signal()
945 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
947 cfs_waitlink_t waiter;
951 LINVRNT(cl_lock_is_mutexed(lock));
952 LINVRNT(cl_lock_invariant(env, lock));
953 LASSERT(lock->cll_depth == 1);
954 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
956 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
957 result = lock->cll_error;
959 cfs_waitlink_init(&waiter);
960 cfs_waitq_add(&lock->cll_wq, &waiter);
961 set_current_state(CFS_TASK_INTERRUPTIBLE);
962 cl_lock_mutex_put(env, lock);
964 LASSERT(cl_lock_nr_mutexed(env) == 0);
965 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
967 cl_lock_mutex_get(env, lock);
968 set_current_state(CFS_TASK_RUNNING);
969 cfs_waitq_del(&lock->cll_wq, &waiter);
970 result = cfs_signal_pending() ? -EINTR : 0;
974 EXPORT_SYMBOL(cl_lock_state_wait);
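/*
 * Usage sketch (added note, not part of the original code): callers drive
 * the state machine with a "try, then wait" loop, as cl_enqueue_locked() and
 * cl_wait() below do; cl_foo_try() stands for any of the *_try() helpers:
 *
 *     do {
 *             result = cl_foo_try(env, lock);
 *             if (result == CLO_WAIT) {
 *                     result = cl_lock_state_wait(env, lock);
 *                     if (result == 0)
 *                             continue;
 *             }
 *             break;
 *     } while (1);
 *
 * cl_lock_state_wait() atomically drops the lock mutex while sleeping and
 * re-acquires it before returning, so the loop body always runs with the
 * lock mutexed.
 */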
976 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
977 enum cl_lock_state state)
979 const struct cl_lock_slice *slice;
982 LINVRNT(cl_lock_is_mutexed(lock));
983 LINVRNT(cl_lock_invariant(env, lock));
985 list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
986 if (slice->cls_ops->clo_state != NULL)
987 slice->cls_ops->clo_state(env, slice, state);
988 cfs_waitq_broadcast(&lock->cll_wq);
993 * Notifies waiters that lock state changed.
995 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
996 * layers about the state change by calling cl_lock_operations::clo_state() top-to-bottom.
999 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1002 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1003 cl_lock_state_signal(env, lock, lock->cll_state);
1006 EXPORT_SYMBOL(cl_lock_signal);
1009 * Changes lock state.
1011 * This function is invoked to notify layers that lock state changed, possibly
1012 * as a result of an asynchronous event such as call-back reception.
1014 * \post lock->cll_state == state
1016 * \see cl_lock_operations::clo_state()
1018 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1019 enum cl_lock_state state)
1021 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
1024 LASSERT(lock->cll_state <= state ||
1025 (lock->cll_state == CLS_CACHED &&
1026 (state == CLS_HELD || /* lock found in cache */
1027 state == CLS_NEW || /* sub-lock canceled */
1028 state == CLS_INTRANSIT)) ||
1029 /* lock is in transit state */
1030 lock->cll_state == CLS_INTRANSIT);
1032 if (lock->cll_state != state) {
1033 atomic_dec(&site->cs_locks_state[lock->cll_state]);
1034 atomic_inc(&site->cs_locks_state[state]);
1036 cl_lock_state_signal(env, lock, state);
1037 lock->cll_state = state;
1041 EXPORT_SYMBOL(cl_lock_state_set);
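/*
 * Added summary (illustrative, derived from the assertion above; the header
 * definition of enum cl_lock_state remains authoritative): states normally
 * advance monotonically from CLS_NEW towards CLS_FREEING, with two
 * exceptions -- a CLS_CACHED lock may move back to CLS_HELD (cache hit),
 * CLS_NEW (sub-lock cancelled) or CLS_INTRANSIT, and a CLS_INTRANSIT lock is
 * later restored to an arbitrary state by cl_lock_extransit().
 */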
1043 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1045 const struct cl_lock_slice *slice;
1051 if (lock->cll_error != 0)
1054 LINVRNT(cl_lock_is_mutexed(lock));
1055 LINVRNT(cl_lock_invariant(env, lock));
1056 LASSERT(lock->cll_state == CLS_INTRANSIT);
1057 LASSERT(lock->cll_users > 0);
1058 LASSERT(lock->cll_holds > 0);
1061 list_for_each_entry_reverse(slice, &lock->cll_layers,
1063 if (slice->cls_ops->clo_unuse != NULL) {
1064 result = slice->cls_ops->clo_unuse(env, slice);
1069 LASSERT(result != -ENOSYS);
1070 } while (result == CLO_REPEAT);
1072 return result ?: lock->cll_error;
1076 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1077 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1078 * If \a atomic is 1 and an error occurs, the lock must be unused to roll
1079 * back, keeping the use process atomic.
1081 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1083 const struct cl_lock_slice *slice;
1085 enum cl_lock_state state;
1088 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1091 state = cl_lock_intransit(env, lock);
1092 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1093 if (slice->cls_ops->clo_use != NULL) {
1094 result = slice->cls_ops->clo_use(env, slice);
1099 LASSERT(result != -ENOSYS);
1101 LASSERT(lock->cll_state == CLS_INTRANSIT);
1106 if (result == -ESTALE) {
1108 * -ESTALE means a sub-lock is being cancelled
1109 * at this time; set the lock state back to
1110 * CLS_NEW here and ask the caller to repeat.
1113 result = CLO_REPEAT;
1116 /* @atomic means back-off-on-failure. */
1121 rc = cl_unuse_try_internal(env, lock);
1125 rc = cl_lock_state_wait(env, lock);
1130 /* Vet the results. */
1131 if (rc < 0 && result > 0)
1136 cl_lock_extransit(env, lock, state);
1139 EXPORT_SYMBOL(cl_use_try);
1142 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1145 static int cl_enqueue_kick(const struct lu_env *env,
1146 struct cl_lock *lock,
1147 struct cl_io *io, __u32 flags)
1150 const struct cl_lock_slice *slice;
1154 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1155 if (slice->cls_ops->clo_enqueue != NULL) {
1156 result = slice->cls_ops->clo_enqueue(env,
1162 LASSERT(result != -ENOSYS);
1167 * Tries to enqueue a lock.
1169 * This function is called repeatedly by cl_enqueue() until either lock is
1170 * enqueued, or error occurs. This function does not block waiting for
1171 * networking communication to complete.
1173 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1174 * lock->cll_state == CLS_HELD)
1176 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1177 * \see cl_lock_state::CLS_ENQUEUED
1179 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1180 struct cl_io *io, __u32 flags)
1185 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1189 LINVRNT(cl_lock_is_mutexed(lock));
1191 if (lock->cll_error != 0)
1193 switch (lock->cll_state) {
1195 cl_lock_state_set(env, lock, CLS_QUEUING);
1199 result = cl_enqueue_kick(env, lock, io, flags);
1201 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1204 LASSERT(cl_lock_is_intransit(lock));
1208 /* yank lock from the cache. */
1209 result = cl_use_try(env, lock, 0);
1218 * impossible, only held locks with increased
1219 * ->cll_holds can be enqueued, and they cannot be freed.
1224 } while (result == CLO_REPEAT);
1226 cl_lock_error(env, lock, result);
1227 RETURN(result ?: lock->cll_error);
1229 EXPORT_SYMBOL(cl_enqueue_try);
1231 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1232 struct cl_io *io, __u32 enqflags)
1238 LINVRNT(cl_lock_is_mutexed(lock));
1239 LINVRNT(cl_lock_invariant(env, lock));
1240 LASSERT(lock->cll_holds > 0);
1242 cl_lock_user_add(env, lock);
1244 result = cl_enqueue_try(env, lock, io, enqflags);
1245 if (result == CLO_WAIT) {
1246 result = cl_lock_state_wait(env, lock);
1253 cl_lock_user_del(env, lock);
1254 if (result != -EINTR)
1255 cl_lock_error(env, lock, result);
1257 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1258 lock->cll_state == CLS_HELD));
1265 * \pre current thread or io owns a hold on lock.
1267 * \post ergo(result == 0, lock->users increased)
1268 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1269 * lock->cll_state == CLS_HELD)
1271 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1272 struct cl_io *io, __u32 enqflags)
1278 cl_lock_lockdep_acquire(env, lock, enqflags);
1279 cl_lock_mutex_get(env, lock);
1280 result = cl_enqueue_locked(env, lock, io, enqflags);
1281 cl_lock_mutex_put(env, lock);
1283 cl_lock_lockdep_release(env, lock);
1284 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1285 lock->cll_state == CLS_HELD));
1288 EXPORT_SYMBOL(cl_enqueue);
1291 * Tries to unlock a lock.
1293 * This function is called repeatedly by cl_unuse() until either lock is
1294 * unlocked, or error occurs.
1296 * \pre lock->cll_state <= CLS_HELD || cl_lock_is_intransit(lock)
1298 * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
1300 * \see cl_unuse() cl_lock_operations::clo_unuse()
1301 * \see cl_lock_state::CLS_CACHED
1303 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1306 enum cl_lock_state state = CLS_NEW;
1309 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1311 if (lock->cll_state != CLS_INTRANSIT) {
1312 if (lock->cll_users > 1) {
1313 cl_lock_user_del(env, lock);
1317 * New lock users (->cll_users) are not protecting unlocking
1318 * from proceeding. From this point, lock eventually reaches
1319 * CLS_CACHED, is reinitialized to CLS_NEW or fails into CLS_FREEING.
1322 state = cl_lock_intransit(env, lock);
1325 result = cl_unuse_try_internal(env, lock);
1326 LASSERT(lock->cll_state == CLS_INTRANSIT);
1327 if (result != CLO_WAIT)
1329 * Once there is no more need to iterate ->clo_unuse() calls,
1330 * remove lock user. This is done even if unrecoverable error
1331 * happened during unlocking, because nothing else can be
1334 cl_lock_user_del(env, lock);
1335 if (result == 0 || result == -ESTALE) {
1337 * Return lock back to the cache. This is the only
1338 * place where lock is moved into CLS_CACHED state.
1340 * If one of ->clo_unuse() methods returned -ESTALE, lock
1341 * cannot be placed into cache and has to be
1342 * re-initialized. This happens e.g., when a sub-lock was
1343 * canceled while unlocking was in progress.
1345 state = result == 0 ? CLS_CACHED : CLS_NEW;
1346 cl_lock_extransit(env, lock, state);
1349 * Hide -ESTALE error.
1350 * Suppose the lock is a glimpse lock with multiple
1351 * stripes, one of its sub-locks returned -ENAVAIL,
1352 * and the other sub-locks are matched write locks. In this case
1353 * we can't set this lock to error, because otherwise some of
1354 * its sub-locks may not be cancelled, and some dirty
1355 * pages would never be written to the OSTs. -jay
1359 CWARN("result = %d, this is unlikely!\n", result);
1360 cl_lock_extransit(env, lock, state);
1363 result = result ?: lock->cll_error;
1365 cl_lock_error(env, lock, result);
1368 EXPORT_SYMBOL(cl_unuse_try);
1370 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1373 LASSERT(lock->cll_state <= CLS_HELD);
1377 result = cl_unuse_try(env, lock);
1378 if (result == CLO_WAIT) {
1379 result = cl_lock_state_wait(env, lock);
1391 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1394 cl_lock_mutex_get(env, lock);
1395 cl_unuse_locked(env, lock);
1396 cl_lock_mutex_put(env, lock);
1397 cl_lock_lockdep_release(env, lock);
1400 EXPORT_SYMBOL(cl_unuse);
1403 * Tries to wait for a lock.
1405 * This function is called repeatedly by cl_wait() until either lock is
1406 * granted, or error occurs. This function does not block waiting for network
1407 * communication to complete.
1409 * \see cl_wait() cl_lock_operations::clo_wait()
1410 * \see cl_lock_state::CLS_HELD
1412 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1414 const struct cl_lock_slice *slice;
1418 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1420 LINVRNT(cl_lock_is_mutexed(lock));
1421 LINVRNT(cl_lock_invariant(env, lock));
1422 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1423 lock->cll_state == CLS_HELD ||
1424 lock->cll_state == CLS_INTRANSIT);
1425 LASSERT(lock->cll_users > 0);
1426 LASSERT(lock->cll_holds > 0);
1429 if (lock->cll_error != 0)
1432 if (cl_lock_is_intransit(lock)) {
1437 if (lock->cll_state == CLS_HELD)
1442 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1443 if (slice->cls_ops->clo_wait != NULL) {
1444 result = slice->cls_ops->clo_wait(env, slice);
1449 LASSERT(result != -ENOSYS);
1451 cl_lock_state_set(env, lock, CLS_HELD);
1452 } while (result == CLO_REPEAT);
1453 RETURN(result ?: lock->cll_error);
1455 EXPORT_SYMBOL(cl_wait_try);
1458 * Waits until enqueued lock is granted.
1460 * \pre current thread or io owns a hold on the lock
1461 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1462 * lock->cll_state == CLS_HELD)
1464 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1466 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1471 cl_lock_mutex_get(env, lock);
1473 LINVRNT(cl_lock_invariant(env, lock));
1474 LASSERT(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD);
1475 LASSERT(lock->cll_holds > 0);
1476 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1479 result = cl_wait_try(env, lock);
1480 if (result == CLO_WAIT) {
1481 result = cl_lock_state_wait(env, lock);
1488 cl_lock_user_del(env, lock);
1489 if (result != -EINTR)
1490 cl_lock_error(env, lock, result);
1491 cl_lock_lockdep_release(env, lock);
1493 cl_lock_mutex_put(env, lock);
1494 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1497 EXPORT_SYMBOL(cl_wait);
1500 * Executes cl_lock_operations::clo_weigh() and sums the results to estimate lock weight.
1503 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1505 const struct cl_lock_slice *slice;
1506 unsigned long pound;
1507 unsigned long ounce;
1510 LINVRNT(cl_lock_is_mutexed(lock));
1511 LINVRNT(cl_lock_invariant(env, lock));
1514 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1515 if (slice->cls_ops->clo_weigh != NULL) {
1516 ounce = slice->cls_ops->clo_weigh(env, slice);
1518 if (pound < ounce) /* over-weight^Wflow */
1524 EXPORT_SYMBOL(cl_lock_weigh);
1527 * Notifies layers that lock description changed.
1529 * The server can grant the client a lock different from the one that was
1530 * requested (e.g., larger in extent). This method is called when the actually
1531 * granted lock description becomes known, to let layers accommodate the changed lock description.
1534 * \see cl_lock_operations::clo_modify()
1536 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1537 const struct cl_lock_descr *desc)
1539 const struct cl_lock_slice *slice;
1540 struct cl_object *obj = lock->cll_descr.cld_obj;
1541 struct cl_object_header *hdr = cl_object_header(obj);
1545 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1546 /* don't allow object to change */
1547 LASSERT(obj == desc->cld_obj);
1548 LINVRNT(cl_lock_is_mutexed(lock));
1549 LINVRNT(cl_lock_invariant(env, lock));
1551 list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1552 if (slice->cls_ops->clo_modify != NULL) {
1553 result = slice->cls_ops->clo_modify(env, slice, desc);
1558 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1559 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1561 * Just replace description in place. Nothing more is needed for
1562 * now. If locks were indexed according to their extent and/or mode,
1563 * that index would have to be updated here.
1565 spin_lock(&hdr->coh_lock_guard);
1566 lock->cll_descr = *desc;
1567 spin_unlock(&hdr->coh_lock_guard);
1570 EXPORT_SYMBOL(cl_lock_modify);
1573 * Initializes lock closure with a given origin.
1575 * \see cl_lock_closure
1577 void cl_lock_closure_init(const struct lu_env *env,
1578 struct cl_lock_closure *closure,
1579 struct cl_lock *origin, int wait)
1581 LINVRNT(cl_lock_is_mutexed(origin));
1582 LINVRNT(cl_lock_invariant(env, origin));
1584 CFS_INIT_LIST_HEAD(&closure->clc_list);
1585 closure->clc_origin = origin;
1586 closure->clc_wait = wait;
1587 closure->clc_nr = 0;
1589 EXPORT_SYMBOL(cl_lock_closure_init);
1592 * Builds a closure of \a lock.
1594 * Building of a closure consists of adding initial lock (\a lock) into it,
1595 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1596 * methods might call cl_lock_closure_build() recursively again, adding more
1597 * locks to the closure, etc.
1599 * \see cl_lock_closure
1601 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1602 struct cl_lock_closure *closure)
1604 const struct cl_lock_slice *slice;
1608 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1609 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1611 result = cl_lock_enclosure(env, lock, closure);
1613 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1614 if (slice->cls_ops->clo_closure != NULL) {
1615 result = slice->cls_ops->clo_closure(env, slice,
1623 cl_lock_disclosure(env, closure);
1626 EXPORT_SYMBOL(cl_lock_closure_build);
1629 * Adds new lock to a closure.
1631 * Try-locks \a lock and, if that succeeds, adds it to the closure (never more than
1632 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1633 * until next try-lock is likely to succeed.
1635 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1636 struct cl_lock_closure *closure)
1640 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1641 if (!cl_lock_mutex_try(env, lock)) {
1643 * If lock->cll_inclosure is not empty, lock is already in
1646 if (list_empty(&lock->cll_inclosure)) {
1647 cl_lock_get_trust(lock);
1648 lu_ref_add(&lock->cll_reference, "closure", closure);
1649 list_add(&lock->cll_inclosure, &closure->clc_list);
1652 cl_lock_mutex_put(env, lock);
1655 cl_lock_disclosure(env, closure);
1656 if (closure->clc_wait) {
1657 cl_lock_get_trust(lock);
1658 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1659 cl_lock_mutex_put(env, closure->clc_origin);
1661 LASSERT(cl_lock_nr_mutexed(env) == 0);
1662 cl_lock_mutex_get(env, lock);
1663 cl_lock_mutex_put(env, lock);
1665 cl_lock_mutex_get(env, closure->clc_origin);
1666 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1667 cl_lock_put(env, lock);
1669 result = CLO_REPEAT;
1673 EXPORT_SYMBOL(cl_lock_enclosure);
1675 /** Releases mutices of enclosed locks. */
1676 void cl_lock_disclosure(const struct lu_env *env,
1677 struct cl_lock_closure *closure)
1679 struct cl_lock *scan;
1680 struct cl_lock *temp;
1682 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1683 list_for_each_entry_safe(scan, temp, &closure->clc_list, cll_inclosure){
1684 list_del_init(&scan->cll_inclosure);
1685 cl_lock_mutex_put(env, scan);
1686 lu_ref_del(&scan->cll_reference, "closure", closure);
1687 cl_lock_put(env, scan);
1690 LASSERT(closure->clc_nr == 0);
1692 EXPORT_SYMBOL(cl_lock_disclosure);
1694 /** Finalizes a closure. */
1695 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1697 LASSERT(closure->clc_nr == 0);
1698 LASSERT(list_empty(&closure->clc_list));
1700 EXPORT_SYMBOL(cl_lock_closure_fini);
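/*
 * Usage sketch (added note, not part of the original code): a closure
 * collects a set of locks whose mutices are held simultaneously, e.g. a
 * top-lock and its sub-locks. Assuming the caller already holds the mutex of
 * \a origin:
 *
 *     struct cl_lock_closure closure;
 *
 *     cl_lock_closure_init(env, &closure, origin, wait);
 *     rc = cl_lock_closure_build(env, origin, &closure);
 *     if (rc == 0) {
 *             ... operate on the enclosed locks ...
 *             cl_lock_disclosure(env, &closure);
 *     }
 *     cl_lock_closure_fini(&closure);
 *
 * A CLO_REPEAT result means the mutices were dropped to avoid deadlock and
 * the caller is expected to retry the whole sequence.
 */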
1703 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1704 * destroyed, then destroys the lock. If there are holds on the lock,
1705 * destruction is postponed until all holds are released. This is called when a decision is
1706 * made to destroy the lock in the future. E.g., when a blocking AST is
1707 * received on it, or fatal communication error happens.
1709 * Caller must have a reference on this lock to prevent a situation, when
1710 * deleted lock lingers in memory for indefinite time, because nobody calls
1711 * cl_lock_put() to finish it.
1713 * \pre atomic_read(&lock->cll_ref) > 0
1714 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1715 * cl_lock_nr_mutexed(env) == 1)
1716 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1717 * held, as deletion of sub-locks might require releasing a top-lock mutex]
1720 * \see cl_lock_operations::clo_delete()
1721 * \see cl_lock::cll_holds
1723 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1725 LINVRNT(cl_lock_is_mutexed(lock));
1726 LINVRNT(cl_lock_invariant(env, lock));
1727 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1728 cl_lock_nr_mutexed(env) == 1));
1731 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1732 if (lock->cll_holds == 0)
1733 cl_lock_delete0(env, lock);
1735 lock->cll_flags |= CLF_DOOMED;
1738 EXPORT_SYMBOL(cl_lock_delete);
1741 * Mark lock as irrecoverably failed, and mark it for destruction. This
1742 * happens when, e.g., the server fails to grant a lock to us, or a networking time-out happens.
1745 * \pre atomic_read(&lock->cll_ref) > 0
1747 * \see clo_lock_delete()
1748 * \see cl_lock::cll_holds
1750 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1752 LINVRNT(cl_lock_is_mutexed(lock));
1753 LINVRNT(cl_lock_invariant(env, lock));
1756 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1757 if (lock->cll_error == 0 && error != 0) {
1758 lock->cll_error = error;
1759 cl_lock_signal(env, lock);
1760 cl_lock_cancel(env, lock);
1761 cl_lock_delete(env, lock);
1765 EXPORT_SYMBOL(cl_lock_error);
1768 * Cancels this lock. Notifies layers
1769 * (bottom-to-top) that the lock is being cancelled. If
1770 * there are holds on the lock, cancellation is postponed until
1771 * all holds are released.
1773 * Cancellation notification is delivered to layers at most once.
1775 * \see cl_lock_operations::clo_cancel()
1776 * \see cl_lock::cll_holds
1778 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1780 LINVRNT(cl_lock_is_mutexed(lock));
1781 LINVRNT(cl_lock_invariant(env, lock));
1784 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1785 if (lock->cll_holds == 0)
1786 cl_lock_cancel0(env, lock);
1788 lock->cll_flags |= CLF_CANCELPEND;
1791 EXPORT_SYMBOL(cl_lock_cancel);
1794 * Finds an existing lock covering given page and optionally different from a
1795 * given \a except lock.
1797 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
1798 struct cl_page *page, struct cl_lock *except,
1799 int pending, int canceld)
1801 struct cl_object_header *head;
1802 struct cl_lock *scan;
1803 struct cl_lock *lock;
1804 struct cl_lock_descr *need;
1808 head = cl_object_header(obj);
1809 need = &cl_env_info(env)->clt_descr;
1812 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but not PHANTOM */
1814 need->cld_start = need->cld_end = page->cp_index;
1816 spin_lock(&head->coh_lock_guard);
1817 /* It is fine to match any group lock since there could be only one
1818 * with a uniq gid and it conflicts with all other lock modes too */
1819 list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1820 if (scan != except &&
1821 (scan->cll_descr.cld_mode == CLM_GROUP ||
1822 cl_lock_ext_match(&scan->cll_descr, need)) &&
1823 scan->cll_state >= CLS_HELD &&
1824 scan->cll_state < CLS_FREEING &&
1826 * This check is racy as the lock can be canceled right
1827 * after it is done, but this is fine, because page exists anyway.
1830 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1831 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1832 /* Don't increase cs_hit here since this
1833 * is just a helper function. */
1834 cl_lock_get_trust(scan);
1839 spin_unlock(&head->coh_lock_guard);
1842 EXPORT_SYMBOL(cl_lock_at_page);
1845 * Returns a list of pages protected (only) by a given lock.
1847 * Scans an extent of page radix tree, corresponding to the \a lock and queues
1848 * all pages that are not protected by locks other than \a lock into \a queue.
1850 void cl_lock_page_list_fixup(const struct lu_env *env,
1851 struct cl_io *io, struct cl_lock *lock,
1852 struct cl_page_list *queue)
1854 struct cl_page *page;
1855 struct cl_page *temp;
1856 struct cl_page_list *plist = &cl_env_info(env)->clt_list;
1858 LINVRNT(cl_lock_invariant(env, lock));
1861 /* Now that we have a list of cl_pages under the \a lock, we need
1862 * to check whether some of the pages are covered by another ldlm lock.
1863 * If so, they do not need to be written out this time.
1865 * For example, the client holds A:[0,200] & B:[100,300] PW locks, and
1866 * the latter is to be canceled. This means another client is
1867 * reading/writing [200,300], since A won't be canceled. We actually
1868 * only need to write the pages covered by [200,300]. This is safe,
1869 * since [100,200] is still protected by lock A. */
1872 cl_page_list_init(plist);
1873 cl_page_list_for_each_safe(page, temp, queue) {
1874 pgoff_t idx = page->cp_index;
1875 struct cl_lock *found;
1876 struct cl_lock_descr *descr;
1878 /* The algorithm relies on pages arriving in ascending index order. */
1879 LASSERT(ergo(&temp->cp_batch != &queue->pl_pages,
1880 page->cp_index < temp->cp_index));
1882 found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1887 descr = &found->cll_descr;
1888 list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
1890 idx = page->cp_index;
1891 if (descr->cld_start > idx || descr->cld_end < idx)
1893 cl_page_list_move(plist, queue, page);
1895 cl_lock_put(env, found);
1898 /* The pages in plist are covered by other locks, don't handle them this time. */
1902 cl_page_list_disown(env, io, plist);
1903 cl_page_list_fini(env, plist);
1906 EXPORT_SYMBOL(cl_lock_page_list_fixup);
1909 * Invalidate pages protected by the given lock, sending them out to the
1910 * server first, if necessary.
1912 * This function does the following:
1914 * - collects a list of pages to be invalidated,
1916 * - unmaps them from the user virtual memory,
1918 * - sends dirty pages to the server,
1920 * - waits for transfer completion,
1922 * - discards pages, and throws them out of memory.
1924 * If \a discard is set, pages are discarded without sending them to the
1927 * If error happens on any step, the process continues anyway (the reasoning
1928 * behind this being that lock cancellation cannot be delayed indefinitely).
1930 int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
1933 struct cl_thread_info *info = cl_env_info(env);
1934 struct cl_io *io = &info->clt_io;
1935 struct cl_2queue *queue = &info->clt_queue;
1936 struct cl_lock_descr *descr = &lock->cll_descr;
1940 LINVRNT(cl_lock_invariant(env, lock));
1943 io->ci_obj = cl_object_top(descr->cld_obj);
1944 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1949 cl_2queue_init(queue);
1950 cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
1951 descr->cld_end, &queue->c2_qin, nonblock);
1952 page_count = queue->c2_qin.pl_nr;
1953 if (page_count > 0) {
1954 result = cl_page_list_unmap(env, io, &queue->c2_qin);
1956 long timeout = 600; /* 10 minutes. */
1957 /* for debug purpose, if this request can't be
1958 * finished in 10 minutes, we hope it can
1961 result = cl_io_submit_sync(env, io, CRT_WRITE,
1965 CWARN("Writing %lu pages error: %d\n",
1966 page_count, result);
1968 cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
1969 cl_2queue_discard(env, io, queue);
1970 cl_2queue_disown(env, io, queue);
1972 cl_2queue_fini(env, queue);
1979 cl_io_fini(env, io);
1982 EXPORT_SYMBOL(cl_lock_page_out);
1985 * Eliminate all locks for a given object.
1987 * Caller has to guarantee that no lock is in active use.
1989 * \param cancel when this is set, cl_locks_prune() cancels locks before destroying them.
1992 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
1994 struct cl_object_header *head;
1995 struct cl_lock *lock;
1998 head = cl_object_header(obj);
2000 * If locks are destroyed without cancellation, all pages must be
2001 * already destroyed (as otherwise they will be left unprotected).
2003 LASSERT(ergo(!cancel,
2004 head->coh_tree.rnode == NULL && head->coh_pages == 0));
2006 spin_lock(&head->coh_lock_guard);
2007 while (!list_empty(&head->coh_locks)) {
2008 lock = container_of(head->coh_locks.next,
2009 struct cl_lock, cll_linkage);
2010 cl_lock_get_trust(lock);
2011 spin_unlock(&head->coh_lock_guard);
2012 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2013 cl_lock_mutex_get(env, lock);
2014 if (lock->cll_state < CLS_FREEING) {
2015 LASSERT(lock->cll_holds == 0);
2016 LASSERT(lock->cll_users == 0);
2018 cl_lock_cancel(env, lock);
2019 cl_lock_delete(env, lock);
2021 cl_lock_mutex_put(env, lock);
2022 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2023 cl_lock_put(env, lock);
2024 spin_lock(&head->coh_lock_guard);
2026 spin_unlock(&head->coh_lock_guard);
2029 EXPORT_SYMBOL(cl_locks_prune);
2032 * Returns true if \a addr is an address of an allocated cl_lock. Used in
2033 * assertions. This check is optimistically imprecise, i.e., it occasionally
2034 * returns true for the incorrect addresses, but if it returns false, then the
2035 * address is guaranteed to be incorrect. (Should be named cl_lockp().)
2039 int cl_is_lock(const void *addr)
2041 return cfs_mem_is_in_cache(addr, cl_lock_kmem);
2043 EXPORT_SYMBOL(cl_is_lock);
2045 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2046 const struct cl_io *io,
2047 const struct cl_lock_descr *need,
2048 const char *scope, const void *source)
2050 struct cl_lock *lock;
2055 lock = cl_lock_find(env, io, need);
2058 cl_lock_mutex_get(env, lock);
2059 if (lock->cll_state < CLS_FREEING) {
2060 cl_lock_hold_mod(env, lock, +1);
2061 lu_ref_add(&lock->cll_holders, scope, source);
2062 lu_ref_add(&lock->cll_reference, scope, source);
2065 cl_lock_mutex_put(env, lock);
2066 cl_lock_put(env, lock);
2072 * Returns a lock matching \a need description with a reference and a hold on
2075 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2076 * guarantees that lock is not in the CLS_FREEING state on return.
2078 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2079 const struct cl_lock_descr *need,
2080 const char *scope, const void *source)
2082 struct cl_lock *lock;
2086 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2088 cl_lock_mutex_put(env, lock);
2091 EXPORT_SYMBOL(cl_lock_hold);
2094 * Main high-level entry point of cl_lock interface that finds existing or
2095 * enqueues new lock matching given description.
2097 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2098 const struct cl_lock_descr *need,
2100 const char *scope, const void *source)
2102 struct cl_lock *lock;
2103 const struct lu_fid *fid;
2109 fid = lu_object_fid(&io->ci_obj->co_lu);
2112 warn = iter >= 16 && IS_PO2(iter);
2113 CDEBUG(warn ? D_WARNING : D_DLMTRACE,
2114 DDESCR"@"DFID" %i %08x `%s'\n",
2115 PDESCR(need), PFID(fid), iter, enqflags, scope);
2116 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2117 if (!IS_ERR(lock)) {
2118 rc = cl_enqueue_locked(env, lock, io, enqflags);
2120 if (cl_lock_fits_into(env, lock, need, io)) {
2121 cl_lock_mutex_put(env, lock);
2122 cl_lock_lockdep_acquire(env,
2126 CL_LOCK_DEBUG(D_WARNING, env, lock,
2127 "got (see bug 17665)\n");
2128 cl_unuse_locked(env, lock);
2130 cl_lock_trace(D_DLMTRACE, env, "enqueue failed", lock);
2131 cl_lock_hold_release(env, lock, scope, source);
2132 cl_lock_mutex_put(env, lock);
2133 lu_ref_del(&lock->cll_reference, scope, source);
2134 cl_lock_put(env, lock);
2142 EXPORT_SYMBOL(cl_lock_request);
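/*
 * Usage sketch (added note, not part of the original code; the flag value and
 * scope string are placeholders): a typical top-level user requests a lock,
 * waits until it is granted, does its work, and releases it:
 *
 *     lock = cl_lock_request(env, io, need, enqflags, "myscope", cfs_current());
 *     if (!IS_ERR(lock)) {
 *             rc = cl_wait(env, lock);
 *             if (rc == 0) {
 *                     ... perform I/O under the lock ...
 *                     cl_unuse(env, lock);
 *             }
 *             cl_lock_release(env, lock, "myscope", cfs_current());
 *     }
 */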
2145 * Adds a hold to a known lock.
2147 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2148 const char *scope, const void *source)
2150 LINVRNT(cl_lock_is_mutexed(lock));
2151 LINVRNT(cl_lock_invariant(env, lock));
2152 LASSERT(lock->cll_state != CLS_FREEING);
2155 cl_lock_hold_mod(env, lock, +1);
2157 lu_ref_add(&lock->cll_holders, scope, source);
2158 lu_ref_add(&lock->cll_reference, scope, source);
2161 EXPORT_SYMBOL(cl_lock_hold_add);
2164 * Releases a hold and a reference on a lock, on which caller acquired a
2167 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2168 const char *scope, const void *source)
2170 LINVRNT(cl_lock_invariant(env, lock));
2172 cl_lock_hold_release(env, lock, scope, source);
2173 lu_ref_del(&lock->cll_reference, scope, source);
2174 cl_lock_put(env, lock);
2177 EXPORT_SYMBOL(cl_lock_unhold);
2180 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2182 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2183 const char *scope, const void *source)
2185 LINVRNT(cl_lock_invariant(env, lock));
2187 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2188 cl_lock_mutex_get(env, lock);
2189 cl_lock_hold_release(env, lock, scope, source);
2190 cl_lock_mutex_put(env, lock);
2191 lu_ref_del(&lock->cll_reference, scope, source);
2192 cl_lock_put(env, lock);
2195 EXPORT_SYMBOL(cl_lock_release);
2197 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2199 LINVRNT(cl_lock_is_mutexed(lock));
2200 LINVRNT(cl_lock_invariant(env, lock));
2203 cl_lock_used_mod(env, lock, +1);
2206 EXPORT_SYMBOL(cl_lock_user_add);
2208 int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2210 LINVRNT(cl_lock_is_mutexed(lock));
2211 LINVRNT(cl_lock_invariant(env, lock));
2212 LASSERT(lock->cll_users > 0);
2215 cl_lock_used_mod(env, lock, -1);
2216 RETURN(lock->cll_users == 0);
2218 EXPORT_SYMBOL(cl_lock_user_del);
2221 * Checks whether the modes of two locks are compatible.
2223 * This returns true iff en-queuing \a lock2 won't cause cancellation of \a
2224 * lock1 even when these locks overlap.
2226 int cl_lock_compatible(const struct cl_lock *lock1, const struct cl_lock *lock2)
2228 enum cl_lock_mode mode1;
2229 enum cl_lock_mode mode2;
2232 mode1 = lock1->cll_descr.cld_mode;
2233 mode2 = lock2->cll_descr.cld_mode;
2234 RETURN(mode2 == CLM_PHANTOM ||
2235 (mode1 == CLM_READ && mode2 == CLM_READ));
2237 EXPORT_SYMBOL(cl_lock_compatible);
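/*
 * Illustration (added note, not part of the original code): per the
 * predicate above, overlapping read locks and any phantom \a lock2 are
 * compatible, everything else is not, e.g.:
 *
 *     cl_lock_compatible(read_lock,  other_read_lock)  -> 1
 *     cl_lock_compatible(write_lock, phantom_lock)     -> 1
 *     cl_lock_compatible(read_lock,  write_lock)       -> 0
 *
 * where the lock names are hypothetical locks of the indicated modes.
 */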
2239 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2241 static const char *names[] = {
2242 [CLM_PHANTOM] = "P",
2247 if (0 <= mode && mode < ARRAY_SIZE(names))
2252 EXPORT_SYMBOL(cl_lock_mode_name);
2255 * Prints human readable representation of a lock description.
2257 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2258 lu_printer_t printer,
2259 const struct cl_lock_descr *descr)
2261 const struct lu_fid *fid;
2263 fid = lu_object_fid(&descr->cld_obj->co_lu);
2264 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2266 EXPORT_SYMBOL(cl_lock_descr_print);
2269 * Prints human-readable representation of \a lock via \a printer.
2271 void cl_lock_print(const struct lu_env *env, void *cookie,
2272 lu_printer_t printer, const struct cl_lock *lock)
2274 const struct cl_lock_slice *slice;
2275 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2276 lock, atomic_read(&lock->cll_ref),
2277 lock->cll_state, lock->cll_error, lock->cll_holds,
2278 lock->cll_users, lock->cll_flags);
2279 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2280 (*printer)(env, cookie, " {\n");
2282 list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2283 (*printer)(env, cookie, " %s@%p: ",
2284 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2286 if (slice->cls_ops->clo_print != NULL)
2287 slice->cls_ops->clo_print(env, cookie, printer, slice);
2288 (*printer)(env, cookie, "\n");
2290 (*printer)(env, cookie, "} lock@%p\n", lock);
2292 EXPORT_SYMBOL(cl_lock_print);
2294 int cl_lock_init(void)
2296 return lu_kmem_init(cl_lock_caches);
2299 void cl_lock_fini(void)
2301 lu_kmem_fini(cl_lock_caches);