1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Nikita Danilov <nikita.danilov@sun.com>
41 #define DEBUG_SUBSYSTEM S_CLASS
43 # define EXPORT_SYMTAB
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_fid.h>
49 #include <libcfs/list.h>
50 /* lu_time_global_{init,fini}() */
53 #include <cl_object.h>
54 #include "cl_internal.h"
56 /** Lock class of cl_lock::cll_guard */
57 static cfs_lock_class_key_t cl_lock_guard_class;
58 static cfs_mem_cache_t *cl_lock_kmem;
60 static struct lu_kmem_descr cl_lock_caches[] = {
62 .ckd_cache = &cl_lock_kmem,
63 .ckd_name = "cl_lock_kmem",
64 .ckd_size = sizeof (struct cl_lock)
72 * Basic lock invariant that is maintained at all times. Caller either has a
73 * reference to \a lock, or somehow assures that \a lock cannot be freed.
75 * \see cl_lock_invariant()
77 static int cl_lock_invariant_trusted(const struct lu_env *env,
78 const struct cl_lock *lock)
82 ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
83 cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
84 lock->cll_holds >= lock->cll_users &&
85 lock->cll_holds >= 0 &&
86 lock->cll_users >= 0 &&
91 * Stronger lock invariant, checking that caller has a reference on a lock.
93 * \see cl_lock_invariant_trusted()
95 static int cl_lock_invariant(const struct lu_env *env,
96 const struct cl_lock *lock)
100 result = cfs_atomic_read(&lock->cll_ref) > 0 &&
101 cl_lock_invariant_trusted(env, lock);
102 if (!result && env != NULL)
103 CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
108 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
110 static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
112 return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
116 * Returns a set of counters for this lock, depending on a lock nesting.
118 static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
119 const struct cl_lock *lock)
121 struct cl_thread_info *info;
122 enum clt_nesting_level nesting;
124 info = cl_env_info(env);
125 nesting = cl_lock_nesting(lock);
126 LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
127 return &info->clt_counters[nesting];
130 static void cl_lock_trace0(int level, const struct lu_env *env,
131 const char *prefix, const struct cl_lock *lock,
132 const char *func, const int line)
134 struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
135 CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
136 "(%p/%d/%d) at %s():%d\n",
137 prefix, lock, cfs_atomic_read(&lock->cll_ref),
138 lock->cll_guarder, lock->cll_depth,
139 lock->cll_state, lock->cll_error, lock->cll_holds,
140 lock->cll_users, lock->cll_flags,
141 env, h->coh_nesting, cl_lock_nr_mutexed(env),
144 #define cl_lock_trace(level, env, prefix, lock) \
145 cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)
147 #define RETIP ((unsigned long)__builtin_return_address(0))
149 #ifdef CONFIG_LOCKDEP
150 static cfs_lock_class_key_t cl_lock_key;
152 static void cl_lock_lockdep_init(struct cl_lock *lock)
154 lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
157 static void cl_lock_lockdep_acquire(const struct lu_env *env,
158 struct cl_lock *lock, __u32 enqflags)
160 cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
161 #ifdef HAVE_LOCK_MAP_ACQUIRE
162 lock_map_acquire(&lock->dep_map);
163 #else /* HAVE_LOCK_MAP_ACQUIRE */
164 lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
165 /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
166 /* check: */ 2, RETIP);
167 #endif /* HAVE_LOCK_MAP_ACQUIRE */
170 static void cl_lock_lockdep_release(const struct lu_env *env,
171 struct cl_lock *lock)
173 cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
174 lock_release(&lock->dep_map, 0, RETIP);
177 #else /* !CONFIG_LOCKDEP */
179 static void cl_lock_lockdep_init(struct cl_lock *lock)
181 static void cl_lock_lockdep_acquire(const struct lu_env *env,
182 struct cl_lock *lock, __u32 enqflags)
184 static void cl_lock_lockdep_release(const struct lu_env *env,
185 struct cl_lock *lock)
188 #endif /* !CONFIG_LOCKDEP */
191 * Adds lock slice to the compound lock.
193 * This is called by cl_object_operations::coo_lock_init() methods to add a
194 * per-layer state to the lock. New state is added at the end of
195 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
197 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
199 void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
200 struct cl_object *obj,
201 const struct cl_lock_operations *ops)
204 slice->cls_lock = lock;
205 cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
206 slice->cls_obj = obj;
207 slice->cls_ops = ops;
210 EXPORT_SYMBOL(cl_lock_slice_add);
213 * Returns true iff a lock with the mode \a has provides at least the same
214 * guarantees as a lock with the mode \a need.
216 int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
218 LINVRNT(need == CLM_READ || need == CLM_WRITE ||
219 need == CLM_PHANTOM || need == CLM_GROUP);
220 LINVRNT(has == CLM_READ || has == CLM_WRITE ||
221 has == CLM_PHANTOM || has == CLM_GROUP);
222 CLASSERT(CLM_PHANTOM < CLM_READ);
223 CLASSERT(CLM_READ < CLM_WRITE);
224 CLASSERT(CLM_WRITE < CLM_GROUP);
226 if (has != CLM_GROUP)
231 EXPORT_SYMBOL(cl_lock_mode_match);
234 * Returns true iff extent portions of lock descriptions match.
236 int cl_lock_ext_match(const struct cl_lock_descr *has,
237 const struct cl_lock_descr *need)
240 has->cld_start <= need->cld_start &&
241 has->cld_end >= need->cld_end &&
242 cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
243 (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
245 EXPORT_SYMBOL(cl_lock_ext_match);
248 * Returns true iff a lock with the description \a has provides at least the
249 * same guarantees as a lock with the description \a need.
251 int cl_lock_descr_match(const struct cl_lock_descr *has,
252 const struct cl_lock_descr *need)
255 cl_object_same(has->cld_obj, need->cld_obj) &&
256 cl_lock_ext_match(has, need);
258 EXPORT_SYMBOL(cl_lock_descr_match);
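/*
 * Illustrative sketch (added, not from the original code): how the three
 * matching helpers above compose. The descriptors "have" and "want" and the
 * object pointer "obj" are hypothetical:
 *
 *        struct cl_lock_descr have = {
 *                .cld_obj   = obj,
 *                .cld_mode  = CLM_WRITE,
 *                .cld_start = 0,
 *                .cld_end   = 1023
 *        };
 *        struct cl_lock_descr want = {
 *                .cld_obj   = obj,
 *                .cld_mode  = CLM_READ,
 *                .cld_start = 16,
 *                .cld_end   = 32
 *        };
 *
 *        cl_lock_descr_match(&have, &want);
 *
 * returns true: same object, [16, 32] lies within [0, 1023], and a CLM_WRITE
 * lock provides at least the CLM_READ guarantees. A CLM_GROUP "have" would
 * additionally require cld_gid equality (see cl_lock_ext_match()).
 */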
260 static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
262 struct cl_object *obj = lock->cll_descr.cld_obj;
264 LASSERT(cl_is_lock(lock));
265 LINVRNT(!cl_lock_is_mutexed(lock));
268 cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
270 while (!cfs_list_empty(&lock->cll_layers)) {
271 struct cl_lock_slice *slice;
273 slice = cfs_list_entry(lock->cll_layers.next,
274 struct cl_lock_slice, cls_linkage);
275 cfs_list_del_init(lock->cll_layers.next);
276 slice->cls_ops->clo_fini(env, slice);
278 cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
279 cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
280 lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
281 cl_object_put(env, obj);
282 lu_ref_fini(&lock->cll_reference);
283 lu_ref_fini(&lock->cll_holders);
284 cfs_mutex_destroy(&lock->cll_guard);
285 OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
290 * Releases a reference on a lock.
292 * When last reference is released, lock is returned to the cache, unless it
293 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
296 * \see cl_object_put(), cl_page_put()
298 void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
300 struct cl_object *obj;
301 struct cl_object_header *head;
302 struct cl_site *site;
304 LINVRNT(cl_lock_invariant(env, lock));
306 obj = lock->cll_descr.cld_obj;
307 LINVRNT(obj != NULL);
308 head = cl_object_header(obj);
309 site = cl_object_site(obj);
311 CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
312 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
314 if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
315 if (lock->cll_state == CLS_FREEING) {
316 LASSERT(cfs_list_empty(&lock->cll_linkage));
317 cl_lock_free(env, lock);
319 cfs_atomic_dec(&site->cs_locks.cs_busy);
323 EXPORT_SYMBOL(cl_lock_put);
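/*
 * Illustrative sketch (added, not from the original code): the reference
 * discipline for cl_lock_put() above and cl_lock_get()/cl_lock_get_trust()
 * below. A caller that already owns a reference may take an extra one and
 * must drop it again with cl_lock_put():
 *
 *        cl_lock_get(lock);
 *        ... hand "lock" to another context that will use it ...
 *        cl_lock_put(env, lock);
 *
 * cl_lock_get_trust() is the exception: it may take the initial reference on
 * a lock found in a cache, provided the caller copes with the race against
 * concurrent freeing (see its documentation below).
 */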
326 * Acquires an additional reference to a lock.
328 * This can be called only by caller already possessing a reference to \a
331 * \see cl_object_get(), cl_page_get()
333 void cl_lock_get(struct cl_lock *lock)
335 LINVRNT(cl_lock_invariant(NULL, lock));
336 CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
337 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
338 cfs_atomic_inc(&lock->cll_ref);
340 EXPORT_SYMBOL(cl_lock_get);
343 * Acquires a reference to a lock.
345 * This is much like cl_lock_get(), except that this function can be used to
346 * acquire initial reference to the cached lock. Caller has to deal with all
347 * possible races. Use with care!
349 * \see cl_page_get_trust()
351 void cl_lock_get_trust(struct cl_lock *lock)
353 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
355 LASSERT(cl_is_lock(lock));
356 CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
357 cfs_atomic_read(&lock->cll_ref), lock, RETIP);
358 if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
359 cfs_atomic_inc(&site->cs_locks.cs_busy);
361 EXPORT_SYMBOL(cl_lock_get_trust);
364 * Helper function destroying the lock that wasn't completely initialized.
366 * Other threads can acquire references to the top-lock through its
367 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
369 static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
371 cl_lock_mutex_get(env, lock);
372 cl_lock_cancel(env, lock);
373 cl_lock_delete(env, lock);
374 cl_lock_mutex_put(env, lock);
375 cl_lock_put(env, lock);
378 static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
379 struct cl_object *obj,
380 const struct cl_io *io,
381 const struct cl_lock_descr *descr)
383 struct cl_lock *lock;
384 struct lu_object_header *head;
385 struct cl_site *site = cl_object_site(obj);
388 OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
390 cfs_atomic_set(&lock->cll_ref, 1);
391 lock->cll_descr = *descr;
392 lock->cll_state = CLS_NEW;
394 lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
396 CFS_INIT_LIST_HEAD(&lock->cll_layers);
397 CFS_INIT_LIST_HEAD(&lock->cll_linkage);
398 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
399 lu_ref_init(&lock->cll_reference);
400 lu_ref_init(&lock->cll_holders);
401 cfs_mutex_init(&lock->cll_guard);
402 cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
403 cfs_waitq_init(&lock->cll_wq);
404 head = obj->co_lu.lo_header;
405 cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
406 cfs_atomic_inc(&site->cs_locks.cs_total);
407 cfs_atomic_inc(&site->cs_locks.cs_created);
408 cl_lock_lockdep_init(lock);
409 cfs_list_for_each_entry(obj, &head->loh_layers,
413 err = obj->co_ops->coo_lock_init(env, obj, lock, io);
415 cl_lock_finish(env, lock);
421 lock = ERR_PTR(-ENOMEM);
426 * Transfer the lock into INTRANSIT state and return the original state.
428 * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
429 * \post state: CLS_INTRANSIT
432 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
433 struct cl_lock *lock)
435 enum cl_lock_state state = lock->cll_state;
437 LASSERT(cl_lock_is_mutexed(lock));
438 LASSERT(state != CLS_INTRANSIT);
439 LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
440 "Malformed lock state %d.\n", state);
442 cl_lock_state_set(env, lock, CLS_INTRANSIT);
443 lock->cll_intransit_owner = cfs_current();
444 cl_lock_hold_add(env, lock, "intransit", cfs_current());
447 EXPORT_SYMBOL(cl_lock_intransit);
450 * Exit the intransit state and restore the lock state to the original state
452 void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
453 enum cl_lock_state state)
455 LASSERT(cl_lock_is_mutexed(lock));
456 LASSERT(lock->cll_state == CLS_INTRANSIT);
457 LASSERT(state != CLS_INTRANSIT);
458 LASSERT(lock->cll_intransit_owner == cfs_current());
460 lock->cll_intransit_owner = NULL;
461 cl_lock_state_set(env, lock, state);
462 cl_lock_unhold(env, lock, "intransit", cfs_current());
464 EXPORT_SYMBOL(cl_lock_extransit);
467 * Checks whether the lock is in the INTRANSIT state and owned by another thread.
469 int cl_lock_is_intransit(struct cl_lock *lock)
471 LASSERT(cl_lock_is_mutexed(lock));
472 return lock->cll_state == CLS_INTRANSIT &&
473 lock->cll_intransit_owner != cfs_current();
475 EXPORT_SYMBOL(cl_lock_is_intransit);
477 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
478 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
479 * cover multiple stripes and can trigger cascading timeouts.
481 static int cl_lock_fits_into(const struct lu_env *env,
482 const struct cl_lock *lock,
483 const struct cl_lock_descr *need,
484 const struct cl_io *io)
486 const struct cl_lock_slice *slice;
488 LINVRNT(cl_lock_invariant_trusted(env, lock));
490 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
491 if (slice->cls_ops->clo_fits_into != NULL &&
492 !slice->cls_ops->clo_fits_into(env, slice, need, io))
498 static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
499 struct cl_object *obj,
500 const struct cl_io *io,
501 const struct cl_lock_descr *need)
503 struct cl_lock *lock;
504 struct cl_object_header *head;
505 struct cl_site *site;
509 head = cl_object_header(obj);
510 site = cl_object_site(obj);
511 LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
512 cfs_atomic_inc(&site->cs_locks.cs_lookup);
513 cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
516 LASSERT(cl_is_lock(lock));
517 matched = cl_lock_ext_match(&lock->cll_descr, need) &&
518 lock->cll_state < CLS_FREEING &&
519 lock->cll_error == 0 &&
520 !(lock->cll_flags & CLF_CANCELLED) &&
521 cl_lock_fits_into(env, lock, need, io);
522 CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
523 PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
526 cl_lock_get_trust(lock);
527 cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
535 * Returns a lock matching description \a need.
537 * This is the main entry point into the cl_lock caching interface. First, a
538 * cache (implemented as a per-object linked list) is consulted. If lock is
539 * found there, it is returned immediately. Otherwise new lock is allocated
540 * and returned. In any case, additional reference to lock is acquired.
542 * \see cl_object_find(), cl_page_find()
544 static struct cl_lock *cl_lock_find(const struct lu_env *env,
545 const struct cl_io *io,
546 const struct cl_lock_descr *need)
548 struct cl_object_header *head;
549 struct cl_object *obj;
550 struct cl_lock *lock;
551 struct cl_site *site;
556 head = cl_object_header(obj);
557 site = cl_object_site(obj);
559 cfs_spin_lock(&head->coh_lock_guard);
560 lock = cl_lock_lookup(env, obj, io, need);
561 cfs_spin_unlock(&head->coh_lock_guard);
564 lock = cl_lock_alloc(env, obj, io, need);
566 struct cl_lock *ghost;
568 cfs_spin_lock(&head->coh_lock_guard);
569 ghost = cl_lock_lookup(env, obj, io, need);
571 cfs_list_add_tail(&lock->cll_linkage,
573 cfs_spin_unlock(&head->coh_lock_guard);
574 cfs_atomic_inc(&site->cs_locks.cs_busy);
576 cfs_spin_unlock(&head->coh_lock_guard);
578 * Other threads can acquire references to the
579 * top-lock through its sub-locks. Hence, it
580 * cannot be cl_lock_free()-ed immediately.
582 cl_lock_finish(env, lock);
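/*
 * Commentary sketch (added, not part of the original code): cl_lock_find()
 * above follows the usual lookup/allocate/re-check pattern for a cache
 * guarded by a spin-lock. Condensed control flow:
 *
 *        spin_lock(guard);  lock = cl_lock_lookup(...);  spin_unlock(guard);
 *        if (lock == NULL) {
 *                lock = cl_lock_alloc(...);
 *                spin_lock(guard);
 *                ghost = cl_lock_lookup(...);      re-check under the guard
 *                if (ghost == NULL)
 *                        add "lock" to head->coh_locks;
 *                spin_unlock(guard);
 *                if (ghost != NULL)
 *                        cl_lock_finish(env, lock);  we lost the race: the
 *                                                    concurrently inserted
 *                                                    lock is used instead
 *        }
 */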
591 * Returns existing lock matching given description. This is similar to
592 * cl_lock_find() except that no new lock is created, and returned lock is
593 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
595 struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
596 const struct cl_lock_descr *need,
597 const char *scope, const void *source)
599 struct cl_object_header *head;
600 struct cl_object *obj;
601 struct cl_lock *lock;
605 head = cl_object_header(obj);
607 cfs_spin_lock(&head->coh_lock_guard);
608 lock = cl_lock_lookup(env, obj, io, need);
609 cfs_spin_unlock(&head->coh_lock_guard);
614 cl_lock_mutex_get(env, lock);
615 if (lock->cll_state == CLS_INTRANSIT)
616 cl_lock_state_wait(env, lock); /* Don't care return value. */
617 if (lock->cll_state == CLS_CACHED) {
619 result = cl_use_try(env, lock, 1);
621 cl_lock_error(env, lock, result);
623 ok = lock->cll_state == CLS_HELD;
625 cl_lock_hold_add(env, lock, scope, source);
626 cl_lock_user_add(env, lock);
627 cl_lock_put(env, lock);
629 cl_lock_mutex_put(env, lock);
631 cl_lock_put(env, lock);
637 EXPORT_SYMBOL(cl_lock_peek);
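/*
 * Illustrative sketch (hypothetical caller, added): cl_lock_peek() is a pure
 * cache probe, so NULL simply means "no suitable cached lock", and a caller
 * would typically fall back to cl_lock_request() further below. A non-NULL
 * result is in CLS_HELD state and carries a hold and a user registered under
 * the given scope/source, which must be dropped again when the caller is
 * done (cl_unuse() plus cl_lock_release() with the same scope/source is the
 * apparent counterpart; consult actual call sites for the exact sequence):
 *
 *        lock = cl_lock_peek(env, io, need, "sketch", cfs_current());
 *        if (lock == NULL) {
 *                ... nothing cached; go through cl_lock_request() instead ...
 *        }
 */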
640 * Returns a slice within a lock, corresponding to the given layer in the
645 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
646 const struct lu_device_type *dtype)
648 const struct cl_lock_slice *slice;
650 LINVRNT(cl_lock_invariant_trusted(NULL, lock));
653 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
654 if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
659 EXPORT_SYMBOL(cl_lock_at);
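/*
 * Illustrative sketch (hypothetical layer, added): a layer typically recovers
 * its private lock state from the compound lock by looking up its slice via
 * its device type and applying container_of(). The names my_device_type and
 * struct my_lock are placeholders, not real identifiers:
 *
 *        const struct cl_lock_slice *slice;
 *
 *        slice = cl_lock_at(lock, &my_device_type);
 *        if (slice != NULL) {
 *                struct my_lock *mlk;
 *
 *                mlk = container_of(slice, struct my_lock, mlk_cl);
 *                ... use the layer-private state ...
 *        }
 */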
661 static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
663 struct cl_thread_counters *counters;
665 counters = cl_lock_counters(env, lock);
667 counters->ctc_nr_locks_locked++;
668 lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
669 cl_lock_trace(D_TRACE, env, "got mutex", lock);
673 * Locks cl_lock object.
675 * This is used to manipulate cl_lock fields, and to serialize state
676 * transitions in the lock state machine.
678 * \post cl_lock_is_mutexed(lock)
680 * \see cl_lock_mutex_put()
682 void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
684 LINVRNT(cl_lock_invariant(env, lock));
686 if (lock->cll_guarder == cfs_current()) {
687 LINVRNT(cl_lock_is_mutexed(lock));
688 LINVRNT(lock->cll_depth > 0);
690 struct cl_object_header *hdr;
691 struct cl_thread_info *info;
694 LINVRNT(lock->cll_guarder != cfs_current());
695 hdr = cl_object_header(lock->cll_descr.cld_obj);
697 * Check that mutices are taken in the bottom-to-top order.
699 info = cl_env_info(env);
700 for (i = 0; i < hdr->coh_nesting; ++i)
701 LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
702 cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
703 lock->cll_guarder = cfs_current();
704 LINVRNT(lock->cll_depth == 0);
706 cl_lock_mutex_tail(env, lock);
708 EXPORT_SYMBOL(cl_lock_mutex_get);
711 * Try-locks cl_lock object.
713 * \retval 0 \a lock was successfully locked
715 * \retval -EBUSY \a lock cannot be locked right now
717 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
719 * \see cl_lock_mutex_get()
721 int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
725 LINVRNT(cl_lock_invariant_trusted(env, lock));
729 if (lock->cll_guarder == cfs_current()) {
730 LINVRNT(lock->cll_depth > 0);
731 cl_lock_mutex_tail(env, lock);
732 } else if (cfs_mutex_trylock(&lock->cll_guard)) {
733 LINVRNT(lock->cll_depth == 0);
734 lock->cll_guarder = cfs_current();
735 cl_lock_mutex_tail(env, lock);
740 EXPORT_SYMBOL(cl_lock_mutex_try);
743 * Unlocks cl_lock object.
745 * \pre cl_lock_is_mutexed(lock)
747 * \see cl_lock_mutex_get()
749 void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
751 struct cl_thread_counters *counters;
753 LINVRNT(cl_lock_invariant(env, lock));
754 LINVRNT(cl_lock_is_mutexed(lock));
755 LINVRNT(lock->cll_guarder == cfs_current());
756 LINVRNT(lock->cll_depth > 0);
758 counters = cl_lock_counters(env, lock);
759 LINVRNT(counters->ctc_nr_locks_locked > 0);
761 cl_lock_trace(D_TRACE, env, "put mutex", lock);
762 lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
763 counters->ctc_nr_locks_locked--;
764 if (--lock->cll_depth == 0) {
765 lock->cll_guarder = NULL;
766 cfs_mutex_unlock(&lock->cll_guard);
769 EXPORT_SYMBOL(cl_lock_mutex_put);
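/*
 * Illustrative sketch (added, not from the original code): the mutex
 * discipline for the helpers above. Lock state fields and state machine
 * transitions are only manipulated under the lock mutex, which is recursive
 * for the owning thread:
 *
 *        cl_lock_mutex_get(env, lock);
 *        ... inspect or change lock->cll_state, flags, counters ...
 *        cl_lock_mutex_put(env, lock);
 *
 * When blocking is not allowed, cl_lock_mutex_try() can be used instead: a 0
 * result means the mutex was taken and must still be dropped with
 * cl_lock_mutex_put(), while -EBUSY means the caller has to back off.
 */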
772 * Returns true iff lock's mutex is owned by the current thread.
774 int cl_lock_is_mutexed(struct cl_lock *lock)
776 return lock->cll_guarder == cfs_current();
778 EXPORT_SYMBOL(cl_lock_is_mutexed);
781 * Returns number of cl_lock mutices held by the current thread (environment).
783 int cl_lock_nr_mutexed(const struct lu_env *env)
785 struct cl_thread_info *info;
790 * NOTE: if summation across all nesting levels (currently 2) proves
791 * too expensive, a summary counter can be added to
792 * struct cl_thread_info.
794 info = cl_env_info(env);
795 for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
796 locked += info->clt_counters[i].ctc_nr_locks_locked;
799 EXPORT_SYMBOL(cl_lock_nr_mutexed);
801 static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
803 LINVRNT(cl_lock_is_mutexed(lock));
804 LINVRNT(cl_lock_invariant(env, lock));
806 if (!(lock->cll_flags & CLF_CANCELLED)) {
807 const struct cl_lock_slice *slice;
809 lock->cll_flags |= CLF_CANCELLED;
810 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
812 if (slice->cls_ops->clo_cancel != NULL)
813 slice->cls_ops->clo_cancel(env, slice);
819 static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
821 struct cl_object_header *head;
822 const struct cl_lock_slice *slice;
824 LINVRNT(cl_lock_is_mutexed(lock));
825 LINVRNT(cl_lock_invariant(env, lock));
828 if (lock->cll_state < CLS_FREEING) {
829 LASSERT(lock->cll_state != CLS_INTRANSIT);
830 cl_lock_state_set(env, lock, CLS_FREEING);
832 head = cl_object_header(lock->cll_descr.cld_obj);
834 cfs_spin_lock(&head->coh_lock_guard);
835 cfs_list_del_init(&lock->cll_linkage);
837 cfs_spin_unlock(&head->coh_lock_guard);
839 * From now on, no new references to this lock can be acquired
840 * by cl_lock_lookup().
842 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
844 if (slice->cls_ops->clo_delete != NULL)
845 slice->cls_ops->clo_delete(env, slice);
848 * From now on, no new references to this lock can be acquired
849 * by layer-specific means (like a pointer from struct
850 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
853 * Lock will be finally freed in cl_lock_put() when last of
854 * existing references goes away.
861 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
862 * top-lock (nesting == 0) accounts for this modification in the per-thread
863 * debugging counters. Sub-lock holds can be released by a thread different
864 * from one that acquired it.
866 static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
869 struct cl_thread_counters *counters;
870 enum clt_nesting_level nesting;
872 lock->cll_holds += delta;
873 nesting = cl_lock_nesting(lock);
874 if (nesting == CNL_TOP) {
875 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
876 counters->ctc_nr_held += delta;
877 LASSERT(counters->ctc_nr_held >= 0);
882 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
883 * cl_lock_hold_mod() for the explanation of the debugging code.
885 static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
888 struct cl_thread_counters *counters;
889 enum clt_nesting_level nesting;
891 lock->cll_users += delta;
892 nesting = cl_lock_nesting(lock);
893 if (nesting == CNL_TOP) {
894 counters = &cl_env_info(env)->clt_counters[CNL_TOP];
895 counters->ctc_nr_used += delta;
896 LASSERT(counters->ctc_nr_used >= 0);
900 static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
901 const char *scope, const void *source)
903 LINVRNT(cl_lock_is_mutexed(lock));
904 LINVRNT(cl_lock_invariant(env, lock));
905 LASSERT(lock->cll_holds > 0);
908 cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
909 lu_ref_del(&lock->cll_holders, scope, source);
910 cl_lock_hold_mod(env, lock, -1);
911 if (lock->cll_holds == 0) {
912 if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
913 lock->cll_descr.cld_mode == CLM_GROUP)
915 * If the lock is still a phantom or group lock when the user is
916 * done with it, destroy the lock.
918 lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
919 if (lock->cll_flags & CLF_CANCELPEND) {
920 lock->cll_flags &= ~CLF_CANCELPEND;
921 cl_lock_cancel0(env, lock);
923 if (lock->cll_flags & CLF_DOOMED) {
924 /* no longer doomed: it's dead... Jim. */
925 lock->cll_flags &= ~CLF_DOOMED;
926 cl_lock_delete0(env, lock);
934 * Waits until lock state is changed.
936 * This function is called with cl_lock mutex locked, atomically releases
937 * mutex and goes to sleep, waiting for a lock state change (signaled by
938 * cl_lock_signal()), and re-acquires the mutex before return.
940 * This function is used to wait until lock state machine makes some progress
941 * and to emulate synchronous operations on top of asynchronous lock
944 * \retval -EINTR wait was interrupted
946 * \retval 0 wait wasn't interrupted
948 * \pre cl_lock_is_mutexed(lock)
950 * \see cl_lock_signal()
952 int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
954 cfs_waitlink_t waiter;
958 LINVRNT(cl_lock_is_mutexed(lock));
959 LINVRNT(cl_lock_invariant(env, lock));
960 LASSERT(lock->cll_depth == 1);
961 LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
963 cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
964 result = lock->cll_error;
966 cfs_waitlink_init(&waiter);
967 cfs_waitq_add(&lock->cll_wq, &waiter);
968 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
969 cl_lock_mutex_put(env, lock);
971 LASSERT(cl_lock_nr_mutexed(env) == 0);
972 cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
974 cl_lock_mutex_get(env, lock);
975 cfs_set_current_state(CFS_TASK_RUNNING);
976 cfs_waitq_del(&lock->cll_wq, &waiter);
977 result = cfs_signal_pending() ? -EINTR : 0;
981 EXPORT_SYMBOL(cl_lock_state_wait);
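/*
 * Illustrative sketch (added, not from the original code): the canonical way
 * the "try" functions below combine with cl_lock_state_wait() to build a
 * blocking operation on top of the asynchronous state machine; cl_wait() and
 * cl_enqueue_locked() in this file have this shape. cl_some_try() stands in
 * for any of the *_try() entry points:
 *
 *        cl_lock_mutex_get(env, lock);
 *        do {
 *                result = cl_some_try(env, lock);
 *                if (result == CLO_WAIT) {
 *                        result = cl_lock_state_wait(env, lock);
 *                        if (result == 0)
 *                                continue;        state changed, retry
 *                }
 *                break;
 *        } while (1);
 *        cl_lock_mutex_put(env, lock);
 */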
983 static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
984 enum cl_lock_state state)
986 const struct cl_lock_slice *slice;
989 LINVRNT(cl_lock_is_mutexed(lock));
990 LINVRNT(cl_lock_invariant(env, lock));
992 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
993 if (slice->cls_ops->clo_state != NULL)
994 slice->cls_ops->clo_state(env, slice, state);
995 cfs_waitq_broadcast(&lock->cll_wq);
1000 * Notifies waiters that lock state changed.
1002 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1003 * layers about state change by calling cl_lock_operations::clo_state()
1006 void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
1009 cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
1010 cl_lock_state_signal(env, lock, lock->cll_state);
1013 EXPORT_SYMBOL(cl_lock_signal);
1016 * Changes lock state.
1018 * This function is invoked to notify layers that the lock state changed, possibly
1019 * as a result of an asynchronous event such as call-back reception.
1021 * \post lock->cll_state == state
1023 * \see cl_lock_operations::clo_state()
1025 void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
1026 enum cl_lock_state state)
1028 struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
1031 LASSERT(lock->cll_state <= state ||
1032 (lock->cll_state == CLS_CACHED &&
1033 (state == CLS_HELD || /* lock found in cache */
1034 state == CLS_NEW || /* sub-lock canceled */
1035 state == CLS_INTRANSIT)) ||
1036 /* lock is in transit state */
1037 lock->cll_state == CLS_INTRANSIT);
1039 if (lock->cll_state != state) {
1040 cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
1041 cfs_atomic_inc(&site->cs_locks_state[state]);
1043 cl_lock_state_signal(env, lock, state);
1044 lock->cll_state = state;
1048 EXPORT_SYMBOL(cl_lock_state_set);
1050 static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
1052 const struct cl_lock_slice *slice;
1058 LINVRNT(cl_lock_is_mutexed(lock));
1059 LINVRNT(cl_lock_invariant(env, lock));
1060 LASSERT(lock->cll_state == CLS_INTRANSIT);
1063 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
1065 if (slice->cls_ops->clo_unuse != NULL) {
1066 result = slice->cls_ops->clo_unuse(env, slice);
1071 LASSERT(result != -ENOSYS);
1072 } while (result == CLO_REPEAT);
1078 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
1079 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
1080 * If @atomic is 1, the lock must be unused on failure to restore it and keep
1081 * the use process atomic.
1083 int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
1085 const struct cl_lock_slice *slice;
1087 enum cl_lock_state state;
1090 cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
1092 LASSERT(lock->cll_state == CLS_CACHED);
1093 if (lock->cll_error)
1094 RETURN(lock->cll_error);
1097 state = cl_lock_intransit(env, lock);
1098 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1099 if (slice->cls_ops->clo_use != NULL) {
1100 result = slice->cls_ops->clo_use(env, slice);
1105 LASSERT(result != -ENOSYS);
1107 LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
1113 if (result == -ESTALE) {
1115 * ESTALE means the sub-lock is being cancelled
1116 * at this time; set the lock state to NEW
1117 * here and ask the caller to repeat.
1120 result = CLO_REPEAT;
1123 /* @atomic means back-off-on-failure. */
1126 rc = cl_unuse_try_internal(env, lock);
1127 /* Vet the results. */
1128 if (rc < 0 && result > 0)
1133 cl_lock_extransit(env, lock, state);
1136 EXPORT_SYMBOL(cl_use_try);
1139 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1142 static int cl_enqueue_kick(const struct lu_env *env,
1143 struct cl_lock *lock,
1144 struct cl_io *io, __u32 flags)
1147 const struct cl_lock_slice *slice;
1151 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1152 if (slice->cls_ops->clo_enqueue != NULL) {
1153 result = slice->cls_ops->clo_enqueue(env,
1159 LASSERT(result != -ENOSYS);
1164 * Tries to enqueue a lock.
1166 * This function is called repeatedly by cl_enqueue() until either lock is
1167 * enqueued, or error occurs. This function does not block waiting for
1168 * networking communication to complete.
1170 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1171 * lock->cll_state == CLS_HELD)
1173 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1174 * \see cl_lock_state::CLS_ENQUEUED
1176 int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
1177 struct cl_io *io, __u32 flags)
1182 cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
1186 LINVRNT(cl_lock_is_mutexed(lock));
1188 if (lock->cll_error != 0)
1190 switch (lock->cll_state) {
1192 cl_lock_state_set(env, lock, CLS_QUEUING);
1196 result = cl_enqueue_kick(env, lock, io, flags);
1198 cl_lock_state_set(env, lock, CLS_ENQUEUED);
1201 LASSERT(cl_lock_is_intransit(lock));
1205 /* yank lock from the cache. */
1206 result = cl_use_try(env, lock, 0);
1215 * impossible, only held locks with increased
1216 * ->cll_holds can be enqueued, and they cannot be
1221 } while (result == CLO_REPEAT);
1223 cl_lock_error(env, lock, result);
1224 RETURN(result ?: lock->cll_error);
1226 EXPORT_SYMBOL(cl_enqueue_try);
1229 * Cancel the conflicting lock found during previous enqueue.
1231 * \retval 0 conflicting lock has been canceled.
1232 * \retval -ve error code.
1234 int cl_lock_enqueue_wait(const struct lu_env *env,
1235 struct cl_lock *lock,
1238 struct cl_lock *conflict;
1242 LASSERT(cl_lock_is_mutexed(lock));
1243 LASSERT(lock->cll_state == CLS_QUEUING);
1244 LASSERT(lock->cll_conflict != NULL);
1246 conflict = lock->cll_conflict;
1247 lock->cll_conflict = NULL;
1249 cl_lock_mutex_put(env, lock);
1250 LASSERT(cl_lock_nr_mutexed(env) == 0);
1252 cl_lock_mutex_get(env, conflict);
1253 cl_lock_cancel(env, conflict);
1254 cl_lock_delete(env, conflict);
1256 while (conflict->cll_state != CLS_FREEING) {
1257 rc = cl_lock_state_wait(env, conflict);
1261 cl_lock_mutex_put(env, conflict);
1262 lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
1263 cl_lock_put(env, conflict);
1266 cl_lock_mutex_get(env, lock);
1271 EXPORT_SYMBOL(cl_lock_enqueue_wait);
1273 static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
1274 struct cl_io *io, __u32 enqflags)
1280 LINVRNT(cl_lock_is_mutexed(lock));
1281 LINVRNT(cl_lock_invariant(env, lock));
1282 LASSERT(lock->cll_holds > 0);
1284 cl_lock_user_add(env, lock);
1286 result = cl_enqueue_try(env, lock, io, enqflags);
1287 if (result == CLO_WAIT) {
1288 if (lock->cll_conflict != NULL)
1289 result = cl_lock_enqueue_wait(env, lock, 1);
1291 result = cl_lock_state_wait(env, lock);
1298 cl_lock_user_del(env, lock);
1299 cl_lock_error(env, lock, result);
1301 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1302 lock->cll_state == CLS_HELD));
1309 * \pre current thread or io owns a hold on lock.
1311 * \post ergo(result == 0, lock->users increased)
1312 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1313 * lock->cll_state == CLS_HELD)
1315 int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
1316 struct cl_io *io, __u32 enqflags)
1322 cl_lock_lockdep_acquire(env, lock, enqflags);
1323 cl_lock_mutex_get(env, lock);
1324 result = cl_enqueue_locked(env, lock, io, enqflags);
1325 cl_lock_mutex_put(env, lock);
1327 cl_lock_lockdep_release(env, lock);
1328 LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1329 lock->cll_state == CLS_HELD));
1332 EXPORT_SYMBOL(cl_enqueue);
1335 * Tries to unlock a lock.
1337 * This function is called repeatedly by cl_unuse() until either lock is
1338 * unlocked, or error occurs.
1339 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1341 * \pre lock->cll_state == CLS_HELD
1343 * \post ergo(result == 0, lock->cll_state == CLS_CACHED)
1345 * \see cl_unuse() cl_lock_operations::clo_unuse()
1346 * \see cl_lock_state::CLS_CACHED
1348 int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
1351 enum cl_lock_state state = CLS_NEW;
1354 cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
1356 LASSERT(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED);
1357 if (lock->cll_users > 1) {
1358 cl_lock_user_del(env, lock);
1363 * New lock users (->cll_users) do not prevent unlocking
1364 * from proceeding. From this point, the lock eventually reaches
1365 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1368 state = cl_lock_intransit(env, lock);
1370 result = cl_unuse_try_internal(env, lock);
1371 LASSERT(lock->cll_state == CLS_INTRANSIT);
1372 LASSERT(result != CLO_WAIT);
1373 cl_lock_user_del(env, lock);
1374 if (result == 0 || result == -ESTALE) {
1376 * Return lock back to the cache. This is the only
1377 * place where lock is moved into CLS_CACHED state.
1379 * If one of ->clo_unuse() methods returned -ESTALE, lock
1380 * cannot be placed into cache and has to be
1381 * re-initialized. This happens e.g., when a sub-lock was
1382 * canceled while unlocking was in progress.
1384 if (state == CLS_HELD && result == 0)
1388 cl_lock_extransit(env, lock, state);
1391 * Hide -ESTALE error.
1392 * If the lock is a glimpse lock with multiple stripes, and one of
1393 * its sub-locks returned -ENAVAIL while the other sub-locks are
1394 * matched write locks, then we can't set this lock to error,
1395 * because otherwise some of its sub-locks may not be canceled,
1396 * and some dirty pages would never be written out to the
1397 * OSTs. -jay
1401 CERROR("result = %d, this is unlikely!\n", result);
1402 cl_lock_extransit(env, lock, state);
1405 result = result ?: lock->cll_error;
1407 cl_lock_error(env, lock, result);
1410 EXPORT_SYMBOL(cl_unuse_try);
1412 static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
1417 result = cl_unuse_try(env, lock);
1419 CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
1427 void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
1430 cl_lock_mutex_get(env, lock);
1431 cl_unuse_locked(env, lock);
1432 cl_lock_mutex_put(env, lock);
1433 cl_lock_lockdep_release(env, lock);
1436 EXPORT_SYMBOL(cl_unuse);
1439 * Tries to wait for a lock.
1441 * This function is called repeatedly by cl_wait() until either lock is
1442 * granted, or error occurs. This function does not block waiting for network
1443 * communication to complete.
1445 * \see cl_wait() cl_lock_operations::clo_wait()
1446 * \see cl_lock_state::CLS_HELD
1448 int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
1450 const struct cl_lock_slice *slice;
1454 cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
1456 LINVRNT(cl_lock_is_mutexed(lock));
1457 LINVRNT(cl_lock_invariant(env, lock));
1458 LASSERT(lock->cll_state == CLS_ENQUEUED ||
1459 lock->cll_state == CLS_HELD ||
1460 lock->cll_state == CLS_INTRANSIT);
1461 LASSERT(lock->cll_users > 0);
1462 LASSERT(lock->cll_holds > 0);
1465 if (lock->cll_error != 0)
1468 if (cl_lock_is_intransit(lock)) {
1473 if (lock->cll_state == CLS_HELD)
1478 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1479 if (slice->cls_ops->clo_wait != NULL) {
1480 result = slice->cls_ops->clo_wait(env, slice);
1485 LASSERT(result != -ENOSYS);
1487 LASSERT(lock->cll_state != CLS_INTRANSIT);
1488 cl_lock_state_set(env, lock, CLS_HELD);
1490 } while (result == CLO_REPEAT);
1491 RETURN(result ?: lock->cll_error);
1493 EXPORT_SYMBOL(cl_wait_try);
1496 * Waits until enqueued lock is granted.
1498 * \pre current thread or io owns a hold on the lock
1499 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1500 * lock->cll_state == CLS_HELD)
1502 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1504 int cl_wait(const struct lu_env *env, struct cl_lock *lock)
1509 cl_lock_mutex_get(env, lock);
1511 LINVRNT(cl_lock_invariant(env, lock));
1512 LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
1513 "Wrong state %d \n", lock->cll_state);
1514 LASSERT(lock->cll_holds > 0);
1517 result = cl_wait_try(env, lock);
1518 if (result == CLO_WAIT) {
1519 result = cl_lock_state_wait(env, lock);
1526 cl_lock_user_del(env, lock);
1527 cl_lock_error(env, lock, result);
1528 cl_lock_lockdep_release(env, lock);
1530 cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
1531 cl_lock_mutex_put(env, lock);
1532 LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
1535 EXPORT_SYMBOL(cl_wait);
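/*
 * Illustrative end-to-end sketch (hypothetical caller, added), combining the
 * entry points in this file under their documented pre- and post-conditions:
 * take a hold, enqueue, wait until the lock is granted, then drop the user
 * and the hold again. Error handling is abridged; "need", "io" and
 * "enqflags" are assumed to be prepared by the caller:
 *
 *        lock = cl_lock_hold(env, io, need, "sketch", cfs_current());
 *        if (!IS_ERR(lock)) {
 *                rc = cl_enqueue(env, lock, io, enqflags);
 *                if (rc == 0) {
 *                        rc = cl_wait(env, lock);
 *                        if (rc == 0) {
 *                                ... lock->cll_state == CLS_HELD here ...
 *                                cl_unuse(env, lock);
 *                        }
 *                }
 *                cl_lock_release(env, lock, "sketch", cfs_current());
 *        }
 */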
1538 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1541 unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
1543 const struct cl_lock_slice *slice;
1544 unsigned long pound;
1545 unsigned long ounce;
1548 LINVRNT(cl_lock_is_mutexed(lock));
1549 LINVRNT(cl_lock_invariant(env, lock));
1552 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1553 if (slice->cls_ops->clo_weigh != NULL) {
1554 ounce = slice->cls_ops->clo_weigh(env, slice);
1556 if (pound < ounce) /* over-weight^Wflow */
1562 EXPORT_SYMBOL(cl_lock_weigh);
1565 * Notifies layers that lock description changed.
1567 * The server can grant the client a lock different from the one that was
1568 * requested (e.g., larger in extent). This method is called when the actually
1569 * granted lock description becomes known, to let layers accommodate the changed lock
1572 * \see cl_lock_operations::clo_modify()
1574 int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
1575 const struct cl_lock_descr *desc)
1577 const struct cl_lock_slice *slice;
1578 struct cl_object *obj = lock->cll_descr.cld_obj;
1579 struct cl_object_header *hdr = cl_object_header(obj);
1583 cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
1584 /* don't allow object to change */
1585 LASSERT(obj == desc->cld_obj);
1586 LINVRNT(cl_lock_is_mutexed(lock));
1587 LINVRNT(cl_lock_invariant(env, lock));
1589 cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
1590 if (slice->cls_ops->clo_modify != NULL) {
1591 result = slice->cls_ops->clo_modify(env, slice, desc);
1596 CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
1597 PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
1599 * Just replace description in place. Nothing more is needed for
1600 * now. If locks were indexed according to their extent and/or mode,
1601 * that index would have to be updated here.
1603 cfs_spin_lock(&hdr->coh_lock_guard);
1604 lock->cll_descr = *desc;
1605 cfs_spin_unlock(&hdr->coh_lock_guard);
1608 EXPORT_SYMBOL(cl_lock_modify);
1611 * Initializes lock closure with a given origin.
1613 * \see cl_lock_closure
1615 void cl_lock_closure_init(const struct lu_env *env,
1616 struct cl_lock_closure *closure,
1617 struct cl_lock *origin, int wait)
1619 LINVRNT(cl_lock_is_mutexed(origin));
1620 LINVRNT(cl_lock_invariant(env, origin));
1622 CFS_INIT_LIST_HEAD(&closure->clc_list);
1623 closure->clc_origin = origin;
1624 closure->clc_wait = wait;
1625 closure->clc_nr = 0;
1627 EXPORT_SYMBOL(cl_lock_closure_init);
1630 * Builds a closure of \a lock.
1632 * Building of a closure consists of adding initial lock (\a lock) into it,
1633 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1634 * methods might call cl_lock_closure_build() recursively again, adding more
1635 * locks to the closure, etc.
1637 * \see cl_lock_closure
1639 int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
1640 struct cl_lock_closure *closure)
1642 const struct cl_lock_slice *slice;
1646 LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
1647 LINVRNT(cl_lock_invariant(env, closure->clc_origin));
1649 result = cl_lock_enclosure(env, lock, closure);
1651 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
1652 if (slice->cls_ops->clo_closure != NULL) {
1653 result = slice->cls_ops->clo_closure(env, slice,
1661 cl_lock_disclosure(env, closure);
1664 EXPORT_SYMBOL(cl_lock_closure_build);
1667 * Adds new lock to a closure.
1669 * Try-locks \a lock and, if this succeeds, adds it to the closure (never more than
1670 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1671 * until next try-lock is likely to succeed.
1673 int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
1674 struct cl_lock_closure *closure)
1678 cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
1679 if (!cl_lock_mutex_try(env, lock)) {
1681 * If lock->cll_inclosure is not empty, lock is already in
1684 if (cfs_list_empty(&lock->cll_inclosure)) {
1685 cl_lock_get_trust(lock);
1686 lu_ref_add(&lock->cll_reference, "closure", closure);
1687 cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
1690 cl_lock_mutex_put(env, lock);
1693 cl_lock_disclosure(env, closure);
1694 if (closure->clc_wait) {
1695 cl_lock_get_trust(lock);
1696 lu_ref_add(&lock->cll_reference, "closure-w", closure);
1697 cl_lock_mutex_put(env, closure->clc_origin);
1699 LASSERT(cl_lock_nr_mutexed(env) == 0);
1700 cl_lock_mutex_get(env, lock);
1701 cl_lock_mutex_put(env, lock);
1703 cl_lock_mutex_get(env, closure->clc_origin);
1704 lu_ref_del(&lock->cll_reference, "closure-w", closure);
1705 cl_lock_put(env, lock);
1707 result = CLO_REPEAT;
1711 EXPORT_SYMBOL(cl_lock_enclosure);
1713 /** Releases mutices of enclosed locks. */
1714 void cl_lock_disclosure(const struct lu_env *env,
1715 struct cl_lock_closure *closure)
1717 struct cl_lock *scan;
1718 struct cl_lock *temp;
1720 cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
1721 cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
1723 cfs_list_del_init(&scan->cll_inclosure);
1724 cl_lock_mutex_put(env, scan);
1725 lu_ref_del(&scan->cll_reference, "closure", closure);
1726 cl_lock_put(env, scan);
1729 LASSERT(closure->clc_nr == 0);
1731 EXPORT_SYMBOL(cl_lock_disclosure);
1733 /** Finalizes a closure. */
1734 void cl_lock_closure_fini(struct cl_lock_closure *closure)
1736 LASSERT(closure->clc_nr == 0);
1737 LASSERT(cfs_list_empty(&closure->clc_list));
1739 EXPORT_SYMBOL(cl_lock_closure_fini);
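/*
 * Illustrative sketch (hypothetical caller, added): the closure helpers above
 * are meant to be used roughly as follows, with the origin lock mutexed by
 * the caller and CLO_REPEAT restarting the attempt (cl_lock_closure_build()
 * discloses the closure itself on any non-zero result):
 *
 *        struct cl_lock_closure closure;
 *        int rc;
 *
 *        cl_lock_closure_init(env, &closure, origin, 1);
 *        do {
 *                rc = cl_lock_closure_build(env, lock, &closure);
 *        } while (rc == CLO_REPEAT);
 *        if (rc == 0) {
 *                ... every enclosed lock is mutexed here ...
 *                cl_lock_disclosure(env, &closure);
 *        }
 *        cl_lock_closure_fini(&closure);
 */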
1742 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
1743 * destroyed, then destroys it. If there are holds on the lock, destruction is
1744 * postponed until all holds are released. This is called when a decision is
1745 * made to destroy the lock in the future. E.g., when a blocking AST is
1746 * received on it, or fatal communication error happens.
1748 * Caller must have a reference on this lock to prevent a situation where the
1749 * deleted lock lingers in memory indefinitely, because nobody calls
1750 * cl_lock_put() to finish it.
1752 * \pre atomic_read(&lock->cll_ref) > 0
1753 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1754 * cl_lock_nr_mutexed(env) == 1)
1755 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1756 * held, as deletion of sub-locks might require releasing a top-lock
1759 * \see cl_lock_operations::clo_delete()
1760 * \see cl_lock::cll_holds
1762 void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
1764 LINVRNT(cl_lock_is_mutexed(lock));
1765 LINVRNT(cl_lock_invariant(env, lock));
1766 LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
1767 cl_lock_nr_mutexed(env) == 1));
1770 cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
1771 if (lock->cll_holds == 0)
1772 cl_lock_delete0(env, lock);
1774 lock->cll_flags |= CLF_DOOMED;
1777 EXPORT_SYMBOL(cl_lock_delete);
1780 * Mark lock as irrecoverably failed, and mark it for destruction. This
1781 * happens when, e.g., server fails to grant a lock to us, or networking
1784 * \pre atomic_read(&lock->cll_ref) > 0
1786 * \see clo_lock_delete()
1787 * \see cl_lock::cll_holds
1789 void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
1791 LINVRNT(cl_lock_is_mutexed(lock));
1792 LINVRNT(cl_lock_invariant(env, lock));
1795 cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
1796 if (lock->cll_error == 0 && error != 0) {
1797 lock->cll_error = error;
1798 cl_lock_signal(env, lock);
1799 cl_lock_cancel(env, lock);
1800 cl_lock_delete(env, lock);
1804 EXPORT_SYMBOL(cl_lock_error);
1807 * Cancels this lock. Notifies layers
1808 * (bottom-to-top) that the lock is being cancelled. If
1809 * there are holds on the lock, cancellation is postponed until
1810 * all holds are released.
1812 * Cancellation notification is delivered to layers at most once.
1814 * \see cl_lock_operations::clo_cancel()
1815 * \see cl_lock::cll_holds
1817 void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
1819 LINVRNT(cl_lock_is_mutexed(lock));
1820 LINVRNT(cl_lock_invariant(env, lock));
1823 cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
1824 if (lock->cll_holds == 0)
1825 cl_lock_cancel0(env, lock);
1827 lock->cll_flags |= CLF_CANCELPEND;
1830 EXPORT_SYMBOL(cl_lock_cancel);
1833 * Finds an existing lock covering given page and optionally different from a
1834 * given \a except lock.
1836 struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
1837 struct cl_page *page, struct cl_lock *except,
1838 int pending, int canceld)
1840 struct cl_object_header *head;
1841 struct cl_lock *scan;
1842 struct cl_lock *lock;
1843 struct cl_lock_descr *need;
1847 head = cl_object_header(obj);
1848 need = &cl_env_info(env)->clt_descr;
1851 need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
1853 need->cld_start = need->cld_end = page->cp_index;
1854 need->cld_enq_flags = 0;
1856 cfs_spin_lock(&head->coh_lock_guard);
1857 /* It is fine to match any group lock since there could be only one
1858 * with a unique gid and it conflicts with all other lock modes too */
1859 cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
1860 if (scan != except &&
1861 (scan->cll_descr.cld_mode == CLM_GROUP ||
1862 cl_lock_ext_match(&scan->cll_descr, need)) &&
1863 scan->cll_state >= CLS_HELD &&
1864 scan->cll_state < CLS_FREEING &&
1866 * This check is racy as the lock can be canceled right
1867 * after it is done, but this is fine, because page exists
1870 (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
1871 (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
1872 /* Don't increase cs_hit here since this
1873 * is just a helper function. */
1874 cl_lock_get_trust(scan);
1879 cfs_spin_unlock(&head->coh_lock_guard);
1882 EXPORT_SYMBOL(cl_lock_at_page);
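/*
 * Illustrative sketch (hypothetical caller, added): cl_lock_at_page() above
 * returns the lock with a trusted reference already taken, so the caller
 * only has to drop that reference when done:
 *
 *        lock = cl_lock_at_page(env, obj, page, NULL, 1, 0);
 *        if (lock != NULL) {
 *                ... "page" is known to be covered by "lock" here ...
 *                cl_lock_put(env, lock);
 *        }
 */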
1885 * Returns a list of pages protected (only) by a given lock.
1887 * Scans an extent of page radix tree, corresponding to the \a lock and queues
1888 * all pages that are not protected by locks other than \a lock into \a queue.
1890 void cl_lock_page_list_fixup(const struct lu_env *env,
1891 struct cl_io *io, struct cl_lock *lock,
1892 struct cl_page_list *queue)
1894 struct cl_page *page;
1895 struct cl_page *temp;
1896 struct cl_page_list *plist = &cl_env_info(env)->clt_list;
1898 LINVRNT(cl_lock_invariant(env, lock));
1901 /* Now that we have a list of cl_pages under the \a lock, we need
1902 * to check whether some of the pages are covered by another ldlm lock.
1903 * If so, they don't need to be written out this time.
1905 * For example, the client holds PW locks A:[0,200] and B:[100,300], and
1906 * the latter is to be canceled. This means another client is
1907 * reading/writing [200,300], since A won't be canceled. We actually
1908 * only need to write the pages covered by [200,300]. This is safe,
1909 * since [100,200] is still protected by lock A.
1912 cl_page_list_init(plist);
1913 cl_page_list_for_each_safe(page, temp, queue) {
1914 pgoff_t idx = page->cp_index;
1915 struct cl_lock *found;
1916 struct cl_lock_descr *descr;
1918 /* The algorithm relies on pages being sorted by ascending index. */
1919 LASSERT(ergo(&temp->cp_batch != &queue->pl_pages,
1920 page->cp_index < temp->cp_index));
1922 found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
1927 descr = &found->cll_descr;
1928 cfs_list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
1930 idx = page->cp_index;
1931 if (descr->cld_start > idx || descr->cld_end < idx)
1933 cl_page_list_move(plist, queue, page);
1935 cl_lock_put(env, found);
1938 /* The pages in plist are covered by other locks, don't handle them
1942 cl_page_list_disown(env, io, plist);
1943 cl_page_list_fini(env, plist);
1946 EXPORT_SYMBOL(cl_lock_page_list_fixup);
1949 * Invalidate pages protected by the given lock, sending them out to the
1950 * server first, if necessary.
1952 * This function does the following:
1954 * - collects a list of pages to be invalidated,
1956 * - unmaps them from the user virtual memory,
1958 * - sends dirty pages to the server,
1960 * - waits for transfer completion,
1962 * - discards pages, and throws them out of memory.
1964 * If \a discard is set, pages are discarded without sending them to the
1967 * If error happens on any step, the process continues anyway (the reasoning
1968 * behind this being that lock cancellation cannot be delayed indefinitely).
1970 int cl_lock_page_out(const struct lu_env *env, struct cl_lock *lock,
1973 struct cl_thread_info *info = cl_env_info(env);
1974 struct cl_io *io = &info->clt_io;
1975 struct cl_2queue *queue = &info->clt_queue;
1976 struct cl_lock_descr *descr = &lock->cll_descr;
1978 int nonblock = 1, resched;
1981 LINVRNT(cl_lock_invariant(env, lock));
1984 io->ci_obj = cl_object_top(descr->cld_obj);
1985 result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
1990 cl_2queue_init(queue);
1991 cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
1992 descr->cld_end, &queue->c2_qin, nonblock,
1994 page_count = queue->c2_qin.pl_nr;
1995 if (page_count > 0) {
1996 result = cl_page_list_unmap(env, io, &queue->c2_qin);
1998 long timeout = 600; /* 10 minutes. */
1999 /* for debugging purposes, if this request can't be
2000 * finished in 10 minutes, we hope it can
2003 result = cl_io_submit_sync(env, io, CRT_WRITE,
2007 CWARN("Writing %lu pages error: %d\n",
2008 page_count, result);
2010 cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
2011 cl_2queue_discard(env, io, queue);
2012 cl_2queue_disown(env, io, queue);
2014 cl_2queue_fini(env, queue);
2018 } while (resched || nonblock--);
2020 cl_io_fini(env, io);
2023 EXPORT_SYMBOL(cl_lock_page_out);
2026 * Eliminate all locks for a given object.
2028 * Caller has to guarantee that no lock is in active use.
2030 * \param cancel when this is set, cl_locks_prune() cancels locks before
2033 void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
2035 struct cl_object_header *head;
2036 struct cl_lock *lock;
2039 head = cl_object_header(obj);
2041 * If locks are destroyed without cancellation, all pages must be
2042 * already destroyed (as otherwise they will be left unprotected).
2044 LASSERT(ergo(!cancel,
2045 head->coh_tree.rnode == NULL && head->coh_pages == 0));
2047 cfs_spin_lock(&head->coh_lock_guard);
2048 while (!cfs_list_empty(&head->coh_locks)) {
2049 lock = container_of(head->coh_locks.next,
2050 struct cl_lock, cll_linkage);
2051 cl_lock_get_trust(lock);
2052 cfs_spin_unlock(&head->coh_lock_guard);
2053 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
2054 cl_lock_mutex_get(env, lock);
2055 if (lock->cll_state < CLS_FREEING) {
2056 LASSERT(lock->cll_holds == 0);
2057 LASSERT(lock->cll_users == 0);
2059 cl_lock_cancel(env, lock);
2060 cl_lock_delete(env, lock);
2062 cl_lock_mutex_put(env, lock);
2063 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
2064 cl_lock_put(env, lock);
2065 cfs_spin_lock(&head->coh_lock_guard);
2067 cfs_spin_unlock(&head->coh_lock_guard);
2070 EXPORT_SYMBOL(cl_locks_prune);
2073 * Returns true if \a addr is an address of an allocated cl_lock. Used in
2074 * assertions. This check is optimistically imprecise, i.e., it occasionally
2075 * returns true for incorrect addresses, but if it returns false, then the
2076 * address is guaranteed to be incorrect. (Should be named cl_lockp().)
2080 int cl_is_lock(const void *addr)
2082 return cfs_mem_is_in_cache(addr, cl_lock_kmem);
2084 EXPORT_SYMBOL(cl_is_lock);
2086 static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
2087 const struct cl_io *io,
2088 const struct cl_lock_descr *need,
2089 const char *scope, const void *source)
2091 struct cl_lock *lock;
2096 lock = cl_lock_find(env, io, need);
2099 cl_lock_mutex_get(env, lock);
2100 if (lock->cll_state < CLS_FREEING &&
2101 !(lock->cll_flags & CLF_CANCELLED)) {
2102 cl_lock_hold_mod(env, lock, +1);
2103 lu_ref_add(&lock->cll_holders, scope, source);
2104 lu_ref_add(&lock->cll_reference, scope, source);
2107 cl_lock_mutex_put(env, lock);
2108 cl_lock_put(env, lock);
2114 * Returns a lock matching \a need description with a reference and a hold on
2117 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2118 * guarantees that lock is not in the CLS_FREEING state on return.
2120 struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
2121 const struct cl_lock_descr *need,
2122 const char *scope, const void *source)
2124 struct cl_lock *lock;
2128 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2130 cl_lock_mutex_put(env, lock);
2133 EXPORT_SYMBOL(cl_lock_hold);
2136 * Main high-level entry point of cl_lock interface that finds existing or
2137 * enqueues new lock matching given description.
2139 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
2140 const struct cl_lock_descr *need,
2141 const char *scope, const void *source)
2143 struct cl_lock *lock;
2145 __u32 enqflags = need->cld_enq_flags;
2149 lock = cl_lock_hold_mutex(env, io, need, scope, source);
2150 if (!IS_ERR(lock)) {
2151 rc = cl_enqueue_locked(env, lock, io, enqflags);
2153 if (cl_lock_fits_into(env, lock, need, io)) {
2154 cl_lock_mutex_put(env, lock);
2155 cl_lock_lockdep_acquire(env,
2159 cl_unuse_locked(env, lock);
2161 cl_lock_trace(D_DLMTRACE, env, "enqueue failed", lock);
2162 cl_lock_hold_release(env, lock, scope, source);
2163 cl_lock_mutex_put(env, lock);
2164 lu_ref_del(&lock->cll_reference, scope, source);
2165 cl_lock_put(env, lock);
2172 EXPORT_SYMBOL(cl_lock_request);
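/*
 * Illustrative sketch (hypothetical caller, added): cl_lock_request()
 * combines cl_lock_hold() with enqueueing, so a typical user only has to
 * wait for the lock, use it, and then unuse and release it:
 *
 *        lock = cl_lock_request(env, io, need, "sketch", cfs_current());
 *        if (!IS_ERR(lock)) {
 *                rc = cl_wait(env, lock);
 *                if (rc == 0) {
 *                        ... the extent described by "need" is protected ...
 *                        cl_unuse(env, lock);
 *                }
 *                cl_lock_release(env, lock, "sketch", cfs_current());
 *        }
 */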
2175 * Adds a hold to a known lock.
2177 void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
2178 const char *scope, const void *source)
2180 LINVRNT(cl_lock_is_mutexed(lock));
2181 LINVRNT(cl_lock_invariant(env, lock));
2182 LASSERT(lock->cll_state != CLS_FREEING);
2185 cl_lock_hold_mod(env, lock, +1);
2187 lu_ref_add(&lock->cll_holders, scope, source);
2188 lu_ref_add(&lock->cll_reference, scope, source);
2191 EXPORT_SYMBOL(cl_lock_hold_add);
2194 * Releases a hold and a reference on a lock, on which caller acquired a
2197 void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
2198 const char *scope, const void *source)
2200 LINVRNT(cl_lock_invariant(env, lock));
2202 cl_lock_hold_release(env, lock, scope, source);
2203 lu_ref_del(&lock->cll_reference, scope, source);
2204 cl_lock_put(env, lock);
2207 EXPORT_SYMBOL(cl_lock_unhold);
2210 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2212 void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
2213 const char *scope, const void *source)
2215 LINVRNT(cl_lock_invariant(env, lock));
2217 cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
2218 cl_lock_mutex_get(env, lock);
2219 cl_lock_hold_release(env, lock, scope, source);
2220 cl_lock_mutex_put(env, lock);
2221 lu_ref_del(&lock->cll_reference, scope, source);
2222 cl_lock_put(env, lock);
2225 EXPORT_SYMBOL(cl_lock_release);
2227 void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
2229 LINVRNT(cl_lock_is_mutexed(lock));
2230 LINVRNT(cl_lock_invariant(env, lock));
2233 cl_lock_used_mod(env, lock, +1);
2236 EXPORT_SYMBOL(cl_lock_user_add);
2238 int cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
2240 LINVRNT(cl_lock_is_mutexed(lock));
2241 LINVRNT(cl_lock_invariant(env, lock));
2242 LASSERT(lock->cll_users > 0);
2245 cl_lock_used_mod(env, lock, -1);
2246 RETURN(lock->cll_users == 0);
2248 EXPORT_SYMBOL(cl_lock_user_del);
2250 const char *cl_lock_mode_name(const enum cl_lock_mode mode)
2252 static const char *names[] = {
2253 [CLM_PHANTOM] = "P",
2258 if (0 <= mode && mode < ARRAY_SIZE(names))
2263 EXPORT_SYMBOL(cl_lock_mode_name);
2266 * Prints human readable representation of a lock description.
2268 void cl_lock_descr_print(const struct lu_env *env, void *cookie,
2269 lu_printer_t printer,
2270 const struct cl_lock_descr *descr)
2272 const struct lu_fid *fid;
2274 fid = lu_object_fid(&descr->cld_obj->co_lu);
2275 (*printer)(env, cookie, DDESCR"@"DFID, PDESCR(descr), PFID(fid));
2277 EXPORT_SYMBOL(cl_lock_descr_print);
2280 * Prints human readable representation of \a lock to the \a f.
2282 void cl_lock_print(const struct lu_env *env, void *cookie,
2283 lu_printer_t printer, const struct cl_lock *lock)
2285 const struct cl_lock_slice *slice;
2286 (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
2287 lock, cfs_atomic_read(&lock->cll_ref),
2288 lock->cll_state, lock->cll_error, lock->cll_holds,
2289 lock->cll_users, lock->cll_flags);
2290 cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
2291 (*printer)(env, cookie, " {\n");
2293 cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
2294 (*printer)(env, cookie, " %s@%p: ",
2295 slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
2297 if (slice->cls_ops->clo_print != NULL)
2298 slice->cls_ops->clo_print(env, cookie, printer, slice);
2299 (*printer)(env, cookie, "\n");
2301 (*printer)(env, cookie, "} lock@%p\n", lock);
2303 EXPORT_SYMBOL(cl_lock_print);
2305 int cl_lock_init(void)
2307 return lu_kmem_init(cl_lock_caches);
2310 void cl_lock_fini(void)
2312 lu_kmem_fini(cl_lock_caches);