* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Client Extent Lock.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
+#include <libcfs/libcfs.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
-/* lu_time_global_{init,fini}() */
-#include <lu_time.h>
-
#include <cl_object.h>
#include "cl_internal.h"
/** Lock class of cl_lock::cll_guard */
-static cfs_lock_class_key_t cl_lock_guard_class;
-static cfs_mem_cache_t *cl_lock_kmem;
+static struct lock_class_key cl_lock_guard_class;
+static struct kmem_cache *cl_lock_kmem;
static struct lu_kmem_descr cl_lock_caches[] = {
{
}
};
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CS_LOCK_INC(o, item) \
+ atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+#define CS_LOCK_DEC(o, item) \
+ atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+#define CS_LOCKSTATE_INC(o, state) \
+ atomic_inc(&cl_object_site(o)->cs_locks_state[state])
+#define CS_LOCKSTATE_DEC(o, state) \
+ atomic_dec(&cl_object_site(o)->cs_locks_state[state])
+#else
+#define CS_LOCK_INC(o, item)
+#define CS_LOCK_DEC(o, item)
+#define CS_LOCKSTATE_INC(o, state)
+#define CS_LOCKSTATE_DEC(o, state)
+#endif
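These helpers replace the open-coded cfs_atomic_inc()/cfs_atomic_dec() calls on the per-site counters that are removed throughout the rest of this patch. As an illustration (assuming the CS_busy enumerator implied by the CS_##item token paste exists in the cs_stats array), a call such as

	CS_LOCK_INC(obj, busy);

expands, with CONFIG_DEBUG_PAGESTATE_TRACKING enabled, to

	atomic_inc(&cl_object_site(obj)->cs_locks.cs_stats[CS_busy]);

and to nothing otherwise, so the statistics bookkeeping compiles away on non-debug builds.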
+
/**
* Basic lock invariant that is maintained at all times. Caller either has a
* reference to \a lock, or somehow assures that \a lock cannot be freed.
const struct cl_lock *lock)
{
return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
- cfs_atomic_read(&lock->cll_ref) >= lock->cll_holds &&
+ atomic_read(&lock->cll_ref) >= lock->cll_holds &&
lock->cll_holds >= lock->cll_users &&
lock->cll_holds >= 0 &&
lock->cll_users >= 0 &&
{
int result;
- result = cfs_atomic_read(&lock->cll_ref) > 0 &&
+ result = atomic_read(&lock->cll_ref) > 0 &&
cl_lock_invariant_trusted(env, lock);
- if (!result && env != NULL)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
- return result;
+ if (!result && env != NULL)
+ CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
+ return result;
}
/**
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
"(%p/%d/%d) at %s():%d\n",
- prefix, lock, cfs_atomic_read(&lock->cll_ref),
+ prefix, lock, atomic_read(&lock->cll_ref),
lock->cll_guarder, lock->cll_depth,
lock->cll_state, lock->cll_error, lock->cll_holds,
lock->cll_users, lock->cll_flags,
#define RETIP ((unsigned long)__builtin_return_address(0))
#ifdef CONFIG_LOCKDEP
-static cfs_lock_class_key_t cl_lock_key;
+static struct lock_class_key cl_lock_key;
static void cl_lock_lockdep_init(struct cl_lock *lock)
{
struct cl_lock *lock, __u32 enqflags)
{
cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
-#ifdef HAVE_LOCK_MAP_ACQUIRE
lock_map_acquire(&lock->dep_map);
-#else /* HAVE_LOCK_MAP_ACQUIRE */
- lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
- /* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
- /* check: */ 2, RETIP);
-#endif /* HAVE_LOCK_MAP_ACQUIRE */
}
static void cl_lock_lockdep_release(const struct lu_env *env,
struct cl_lock *lock)
{
cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
- lock_release(&lock->dep_map, 0, RETIP);
+ lock_map_release(&lock->dep_map);
}
#else /* !CONFIG_LOCKDEP */
struct cl_object *obj,
const struct cl_lock_operations *ops)
{
- ENTRY;
- slice->cls_lock = lock;
- cfs_list_add_tail(&slice->cls_linkage, &lock->cll_layers);
- slice->cls_obj = obj;
- slice->cls_ops = ops;
- EXIT;
+ ENTRY;
+ slice->cls_lock = lock;
+ list_add_tail(&slice->cls_linkage, &lock->cll_layers);
+ slice->cls_obj = obj;
+ slice->cls_ops = ops;
+ EXIT;
}
EXPORT_SYMBOL(cl_lock_slice_add);
LINVRNT(!cl_lock_is_mutexed(lock));
- ENTRY;
- cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
- cfs_might_sleep();
- while (!cfs_list_empty(&lock->cll_layers)) {
- struct cl_lock_slice *slice;
-
- slice = cfs_list_entry(lock->cll_layers.next,
- struct cl_lock_slice, cls_linkage);
- cfs_list_del_init(lock->cll_layers.next);
- slice->cls_ops->clo_fini(env, slice);
- }
- cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
- cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
- lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
+ ENTRY;
+ cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
+ while (!list_empty(&lock->cll_layers)) {
+ struct cl_lock_slice *slice;
+
+ slice = list_entry(lock->cll_layers.next,
+ struct cl_lock_slice, cls_linkage);
+ list_del_init(lock->cll_layers.next);
+ slice->cls_ops->clo_fini(env, slice);
+ }
+ CS_LOCK_DEC(obj, total);
+ CS_LOCKSTATE_DEC(obj, lock->cll_state);
+ lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
cl_object_put(env, obj);
lu_ref_fini(&lock->cll_reference);
lu_ref_fini(&lock->cll_holders);
- cfs_mutex_destroy(&lock->cll_guard);
+ mutex_destroy(&lock->cll_guard);
OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
EXIT;
}
void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
{
struct cl_object *obj;
- struct cl_site *site;
LINVRNT(cl_lock_invariant(env, lock));
ENTRY;
obj = lock->cll_descr.cld_obj;
LINVRNT(obj != NULL);
- site = cl_object_site(obj);
CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
- cfs_atomic_read(&lock->cll_ref), lock, RETIP);
+ atomic_read(&lock->cll_ref), lock, RETIP);
- if (cfs_atomic_dec_and_test(&lock->cll_ref)) {
+ if (atomic_dec_and_test(&lock->cll_ref)) {
if (lock->cll_state == CLS_FREEING) {
- LASSERT(cfs_list_empty(&lock->cll_linkage));
+ LASSERT(list_empty(&lock->cll_linkage));
cl_lock_free(env, lock);
}
- cfs_atomic_dec(&site->cs_locks.cs_busy);
+ CS_LOCK_DEC(obj, busy);
}
EXIT;
}
{
LINVRNT(cl_lock_invariant(NULL, lock));
CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
- cfs_atomic_read(&lock->cll_ref), lock, RETIP);
- cfs_atomic_inc(&lock->cll_ref);
+ atomic_read(&lock->cll_ref), lock, RETIP);
+ atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);
*/
void cl_lock_get_trust(struct cl_lock *lock)
{
- struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
-
CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
- cfs_atomic_read(&lock->cll_ref), lock, RETIP);
- if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
- cfs_atomic_inc(&site->cs_locks.cs_busy);
+ atomic_read(&lock->cll_ref), lock, RETIP);
+ if (atomic_inc_return(&lock->cll_ref) == 1)
+ CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);
}
static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *descr)
-{
- struct cl_lock *lock;
- struct lu_object_header *head;
- struct cl_site *site = cl_object_site(obj);
-
- ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
- if (lock != NULL) {
- cfs_atomic_set(&lock->cll_ref, 1);
- lock->cll_descr = *descr;
- lock->cll_state = CLS_NEW;
- cl_object_get(obj);
- lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
- "cl_lock", lock);
- CFS_INIT_LIST_HEAD(&lock->cll_layers);
- CFS_INIT_LIST_HEAD(&lock->cll_linkage);
- CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
- lu_ref_init(&lock->cll_reference);
- lu_ref_init(&lock->cll_holders);
- cfs_mutex_init(&lock->cll_guard);
- cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
- cfs_waitq_init(&lock->cll_wq);
- head = obj->co_lu.lo_header;
- cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
- cfs_atomic_inc(&site->cs_locks.cs_total);
- cfs_atomic_inc(&site->cs_locks.cs_created);
- cl_lock_lockdep_init(lock);
- cfs_list_for_each_entry(obj, &head->loh_layers,
- co_lu.lo_linkage) {
- int err;
-
- err = obj->co_ops->coo_lock_init(env, obj, lock, io);
- if (err != 0) {
- cl_lock_finish(env, lock);
- lock = ERR_PTR(err);
- break;
- }
- }
- } else
- lock = ERR_PTR(-ENOMEM);
- RETURN(lock);
+ struct cl_object *obj,
+ const struct cl_io *io,
+ const struct cl_lock_descr *descr)
+{
+ struct cl_lock *lock;
+ struct lu_object_header *head;
+
+ ENTRY;
+ OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, GFP_NOFS);
+ if (lock != NULL) {
+ atomic_set(&lock->cll_ref, 1);
+ lock->cll_descr = *descr;
+ lock->cll_state = CLS_NEW;
+ cl_object_get(obj);
+ lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
+ lock);
+ INIT_LIST_HEAD(&lock->cll_layers);
+ INIT_LIST_HEAD(&lock->cll_linkage);
+ INIT_LIST_HEAD(&lock->cll_inclosure);
+ lu_ref_init(&lock->cll_reference);
+ lu_ref_init(&lock->cll_holders);
+ mutex_init(&lock->cll_guard);
+ lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
+ init_waitqueue_head(&lock->cll_wq);
+ head = obj->co_lu.lo_header;
+ CS_LOCKSTATE_INC(obj, CLS_NEW);
+ CS_LOCK_INC(obj, total);
+ CS_LOCK_INC(obj, create);
+ cl_lock_lockdep_init(lock);
+ list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
+ int err;
+
+ err = obj->co_ops->coo_lock_init(env, obj, lock, io);
+ if (err != 0) {
+ cl_lock_finish(env, lock);
+ lock = ERR_PTR(err);
+ break;
+ }
+ }
+ } else
+ lock = ERR_PTR(-ENOMEM);
+ RETURN(lock);
}
/**
* \see CLS_INTRANSIT
*/
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
- struct cl_lock *lock)
+ struct cl_lock *lock)
{
- enum cl_lock_state state = lock->cll_state;
+ enum cl_lock_state state = lock->cll_state;
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(state != CLS_INTRANSIT);
- LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
- "Malformed lock state %d.\n", state);
+ LASSERT(cl_lock_is_mutexed(lock));
+ LASSERT(state != CLS_INTRANSIT);
+ LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
+ "Malformed lock state %d.\n", state);
- cl_lock_state_set(env, lock, CLS_INTRANSIT);
- lock->cll_intransit_owner = cfs_current();
- cl_lock_hold_add(env, lock, "intransit", cfs_current());
- return state;
+ cl_lock_state_set(env, lock, CLS_INTRANSIT);
+ lock->cll_intransit_owner = current;
+ cl_lock_hold_add(env, lock, "intransit", current);
+ return state;
}
EXPORT_SYMBOL(cl_lock_intransit);
* Exit the intransit state and restore the lock state to the original state
*/
void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
+ enum cl_lock_state state)
{
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(state != CLS_INTRANSIT);
- LASSERT(lock->cll_intransit_owner == cfs_current());
+ LASSERT(cl_lock_is_mutexed(lock));
+ LASSERT(lock->cll_state == CLS_INTRANSIT);
+ LASSERT(state != CLS_INTRANSIT);
+ LASSERT(lock->cll_intransit_owner == current);
- lock->cll_intransit_owner = NULL;
- cl_lock_state_set(env, lock, state);
- cl_lock_unhold(env, lock, "intransit", cfs_current());
+ lock->cll_intransit_owner = NULL;
+ cl_lock_state_set(env, lock, state);
+ cl_lock_unhold(env, lock, "intransit", current);
}
EXPORT_SYMBOL(cl_lock_extransit);
*/
int cl_lock_is_intransit(struct cl_lock *lock)
{
- LASSERT(cl_lock_is_mutexed(lock));
- return lock->cll_state == CLS_INTRANSIT &&
- lock->cll_intransit_owner != cfs_current();
+ LASSERT(cl_lock_is_mutexed(lock));
+ return lock->cll_state == CLS_INTRANSIT &&
+ lock->cll_intransit_owner != current;
}
EXPORT_SYMBOL(cl_lock_is_intransit);
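A sketch of the intended calling pattern, based on the use in cl_use_try() later in this patch (not verbatim caller code): the thread that owns the transition saves the old state, performs its blocking work, then leaves the intransit state.

	enum cl_lock_state state;

	/* lock mutex is held here */
	state = cl_lock_intransit(env, lock);	/* take ownership, remember old state */
	/* ... potentially blocking work; the mutex may be dropped and re-taken ... */
	cl_lock_extransit(env, lock, state);	/* leave intransit, restoring (or setting) the state */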
/**
LINVRNT(cl_lock_invariant_trusted(env, lock));
ENTRY;
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_fits_into != NULL &&
!slice->cls_ops->clo_fits_into(env, slice, need, io))
RETURN(0);
{
struct cl_lock *lock;
struct cl_object_header *head;
- struct cl_site *site;
ENTRY;
- head = cl_object_header(obj);
- site = cl_object_site(obj);
- LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
- cfs_atomic_inc(&site->cs_locks.cs_lookup);
- cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
- int matched;
+ head = cl_object_header(obj);
+ assert_spin_locked(&head->coh_lock_guard);
+ CS_LOCK_INC(obj, lookup);
+ list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
+ int matched;
matched = cl_lock_ext_match(&lock->cll_descr, need) &&
lock->cll_state < CLS_FREEING &&
matched);
if (matched) {
cl_lock_get_trust(lock);
- cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
+ CS_LOCK_INC(obj, hit);
RETURN(lock);
}
}
struct cl_object_header *head;
struct cl_object *obj;
struct cl_lock *lock;
- struct cl_site *site;
ENTRY;
obj = need->cld_obj;
head = cl_object_header(obj);
- site = cl_object_site(obj);
-
- cfs_spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- cfs_spin_unlock(&head->coh_lock_guard);
-
- if (lock == NULL) {
- lock = cl_lock_alloc(env, obj, io, need);
- if (!IS_ERR(lock)) {
- struct cl_lock *ghost;
-
- cfs_spin_lock(&head->coh_lock_guard);
- ghost = cl_lock_lookup(env, obj, io, need);
- if (ghost == NULL) {
- cfs_list_add_tail(&lock->cll_linkage,
- &head->coh_locks);
- cfs_spin_unlock(&head->coh_lock_guard);
- cfs_atomic_inc(&site->cs_locks.cs_busy);
- } else {
- cfs_spin_unlock(&head->coh_lock_guard);
+
+ spin_lock(&head->coh_lock_guard);
+ lock = cl_lock_lookup(env, obj, io, need);
+ spin_unlock(&head->coh_lock_guard);
+
+ if (lock == NULL) {
+ lock = cl_lock_alloc(env, obj, io, need);
+ if (!IS_ERR(lock)) {
+ struct cl_lock *ghost;
+
+ spin_lock(&head->coh_lock_guard);
+ ghost = cl_lock_lookup(env, obj, io, need);
+ if (ghost == NULL) {
+ cl_lock_get_trust(lock);
+ list_add_tail(&lock->cll_linkage,
+ &head->coh_locks);
+ spin_unlock(&head->coh_lock_guard);
+ CS_LOCK_INC(obj, busy);
+ } else {
+ spin_unlock(&head->coh_lock_guard);
/*
* Other threads can acquire references to the
* top-lock through its sub-locks. Hence, it
obj = need->cld_obj;
head = cl_object_header(obj);
- cfs_spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- cfs_spin_unlock(&head->coh_lock_guard);
-
- if (lock == NULL)
- return NULL;
+ do {
+ spin_lock(&head->coh_lock_guard);
+ lock = cl_lock_lookup(env, obj, io, need);
+ spin_unlock(&head->coh_lock_guard);
+ if (lock == NULL)
+ return NULL;
+
+ cl_lock_mutex_get(env, lock);
+ if (lock->cll_state == CLS_INTRANSIT)
+ /* Don't care about the return value. */
+ cl_lock_state_wait(env, lock);
+ if (lock->cll_state == CLS_FREEING) {
+ cl_lock_mutex_put(env, lock);
+ cl_lock_put(env, lock);
+ lock = NULL;
+ }
+ } while (lock == NULL);
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state == CLS_INTRANSIT)
- cl_lock_state_wait(env, lock); /* Don't care return value. */
cl_lock_hold_add(env, lock, scope, source);
cl_lock_user_add(env, lock);
if (lock->cll_state == CLS_CACHED)
LINVRNT(cl_lock_invariant_trusted(NULL, lock));
ENTRY;
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
RETURN(slice);
}
*/
void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
{
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_guarder == cfs_current()) {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_depth > 0);
- } else {
- struct cl_object_header *hdr;
- struct cl_thread_info *info;
- int i;
+ LINVRNT(cl_lock_invariant(env, lock));
- LINVRNT(lock->cll_guarder != cfs_current());
- hdr = cl_object_header(lock->cll_descr.cld_obj);
- /*
- * Check that mutices are taken in the bottom-to-top order.
- */
- info = cl_env_info(env);
- for (i = 0; i < hdr->coh_nesting; ++i)
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
- lock->cll_guarder = cfs_current();
- LINVRNT(lock->cll_depth == 0);
- }
- cl_lock_mutex_tail(env, lock);
+ if (lock->cll_guarder == current) {
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(lock->cll_depth > 0);
+ } else {
+ struct cl_object_header *hdr;
+ struct cl_thread_info *info;
+ int i;
+
+ LINVRNT(lock->cll_guarder != current);
+ hdr = cl_object_header(lock->cll_descr.cld_obj);
+ /*
+ * Check that mutexes are taken in bottom-to-top order.
+ */
+ info = cl_env_info(env);
+ for (i = 0; i < hdr->coh_nesting; ++i)
+ LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
+ mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
+ lock->cll_guarder = current;
+ LINVRNT(lock->cll_depth == 0);
+ }
+ cl_lock_mutex_tail(env, lock);
}
EXPORT_SYMBOL(cl_lock_mutex_get);
*/
int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
{
- int result;
-
- LINVRNT(cl_lock_invariant_trusted(env, lock));
- ENTRY;
-
- result = 0;
- if (lock->cll_guarder == cfs_current()) {
- LINVRNT(lock->cll_depth > 0);
- cl_lock_mutex_tail(env, lock);
- } else if (cfs_mutex_trylock(&lock->cll_guard)) {
- LINVRNT(lock->cll_depth == 0);
- lock->cll_guarder = cfs_current();
- cl_lock_mutex_tail(env, lock);
- } else
- result = -EBUSY;
- RETURN(result);
+ int result;
+
+ LINVRNT(cl_lock_invariant_trusted(env, lock));
+ ENTRY;
+
+ result = 0;
+ if (lock->cll_guarder == current) {
+ LINVRNT(lock->cll_depth > 0);
+ cl_lock_mutex_tail(env, lock);
+ } else if (mutex_trylock(&lock->cll_guard)) {
+ LINVRNT(lock->cll_depth == 0);
+ lock->cll_guarder = current;
+ cl_lock_mutex_tail(env, lock);
+ } else
+ result = -EBUSY;
+ RETURN(result);
}
EXPORT_SYMBOL(cl_lock_mutex_try);
*/
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
- struct cl_thread_counters *counters;
+ struct cl_thread_counters *counters;
- LINVRNT(cl_lock_invariant(env, lock));
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_guarder == cfs_current());
- LINVRNT(lock->cll_depth > 0);
+ LINVRNT(cl_lock_invariant(env, lock));
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(lock->cll_guarder == current);
+ LINVRNT(lock->cll_depth > 0);
- counters = cl_lock_counters(env, lock);
- LINVRNT(counters->ctc_nr_locks_locked > 0);
-
- cl_lock_trace(D_TRACE, env, "put mutex", lock);
- lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
- counters->ctc_nr_locks_locked--;
- if (--lock->cll_depth == 0) {
- lock->cll_guarder = NULL;
- cfs_mutex_unlock(&lock->cll_guard);
- }
+ counters = cl_lock_counters(env, lock);
+ LINVRNT(counters->ctc_nr_locks_locked > 0);
+
+ cl_lock_trace(D_TRACE, env, "put mutex", lock);
+ lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
+ counters->ctc_nr_locks_locked--;
+ if (--lock->cll_depth == 0) {
+ lock->cll_guarder = NULL;
+ mutex_unlock(&lock->cll_guard);
+ }
}
EXPORT_SYMBOL(cl_lock_mutex_put);
*/
int cl_lock_is_mutexed(struct cl_lock *lock)
{
- return lock->cll_guarder == cfs_current();
+ return lock->cll_guarder == current;
}
EXPORT_SYMBOL(cl_lock_is_mutexed);
const struct cl_lock_slice *slice;
lock->cll_flags |= CLF_CANCELLED;
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
+ list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
if (slice->cls_ops->clo_cancel != NULL)
slice->cls_ops->clo_cancel(env, slice);
}
ENTRY;
if (lock->cll_state < CLS_FREEING) {
+ bool in_cache;
+
LASSERT(lock->cll_state != CLS_INTRANSIT);
cl_lock_state_set(env, lock, CLS_FREEING);
head = cl_object_header(lock->cll_descr.cld_obj);
- cfs_spin_lock(&head->coh_lock_guard);
- cfs_list_del_init(&lock->cll_linkage);
-
- cfs_spin_unlock(&head->coh_lock_guard);
- /*
- * From now on, no new references to this lock can be acquired
- * by cl_lock_lookup().
- */
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_delete != NULL)
- slice->cls_ops->clo_delete(env, slice);
- }
- /*
- * From now on, no new references to this lock can be acquired
- * by layer-specific means (like a pointer from struct
- * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
- * lov).
- *
- * Lock will be finally freed in cl_lock_put() when last of
- * existing references goes away.
- */
- }
- EXIT;
+ spin_lock(&head->coh_lock_guard);
+ in_cache = !list_empty(&lock->cll_linkage);
+ if (in_cache)
+ list_del_init(&lock->cll_linkage);
+ spin_unlock(&head->coh_lock_guard);
+
+ if (in_cache) /* coh_locks cache holds a refcount. */
+ cl_lock_put(env, lock);
+
+ /*
+ * From now on, no new references to this lock can be acquired
+ * by cl_lock_lookup().
+ */
+ list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
+ if (slice->cls_ops->clo_delete != NULL)
+ slice->cls_ops->clo_delete(env, slice);
+ }
+ /*
+ * From now on, no new references to this lock can be acquired
+ * by layer-specific means (like a pointer from struct
+ * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
+ * lov).
+ *
+ * Lock will be finally freed in cl_lock_put() when last of
+ * existing references goes away.
+ */
+ }
+ EXIT;
}
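With this change the coh_locks list itself holds a reference: cl_lock_lookup_or_create() takes cl_lock_get_trust() before linking the lock into head->coh_locks, and cl_lock_delete0() drops that reference once the lock is unlinked. A condensed sketch of the pairing (reassembled from the hunks above, not additional code):

	/* insertion side (cl_lock_lookup_or_create) */
	spin_lock(&head->coh_lock_guard);
	cl_lock_get_trust(lock);			/* reference owned by coh_locks */
	list_add_tail(&lock->cll_linkage, &head->coh_locks);
	spin_unlock(&head->coh_lock_guard);

	/* removal side (cl_lock_delete0) */
	spin_lock(&head->coh_lock_guard);
	in_cache = !list_empty(&lock->cll_linkage);
	if (in_cache)
		list_del_init(&lock->cll_linkage);
	spin_unlock(&head->coh_lock_guard);
	if (in_cache)					/* coh_locks cache held a refcount */
		cl_lock_put(env, lock);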
/**
}
}
-static void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
+void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source)
{
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
}
EXIT;
}
+EXPORT_SYMBOL(cl_lock_hold_release);
/**
* Waits until lock state is changed.
*/
int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
{
- cfs_waitlink_t waiter;
- cfs_sigset_t blocked;
- int result;
-
- ENTRY;
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_depth == 1);
- LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
-
- cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
- result = lock->cll_error;
- if (result == 0) {
- /* To avoid being interrupted by the 'non-fatal' signals
- * (SIGCHLD, for instance), we'd block them temporarily.
- * LU-305 */
- blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- cfs_waitlink_init(&waiter);
- cfs_waitq_add(&lock->cll_wq, &waiter);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cl_lock_mutex_put(env, lock);
+ wait_queue_t waiter;
+ sigset_t blocked;
+ int result;
+
+ ENTRY;
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_depth == 1);
+ LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
+
+ cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
+ result = lock->cll_error;
+ if (result == 0) {
+ /* To avoid being interrupted by the 'non-fatal' signals
+ * (SIGCHLD, for instance), we'd block them temporarily.
+ * LU-305 */
+ blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
+
+ init_waitqueue_entry_current(&waiter);
+ add_wait_queue(&lock->cll_wq, &waiter);
+ set_current_state(TASK_INTERRUPTIBLE);
+ cl_lock_mutex_put(env, lock);
- LASSERT(cl_lock_nr_mutexed(env) == 0);
+ LASSERT(cl_lock_nr_mutexed(env) == 0);
- result = -EINTR;
+ /* Return -ERESTARTSYS rather than -EINTR so that the syscall
+ * can be restarted if a signal is pending here. */
+ result = -ERESTARTSYS;
if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
- cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&waiter, TASK_INTERRUPTIBLE);
if (!cfs_signal_pending())
result = 0;
}
- cl_lock_mutex_get(env, lock);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&lock->cll_wq, &waiter);
+ cl_lock_mutex_get(env, lock);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&lock->cll_wq, &waiter);
- /* Restore old blocked signals */
- cfs_restore_sigs(blocked);
- }
- RETURN(result);
+ /* Restore old blocked signals */
+ cfs_restore_sigs(blocked);
+ }
+ RETURN(result);
}
EXPORT_SYMBOL(cl_lock_state_wait);
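cl_lock_state_wait() must be entered with the lock mutex held exactly once; it drops the mutex while sleeping on cll_wq and re-acquires it before returning, so callers re-check the state afterwards, as the lookup retry loop added earlier in this patch does. A minimal hypothetical caller that only waits out the intransit state might look like:

	cl_lock_mutex_get(env, lock);
	while (lock->cll_state == CLS_INTRANSIT) {
		/* returns 0 on a state change, -ERESTARTSYS if interrupted */
		if (cl_lock_state_wait(env, lock) != 0)
			break;
	}
	cl_lock_mutex_put(env, lock);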
static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
+ enum cl_lock_state state)
{
- const struct cl_lock_slice *slice;
+ const struct cl_lock_slice *slice;
- ENTRY;
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
+ ENTRY;
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
- if (slice->cls_ops->clo_state != NULL)
- slice->cls_ops->clo_state(env, slice, state);
- cfs_waitq_broadcast(&lock->cll_wq);
- EXIT;
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
+ if (slice->cls_ops->clo_state != NULL)
+ slice->cls_ops->clo_state(env, slice, state);
+ wake_up_all(&lock->cll_wq);
+ EXIT;
}
/**
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state)
{
- struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
-
ENTRY;
LASSERT(lock->cll_state <= state ||
(lock->cll_state == CLS_CACHED &&
lock->cll_state == CLS_INTRANSIT);
if (lock->cll_state != state) {
- cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
- cfs_atomic_inc(&site->cs_locks_state[state]);
+ CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
+ CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
cl_lock_state_signal(env, lock, state);
lock->cll_state = state;
static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
{
- const struct cl_lock_slice *slice;
- int result;
+ const struct cl_lock_slice *slice;
+ int result;
- do {
- result = 0;
+ do {
+ result = 0;
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_state == CLS_INTRANSIT);
- result = -ENOSYS;
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_unuse != NULL) {
- result = slice->cls_ops->clo_unuse(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- } while (result == CLO_REPEAT);
+ result = -ENOSYS;
+ list_for_each_entry_reverse(slice, &lock->cll_layers,
+ cls_linkage) {
+ if (slice->cls_ops->clo_unuse != NULL) {
+ result = slice->cls_ops->clo_unuse(env, slice);
+ if (result != 0)
+ break;
+ }
+ }
+ LASSERT(result != -ENOSYS);
+ } while (result == CLO_REPEAT);
- return result;
+ return result;
}
/**
result = -ENOSYS;
state = cl_lock_intransit(env, lock);
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_use != NULL) {
result = slice->cls_ops->clo_use(env, slice);
if (result != 0)
ENTRY;
result = -ENOSYS;
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_enqueue != NULL) {
result = slice->cls_ops->clo_enqueue(env,
slice, io, flags);
case CLS_QUEUING:
/* kick layers. */
result = cl_enqueue_kick(env, lock, io, flags);
- if (result == 0)
+ /* In the AGL case, cl_lock::cll_state may already
+ * have become CLS_HELD by this point. */
+ if (result == 0 && lock->cll_state == CLS_QUEUING)
cl_lock_state_set(env, lock, CLS_ENQUEUED);
break;
case CLS_INTRANSIT:
do {
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD ||
- lock->cll_state == CLS_INTRANSIT);
+ LASSERTF(lock->cll_state == CLS_QUEUING ||
+ lock->cll_state == CLS_ENQUEUED ||
+ lock->cll_state == CLS_HELD ||
+ lock->cll_state == CLS_INTRANSIT,
+ "lock state: %d\n", lock->cll_state);
LASSERT(lock->cll_users > 0);
LASSERT(lock->cll_holds > 0);
break;
result = -ENOSYS;
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_wait != NULL) {
result = slice->cls_ops->clo_wait(env, slice);
if (result != 0)
LINVRNT(cl_lock_invariant(env, lock));
pound = 0;
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_weigh != NULL) {
ounce = slice->cls_ops->clo_weigh(env, slice);
pound += ounce;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- cfs_list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_modify != NULL) {
result = slice->cls_ops->clo_modify(env, slice, desc);
if (result != 0)
* now. If locks were indexed according to their extent and/or mode,
* that index would have to be updated here.
*/
- cfs_spin_lock(&hdr->coh_lock_guard);
- lock->cll_descr = *desc;
- cfs_spin_unlock(&hdr->coh_lock_guard);
- RETURN(0);
+ spin_lock(&hdr->coh_lock_guard);
+ lock->cll_descr = *desc;
+ spin_unlock(&hdr->coh_lock_guard);
+ RETURN(0);
}
EXPORT_SYMBOL(cl_lock_modify);
LINVRNT(cl_lock_is_mutexed(origin));
LINVRNT(cl_lock_invariant(env, origin));
- CFS_INIT_LIST_HEAD(&closure->clc_list);
+ INIT_LIST_HEAD(&closure->clc_list);
closure->clc_origin = origin;
closure->clc_wait = wait;
closure->clc_nr = 0;
result = cl_lock_enclosure(env, lock, closure);
if (result == 0) {
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_closure != NULL) {
result = slice->cls_ops->clo_closure(env, slice,
closure);
* If lock->cll_inclosure is not empty, lock is already in
* this closure.
*/
- if (cfs_list_empty(&lock->cll_inclosure)) {
+ if (list_empty(&lock->cll_inclosure)) {
cl_lock_get_trust(lock);
lu_ref_add(&lock->cll_reference, "closure", closure);
- cfs_list_add(&lock->cll_inclosure, &closure->clc_list);
+ list_add(&lock->cll_inclosure, &closure->clc_list);
closure->clc_nr++;
} else
cl_lock_mutex_put(env, lock);
void cl_lock_disclosure(const struct lu_env *env,
struct cl_lock_closure *closure)
{
- struct cl_lock *scan;
- struct cl_lock *temp;
-
- cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
- cfs_list_for_each_entry_safe(scan, temp, &closure->clc_list,
- cll_inclosure){
- cfs_list_del_init(&scan->cll_inclosure);
- cl_lock_mutex_put(env, scan);
- lu_ref_del(&scan->cll_reference, "closure", closure);
- cl_lock_put(env, scan);
- closure->clc_nr--;
- }
- LASSERT(closure->clc_nr == 0);
+ struct cl_lock *scan;
+ struct cl_lock *temp;
+
+ cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
+ list_for_each_entry_safe(scan, temp, &closure->clc_list,
+ cll_inclosure) {
+ list_del_init(&scan->cll_inclosure);
+ cl_lock_mutex_put(env, scan);
+ lu_ref_del(&scan->cll_reference, "closure", closure);
+ cl_lock_put(env, scan);
+ closure->clc_nr--;
+ }
+ LASSERT(closure->clc_nr == 0);
}
EXPORT_SYMBOL(cl_lock_disclosure);
void cl_lock_closure_fini(struct cl_lock_closure *closure)
{
LASSERT(closure->clc_nr == 0);
- LASSERT(cfs_list_empty(&closure->clc_list));
+ LASSERT(list_empty(&closure->clc_list));
}
EXPORT_SYMBOL(cl_lock_closure_fini);
need->cld_start = need->cld_end = index;
need->cld_enq_flags = 0;
- cfs_spin_lock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
/* It is fine to match any group lock since there could be only one
* with a unique gid and it conflicts with all other lock modes too */
- cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
+ list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
if (scan != except &&
(scan->cll_descr.cld_mode == CLM_GROUP ||
cl_lock_ext_match(&scan->cll_descr, need)) &&
break;
}
}
- cfs_spin_unlock(&head->coh_lock_guard);
- RETURN(lock);
+ spin_unlock(&head->coh_lock_guard);
+ RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_at_pgoff);
/**
- * Calculate the page offset at the layer of @lock.
- * At the time of this writing, @page is top page and @lock is sub lock.
- */
-static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
-{
- struct lu_device_type *dtype;
- const struct cl_page_slice *slice;
-
- dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
- slice = cl_page_at(page, dtype);
- LASSERT(slice != NULL);
- return slice->cpl_page->cp_index;
-}
-
-/**
- * Check if page @page is covered by an extra lock or discard it.
- */
-static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
- pgoff_t index = pgoff_at_lock(page, lock);
-
- if (index >= info->clt_fn_index) {
- struct cl_lock *tmp;
-
- /* refresh non-overlapped index */
- tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, page, lock,
- 1, 0);
- if (tmp != NULL) {
- /* Cache the first-non-overlapped index so as to skip
- * all pages within [index, clt_fn_index). This
- * is safe because if tmp lock is canceled, it will
- * discard these pages. */
- info->clt_fn_index = tmp->cll_descr.cld_end + 1;
- if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
- info->clt_fn_index = CL_PAGE_EOF;
- cl_lock_put(env, tmp);
- } else if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- }
- }
-
- info->clt_next_index = index + 1;
- return CLP_GANG_OKAY;
-}
-
-static int discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
-
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageWriteback(cl_page_vmpage(env, page))));
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageDirty(cl_page_vmpage(env, page))));
-
- info->clt_next_index = pgoff_at_lock(page, lock) + 1;
- if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- }
-
- return CLP_GANG_OKAY;
-}
-
-/**
- * Discard pages protected by the given lock. This function traverses radix
- * tree to find all covering pages and discard them. If a page is being covered
- * by other locks, it should remain in cache.
- *
- * If error happens on any step, the process continues anyway (the reasoning
- * behind this being that lock cancellation cannot be delayed indefinitely).
- */
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_io *io = &info->clt_io;
- struct cl_lock_descr *descr = &lock->cll_descr;
- cl_page_gang_cb_t cb;
- int res;
- int result;
-
- LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
-
- io->ci_obj = cl_object_top(descr->cld_obj);
- result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result != 0)
- GOTO(out, result);
-
- cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
- info->clt_fn_index = info->clt_next_index = descr->cld_start;
- do {
- res = cl_page_gang_lookup(env, descr->cld_obj, io,
- info->clt_next_index, descr->cld_end,
- cb, (void *)lock);
- if (info->clt_next_index > descr->cld_end)
- break;
-
- if (res == CLP_GANG_RESCHED)
- cfs_cond_resched();
- } while (res != CLP_GANG_OKAY);
-out:
- cl_io_fini(env, io);
- RETURN(result);
-}
-EXPORT_SYMBOL(cl_lock_discard_pages);
-
-/**
* Eliminate all locks for a given object.
*
* Caller has to guarantee that no lock is in active use.
*/
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
{
- struct cl_object_header *head;
- struct cl_lock *lock;
+ struct cl_object_header *head;
+ struct cl_lock *lock;
- ENTRY;
- head = cl_object_header(obj);
- /*
- * If locks are destroyed without cancellation, all pages must be
- * already destroyed (as otherwise they will be left unprotected).
- */
- LASSERT(ergo(!cancel,
- head->coh_tree.rnode == NULL && head->coh_pages == 0));
+ ENTRY;
+ head = cl_object_header(obj);
- cfs_spin_lock(&head->coh_lock_guard);
- while (!cfs_list_empty(&head->coh_locks)) {
- lock = container_of(head->coh_locks.next,
- struct cl_lock, cll_linkage);
- cl_lock_get_trust(lock);
- cfs_spin_unlock(&head->coh_lock_guard);
- lu_ref_add(&lock->cll_reference, "prune", cfs_current());
+ spin_lock(&head->coh_lock_guard);
+ while (!list_empty(&head->coh_locks)) {
+ lock = container_of(head->coh_locks.next,
+ struct cl_lock, cll_linkage);
+ cl_lock_get_trust(lock);
+ spin_unlock(&head->coh_lock_guard);
+ lu_ref_add(&lock->cll_reference, "prune", current);
again:
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING) {
- LASSERT(lock->cll_holds == 0);
- LASSERT(lock->cll_users <= 1);
- if (unlikely(lock->cll_users == 1)) {
- struct l_wait_info lwi = { 0 };
-
- cl_lock_mutex_put(env, lock);
- l_wait_event(lock->cll_wq,
- lock->cll_users == 0,
- &lwi);
- goto again;
- }
-
- if (cancel)
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, "prune", cfs_current());
- cl_lock_put(env, lock);
- cfs_spin_lock(&head->coh_lock_guard);
- }
- cfs_spin_unlock(&head->coh_lock_guard);
- EXIT;
+ cl_lock_mutex_get(env, lock);
+ if (lock->cll_state < CLS_FREEING) {
+ LASSERT(lock->cll_users <= 1);
+ if (unlikely(lock->cll_users == 1)) {
+ struct l_wait_info lwi = { 0 };
+
+ cl_lock_mutex_put(env, lock);
+ l_wait_event(lock->cll_wq,
+ lock->cll_users == 0,
+ &lwi);
+ goto again;
+ }
+
+ if (cancel)
+ cl_lock_cancel(env, lock);
+ cl_lock_delete(env, lock);
+ }
+ cl_lock_mutex_put(env, lock);
+ lu_ref_del(&lock->cll_reference, "prune", current);
+ cl_lock_put(env, lock);
+ spin_lock(&head->coh_lock_guard);
+ }
+ spin_unlock(&head->coh_lock_guard);
+ EXIT;
}
EXPORT_SYMBOL(cl_locks_prune);
LASSERT(lock->cll_state != CLS_FREEING);
ENTRY;
- cl_lock_hold_mod(env, lock, +1);
cl_lock_get(lock);
+ cl_lock_hold_mod(env, lock, +1);
lu_ref_add(&lock->cll_holders, scope, source);
lu_ref_add(&lock->cll_reference, scope, source);
EXIT;
void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_users > 0);
+ LINVRNT(cl_lock_is_mutexed(lock));
+ LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(lock->cll_users > 0);
- ENTRY;
- cl_lock_used_mod(env, lock, -1);
- if (lock->cll_users == 0)
- cfs_waitq_broadcast(&lock->cll_wq);
- EXIT;
+ ENTRY;
+ cl_lock_used_mod(env, lock, -1);
+ if (lock->cll_users == 0)
+ wake_up_all(&lock->cll_wq);
+ EXIT;
}
EXPORT_SYMBOL(cl_lock_user_del);
[CLM_WRITE] = "W",
[CLM_GROUP] = "G"
};
- if (0 <= mode && mode < ARRAY_SIZE(names))
- return names[mode];
- else
- return "U";
+ CLASSERT(CLM_MAX == ARRAY_SIZE(names));
+ return names[mode];
}
EXPORT_SYMBOL(cl_lock_mode_name);
{
const struct cl_lock_slice *slice;
(*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
- lock, cfs_atomic_read(&lock->cll_ref),
+ lock, atomic_read(&lock->cll_ref),
lock->cll_state, lock->cll_error, lock->cll_holds,
lock->cll_users, lock->cll_flags);
cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
(*printer)(env, cookie, " {\n");
- cfs_list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
+ list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
(*printer)(env, cookie, " %s@%p: ",
slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
slice);