* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <obd_support.h>
#include <lustre_fid.h>
#include <libcfs/list.h>
-/* lu_time_global_{init,fini}() */
-#include <lu_time.h>
-
#include <cl_object.h>
#include "cl_internal.h"
/** Lock class of cl_lock::cll_guard */
-static cfs_lock_class_key_t cl_lock_guard_class;
+static struct lock_class_key cl_lock_guard_class;
static cfs_mem_cache_t *cl_lock_kmem;
static struct lu_kmem_descr cl_lock_caches[] = {
}
};
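+/* Helpers for the per-site lock statistics: bump or drop a counter in
+ * cl_object_site(o)->cs_locks (per-event) or cs_locks_state (per-state).
+ * They compile to nothing when CONFIG_DEBUG_PAGESTATE_TRACKING is not set. */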
+#ifdef CONFIG_DEBUG_PAGESTATE_TRACKING
+#define CS_LOCK_INC(o, item) \
+ cfs_atomic_inc(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+#define CS_LOCK_DEC(o, item) \
+ cfs_atomic_dec(&cl_object_site(o)->cs_locks.cs_stats[CS_##item])
+#define CS_LOCKSTATE_INC(o, state) \
+ cfs_atomic_inc(&cl_object_site(o)->cs_locks_state[state])
+#define CS_LOCKSTATE_DEC(o, state) \
+ cfs_atomic_dec(&cl_object_site(o)->cs_locks_state[state])
+#else
+#define CS_LOCK_INC(o, item)
+#define CS_LOCK_DEC(o, item)
+#define CS_LOCKSTATE_INC(o, state)
+#define CS_LOCKSTATE_DEC(o, state)
+#endif
+
/**
* Basic lock invariant that is maintained at all times. Caller either has a
* reference to \a lock, or somehow assures that \a lock cannot be freed.
#define RETIP ((unsigned long)__builtin_return_address(0))
#ifdef CONFIG_LOCKDEP
-static cfs_lock_class_key_t cl_lock_key;
+static struct lock_class_key cl_lock_key;
static void cl_lock_lockdep_init(struct cl_lock *lock)
{
cfs_list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
- cfs_atomic_dec(&cl_object_site(obj)->cs_locks.cs_total);
- cfs_atomic_dec(&cl_object_site(obj)->cs_locks_state[lock->cll_state]);
+ CS_LOCK_DEC(obj, total);
+ CS_LOCKSTATE_DEC(obj, lock->cll_state);
lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
cl_object_put(env, obj);
lu_ref_fini(&lock->cll_reference);
lu_ref_fini(&lock->cll_holders);
- cfs_mutex_destroy(&lock->cll_guard);
+ mutex_destroy(&lock->cll_guard);
OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
EXIT;
}
void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
{
struct cl_object *obj;
- struct cl_site *site;
LINVRNT(cl_lock_invariant(env, lock));
ENTRY;
obj = lock->cll_descr.cld_obj;
LINVRNT(obj != NULL);
- site = cl_object_site(obj);
CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
cfs_atomic_read(&lock->cll_ref), lock, RETIP);
LASSERT(cfs_list_empty(&lock->cll_linkage));
cl_lock_free(env, lock);
}
- cfs_atomic_dec(&site->cs_locks.cs_busy);
+ CS_LOCK_DEC(obj, busy);
}
EXIT;
}
*/
void cl_lock_get_trust(struct cl_lock *lock)
{
- struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
-
CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
cfs_atomic_read(&lock->cll_ref), lock, RETIP);
if (cfs_atomic_inc_return(&lock->cll_ref) == 1)
- cfs_atomic_inc(&site->cs_locks.cs_busy);
+ CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);
{
struct cl_lock *lock;
struct lu_object_header *head;
- struct cl_site *site = cl_object_site(obj);
ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
lu_ref_init(&lock->cll_reference);
lu_ref_init(&lock->cll_holders);
- cfs_mutex_init(&lock->cll_guard);
- cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
+ mutex_init(&lock->cll_guard);
+ lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
cfs_waitq_init(&lock->cll_wq);
head = obj->co_lu.lo_header;
- cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
- cfs_atomic_inc(&site->cs_locks.cs_total);
- cfs_atomic_inc(&site->cs_locks.cs_created);
+ CS_LOCKSTATE_INC(obj, CLS_NEW);
+ CS_LOCK_INC(obj, total);
+ CS_LOCK_INC(obj, create);
cl_lock_lockdep_init(lock);
cfs_list_for_each_entry(obj, &head->loh_layers,
co_lu.lo_linkage) {
{
struct cl_lock *lock;
struct cl_object_header *head;
- struct cl_site *site;
ENTRY;
head = cl_object_header(obj);
- site = cl_object_site(obj);
LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
- cfs_atomic_inc(&site->cs_locks.cs_lookup);
+ CS_LOCK_INC(obj, lookup);
cfs_list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
int matched;
matched);
if (matched) {
cl_lock_get_trust(lock);
- cfs_atomic_inc(&cl_object_site(obj)->cs_locks.cs_hit);
+ CS_LOCK_INC(obj, hit);
RETURN(lock);
}
}
struct cl_object_header *head;
struct cl_object *obj;
struct cl_lock *lock;
- struct cl_site *site;
ENTRY;
obj = need->cld_obj;
head = cl_object_header(obj);
- site = cl_object_site(obj);
-
- cfs_spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- cfs_spin_unlock(&head->coh_lock_guard);
-
- if (lock == NULL) {
- lock = cl_lock_alloc(env, obj, io, need);
- if (!IS_ERR(lock)) {
- struct cl_lock *ghost;
-
- cfs_spin_lock(&head->coh_lock_guard);
- ghost = cl_lock_lookup(env, obj, io, need);
- if (ghost == NULL) {
- cfs_list_add_tail(&lock->cll_linkage,
- &head->coh_locks);
- cfs_spin_unlock(&head->coh_lock_guard);
- cfs_atomic_inc(&site->cs_locks.cs_busy);
- } else {
- cfs_spin_unlock(&head->coh_lock_guard);
+
+ spin_lock(&head->coh_lock_guard);
+ lock = cl_lock_lookup(env, obj, io, need);
+ spin_unlock(&head->coh_lock_guard);
+
+ if (lock == NULL) {
+ lock = cl_lock_alloc(env, obj, io, need);
+ if (!IS_ERR(lock)) {
+ struct cl_lock *ghost;
+
+ spin_lock(&head->coh_lock_guard);
+ ghost = cl_lock_lookup(env, obj, io, need);
+ if (ghost == NULL) {
+ cfs_list_add_tail(&lock->cll_linkage,
+ &head->coh_locks);
+ spin_unlock(&head->coh_lock_guard);
+ CS_LOCK_INC(obj, busy);
+ } else {
+ spin_unlock(&head->coh_lock_guard);
/*
* Other threads can acquire references to the
* top-lock through its sub-locks. Hence, it
head = cl_object_header(obj);
do {
- cfs_spin_lock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
lock = cl_lock_lookup(env, obj, io, need);
- cfs_spin_unlock(&head->coh_lock_guard);
+ spin_unlock(&head->coh_lock_guard);
if (lock == NULL)
return NULL;
info = cl_env_info(env);
for (i = 0; i < hdr->coh_nesting; ++i)
LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
+ mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
lock->cll_guarder = cfs_current();
LINVRNT(lock->cll_depth == 0);
}
if (lock->cll_guarder == cfs_current()) {
LINVRNT(lock->cll_depth > 0);
cl_lock_mutex_tail(env, lock);
- } else if (cfs_mutex_trylock(&lock->cll_guard)) {
+ } else if (mutex_trylock(&lock->cll_guard)) {
LINVRNT(lock->cll_depth == 0);
lock->cll_guarder = cfs_current();
cl_lock_mutex_tail(env, lock);
counters->ctc_nr_locks_locked--;
if (--lock->cll_depth == 0) {
lock->cll_guarder = NULL;
- cfs_mutex_unlock(&lock->cll_guard);
+ mutex_unlock(&lock->cll_guard);
}
}
EXPORT_SYMBOL(cl_lock_mutex_put);
head = cl_object_header(lock->cll_descr.cld_obj);
- cfs_spin_lock(&head->coh_lock_guard);
- cfs_list_del_init(&lock->cll_linkage);
- cfs_spin_unlock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
+ cfs_list_del_init(&lock->cll_linkage);
+ spin_unlock(&head->coh_lock_guard);
/*
* From now on, no new references to this lock can be acquired
LASSERT(cl_lock_nr_mutexed(env) == 0);
- result = -EINTR;
+ /* Returning ERESTARTSYS instead of EINTR so syscalls
+ * can be restarted if signals are pending here */
+ result = -ERESTARTSYS;
if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
cfs_waitq_wait(&waiter, CFS_TASK_INTERRUPTIBLE);
if (!cfs_signal_pending())
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state)
{
- struct cl_site *site = cl_object_site(lock->cll_descr.cld_obj);
-
ENTRY;
LASSERT(lock->cll_state <= state ||
(lock->cll_state == CLS_CACHED &&
lock->cll_state == CLS_INTRANSIT);
if (lock->cll_state != state) {
- cfs_atomic_dec(&site->cs_locks_state[lock->cll_state]);
- cfs_atomic_inc(&site->cs_locks_state[state]);
+ CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
+ CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
cl_lock_state_signal(env, lock, state);
lock->cll_state = state;
* now. If locks were indexed according to their extent and/or mode,
* that index would have to be updated here.
*/
- cfs_spin_lock(&hdr->coh_lock_guard);
- lock->cll_descr = *desc;
- cfs_spin_unlock(&hdr->coh_lock_guard);
- RETURN(0);
+ spin_lock(&hdr->coh_lock_guard);
+ lock->cll_descr = *desc;
+ spin_unlock(&hdr->coh_lock_guard);
+ RETURN(0);
}
EXPORT_SYMBOL(cl_lock_modify);
need->cld_start = need->cld_end = index;
need->cld_enq_flags = 0;
- cfs_spin_lock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
/* It is fine to match any group lock since there could be only one
* with a unique gid and it conflicts with all other lock modes too */
cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
break;
}
}
- cfs_spin_unlock(&head->coh_lock_guard);
- RETURN(lock);
+ spin_unlock(&head->coh_lock_guard);
+ RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_at_pgoff);
if (index >= info->clt_fn_index) {
struct cl_lock *tmp;
- /* refresh non-overlapped index */
- tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, page, lock,
- 1, 0);
+ /* refresh non-overlapped index */
+ tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
+ lock, 1, 0);
if (tmp != NULL) {
/* Cache the first-non-overlapped index so as to skip
* all pages within [index, clt_fn_index). This
LASSERT(ergo(!cancel,
head->coh_tree.rnode == NULL && head->coh_pages == 0));
- cfs_spin_lock(&head->coh_lock_guard);
- while (!cfs_list_empty(&head->coh_locks)) {
- lock = container_of(head->coh_locks.next,
- struct cl_lock, cll_linkage);
- cl_lock_get_trust(lock);
- cfs_spin_unlock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
+ while (!cfs_list_empty(&head->coh_locks)) {
+ lock = container_of(head->coh_locks.next,
+ struct cl_lock, cll_linkage);
+ cl_lock_get_trust(lock);
+ spin_unlock(&head->coh_lock_guard);
lu_ref_add(&lock->cll_reference, "prune", cfs_current());
again:
cl_lock_mutex_put(env, lock);
lu_ref_del(&lock->cll_reference, "prune", cfs_current());
cl_lock_put(env, lock);
- cfs_spin_lock(&head->coh_lock_guard);
- }
- cfs_spin_unlock(&head->coh_lock_guard);
- EXIT;
+ spin_lock(&head->coh_lock_guard);
+ }
+ spin_unlock(&head->coh_lock_guard);
+ EXIT;
}
EXPORT_SYMBOL(cl_locks_prune);