return result;
}
+/**
+ * Returns the lock's "nesting" level: 0 for a top-lock and 1 for a sub-lock.
+ */
+static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
+{
+ return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
+}
+
+/**
+ * Returns the set of per-thread counters for this lock, selected by its
+ * nesting level.
+ */
+static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
+ const struct cl_lock *lock)
+{
+ struct cl_thread_info *info;
+ enum clt_nesting_level nesting;
+
+ info = cl_env_info(env);
+ nesting = cl_lock_nesting(lock);
+ LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
+ return &info->clt_counters[nesting];
+}
+
#define RETIP ((unsigned long)__builtin_return_address(0))
#ifdef CONFIG_LOCKDEP
static void cl_lock_lockdep_acquire(const struct lu_env *env,
struct cl_lock *lock, __u32 enqflags)
{
- cl_env_info(env)->clt_nr_locks_acquired++;
+ cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
lock_acquire(&lock->dep_map, !!(enqflags & CEF_ASYNC),
/* try: */ 0, lock->cll_descr.cld_mode <= CLM_READ,
/* check: */ 2, RETIP);
static void cl_lock_lockdep_release(const struct lu_env *env,
struct cl_lock *lock)
{
- cl_env_info(env)->clt_nr_locks_acquired--;
+ cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
lock_release(&lock->dep_map, 0, RETIP);
}
*/
int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
{
- LINVRNT(need == CLM_READ || need == CLM_WRITE || need == CLM_PHANTOM);
- LINVRNT(has == CLM_READ || has == CLM_WRITE || has == CLM_PHANTOM);
+ LINVRNT(need == CLM_READ || need == CLM_WRITE ||
+ need == CLM_PHANTOM || need == CLM_GROUP);
+ LINVRNT(has == CLM_READ || has == CLM_WRITE ||
+ has == CLM_PHANTOM || has == CLM_GROUP);
CLASSERT(CLM_PHANTOM < CLM_READ);
CLASSERT(CLM_READ < CLM_WRITE);
+ CLASSERT(CLM_WRITE < CLM_GROUP);
- return need <= has;
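+ /*
+ * A group lock is compatible only with another group lock; equality
+ * of group ids is checked separately in cl_lock_ext_match().
+ */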
+ if (has != CLM_GROUP)
+ return need <= has;
+ else
+ return need == has;
}
EXPORT_SYMBOL(cl_lock_mode_match);
return
has->cld_start <= need->cld_start &&
has->cld_end >= need->cld_end &&
- cl_lock_mode_match(has->cld_mode, need->cld_mode);
+ cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
+ (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
}
EXPORT_SYMBOL(cl_lock_ext_match);
LASSERT(cl_is_lock(lock));
LINVRNT(!cl_lock_is_mutexed(lock));
- LINVRNT(!mutex_is_locked(&lock->cll_guard));
ENTRY;
might_sleep();
struct cl_site *site = cl_object_site(obj);
ENTRY;
- OBD_SLAB_ALLOC_PTR(lock, cl_lock_kmem);
+ OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO);
if (lock != NULL) {
atomic_set(&lock->cll_ref, 1);
lock->cll_descr = *descr;
head = cl_object_header(obj);
site = cl_object_site(obj);
- LINVRNT(spin_is_locked(&head->coh_lock_guard));
+ LINVRNT_SPIN_LOCKED(&head->coh_lock_guard);
atomic_inc(&site->cs_locks.cs_lookup);
list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
int matched;
if (ok) {
cl_lock_hold_add(env, lock, scope, source);
cl_lock_user_add(env, lock);
+ cl_lock_put(env, lock);
}
cl_lock_mutex_put(env, lock);
if (!ok) {
}
EXPORT_SYMBOL(cl_lock_at);
-static void cl_lock_trace(struct cl_thread_info *info,
+static void cl_lock_trace(struct cl_thread_counters *counters,
const char *prefix, const struct cl_lock *lock)
{
CDEBUG(D_DLMTRACE|D_TRACE, "%s: %i@%p %p %i %i\n", prefix,
atomic_read(&lock->cll_ref), lock, lock->cll_guarder,
- lock->cll_depth, info->clt_nr_locks_locked);
+ lock->cll_depth, counters->ctc_nr_locks_locked);
}
static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
{
- struct cl_thread_info *info;
+ struct cl_thread_counters *counters;
- info = cl_env_info(env);
+ counters = cl_lock_counters(env, lock);
lock->cll_depth++;
- info->clt_nr_locks_locked++;
- lu_ref_add(&info->clt_locks_locked, "cll_guard", lock);
- cl_lock_trace(info, "got mutex", lock);
+ counters->ctc_nr_locks_locked++;
+ lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
+ cl_lock_trace(counters, "got mutex", lock);
}
/**
LINVRNT(lock->cll_depth > 0);
} else {
struct cl_object_header *hdr;
+ struct cl_thread_info *info;
+ int i;
LINVRNT(lock->cll_guarder != cfs_current());
hdr = cl_object_header(lock->cll_descr.cld_obj);
+ /*
+ * Check that mutexes are taken in bottom-to-top order.
+ */
+ info = cl_env_info(env);
+ for (i = 0; i < hdr->coh_nesting; ++i)
+ LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
lock->cll_guarder = cfs_current();
LINVRNT(lock->cll_depth == 0);
*/
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
- struct cl_thread_info *info;
+ struct cl_thread_counters *counters;
LINVRNT(cl_lock_invariant(env, lock));
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(lock->cll_guarder == cfs_current());
LINVRNT(lock->cll_depth > 0);
- info = cl_env_info(env);
- LINVRNT(info->clt_nr_locks_locked > 0);
+ counters = cl_lock_counters(env, lock);
+ LINVRNT(counters->ctc_nr_locks_locked > 0);
- cl_lock_trace(info, "put mutex", lock);
- lu_ref_del(&info->clt_locks_locked, "cll_guard", lock);
- info->clt_nr_locks_locked--;
+ cl_lock_trace(counters, "put mutex", lock);
+ lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
+ counters->ctc_nr_locks_locked--;
if (--lock->cll_depth == 0) {
lock->cll_guarder = NULL;
mutex_unlock(&lock->cll_guard);
*/
int cl_lock_nr_mutexed(const struct lu_env *env)
{
- return cl_env_info(env)->clt_nr_locks_locked;
+ struct cl_thread_info *info;
+ int i;
+ int locked;
+
+ /*
+ * NOTE: if summation across all nesting levels (currently 2) proves
+ * too expensive, a summary counter can be added to
+ * struct cl_thread_info.
+ */
+ info = cl_env_info(env);
+ for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
+ locked += info->clt_counters[i].ctc_nr_locks_locked;
+ return locked;
}
EXPORT_SYMBOL(cl_lock_nr_mutexed);
* and head->coh_nesting == 1 check assumes two level top-sub
* hierarchy.
*/
- LASSERT(ergo(head->coh_nesting == 1 &&
- list_empty(&head->coh_locks), !head->coh_pages));
+ /*
+ * The count of pages of this object may NOT be zero because
+ * we do not clean up pages that are in CPS_FREEING state.
+ * See cl_page_gang_lookup().
+ *
+ * It is safe to leave CPS_FREEING pages in the cache without
+ * a lock, because such pages must not be uptodate.
+ * See cl_page_delete0() for details.
+ */
+ /* LASSERT(!ergo(head->coh_nesting == 1 &&
+ list_empty(&head->coh_locks), !head->coh_pages)); */
spin_unlock(&head->coh_lock_guard);
/*
* From now on, no new references to this lock can be acquired
EXIT;
}
+/**
+ * Modifies the cl_lock::cll_holds counter for a given lock. For a top-lock
+ * (nesting == 0) the modification is also accounted in the per-thread
+ * debugging counters. Sub-lock holds may be released by a thread different
+ * from the one that acquired them, so they are not tracked per thread.
+ */
static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
int delta)
{
- struct cl_thread_info *cti;
- struct cl_object_header *hdr;
+ struct cl_thread_counters *counters;
+ enum clt_nesting_level nesting;
- cti = cl_env_info(env);
- hdr = cl_object_header(lock->cll_descr.cld_obj);
lock->cll_holds += delta;
- if (hdr->coh_nesting == 0) {
- cti->clt_nr_held += delta;
- LASSERT(cti->clt_nr_held >= 0);
+ nesting = cl_lock_nesting(lock);
+ if (nesting == CNL_TOP) {
+ counters = &cl_env_info(env)->clt_counters[CNL_TOP];
+ counters->ctc_nr_held += delta;
+ LASSERT(counters->ctc_nr_held >= 0);
}
}
+/**
+ * Modifies the cl_lock::cll_users counter for a given lock. See
+ * cl_lock_hold_mod() for an explanation of the debugging code.
+ */
static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
int delta)
{
- struct cl_thread_info *cti;
- struct cl_object_header *hdr;
+ struct cl_thread_counters *counters;
+ enum clt_nesting_level nesting;
- cti = cl_env_info(env);
- hdr = cl_object_header(lock->cll_descr.cld_obj);
lock->cll_users += delta;
- if (hdr->coh_nesting == 0) {
- cti->clt_nr_used += delta;
- LASSERT(cti->clt_nr_used >= 0);
+ nesting = cl_lock_nesting(lock);
+ if (nesting == CNL_TOP) {
+ counters = &cl_env_info(env)->clt_counters[CNL_TOP];
+ counters->ctc_nr_used += delta;
+ LASSERT(counters->ctc_nr_used >= 0);
}
}
lu_ref_del(&lock->cll_holders, scope, source);
cl_lock_hold_mod(env, lock, -1);
if (lock->cll_holds == 0) {
- if (lock->cll_descr.cld_mode == CLM_PHANTOM)
+ if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
+ lock->cll_descr.cld_mode == CLM_GROUP)
/*
- * If lock is still phantom when user is done with
- * it---destroy the lock.
+ * If the lock is still a phantom or group lock when the
+ * user is done with it, destroy the lock.
*/
lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
if (lock->cll_flags & CLF_CANCELPEND) {
* cl_lock_put() to finish it.
*
* \pre atomic_read(&lock->cll_ref) > 0
+ * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
+ * cl_lock_nr_mutexed(env) == 1)
+ * [i.e., if a top-lock is deleted, no other lock mutexes may be
+ * held, as deleting the sub-locks might require releasing a top-lock
+ * mutex]
*
* \see cl_lock_operations::clo_delete()
* \see cl_lock::cll_holds
{
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
+ LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
+ cl_lock_nr_mutexed(env) == 1));
ENTRY;
if (lock->cll_holds == 0)
{
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
+
ENTRY;
if (lock->cll_holds == 0)
cl_lock_cancel0(env, lock);
list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
if (scan != except &&
cl_lock_ext_match(&scan->cll_descr, need) &&
+ scan->cll_state >= CLS_HELD &&
scan->cll_state < CLS_FREEING &&
/*
* This check is racy as the lock can be canceled right
struct cl_io *io = &info->clt_io;
struct cl_2queue *queue = &info->clt_queue;
struct cl_lock_descr *descr = &lock->cll_descr;
- int result;
- int rc0;
- int rc1;
+ long page_count;
+ int result;
LINVRNT(cl_lock_invariant(env, lock));
ENTRY;
io->ci_obj = cl_object_top(descr->cld_obj);
result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (result == 0) {
-
cl_2queue_init(queue);
cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
descr->cld_end, &queue->c2_qin);
- if (queue->c2_qin.pl_nr > 0) {
+ page_count = queue->c2_qin.pl_nr;
+ if (page_count > 0) {
result = cl_page_list_unmap(env, io, &queue->c2_qin);
if (!discard) {
- rc0 = cl_io_submit_rw(env, io,
- CRT_WRITE, queue);
- rc1 = cl_page_list_own(env, io,
- &queue->c2_qout);
- result = result ?: rc0 ?: rc1;
+ long timeout = 600; /* 10 minutes. */
+ /* For debugging: if this request cannot
+ * finish within 10 minutes, the timeout
+ * makes sure we are notified.
+ */
+ result = cl_io_submit_sync(env, io, CRT_WRITE,
+ queue, CRP_CANCEL,
+ timeout);
+ if (result)
+ CWARN("Writing %lu pages error: %d\n",
+ page_count, result);
}
cl_lock_page_list_fixup(env, io, lock, &queue->c2_qout);
cl_2queue_discard(env, io, queue);
const struct lu_fid *fid;
int rc;
int iter;
+ int warn;
ENTRY;
fid = lu_object_fid(&io->ci_obj->co_lu);
iter = 0;
do {
- CDEBUG(iter >= 16 && IS_PO2(iter) ? D_WARNING : D_DLMTRACE,
+ warn = iter >= 16 && IS_PO2(iter);
+ CDEBUG(warn ? D_WARNING : D_DLMTRACE,
DDESCR"@"DFID" %i %08x `%s'\n",
PDESCR(need), PFID(fid), iter, enqflags, scope);
lock = cl_lock_hold_mutex(env, io, need, scope, source);
cl_lock_lockdep_acquire(env,
lock, enqflags);
break;
- }
+ } else if (warn)
+ CL_LOCK_DEBUG(D_WARNING, env, lock,
+ "got (see bug 17665)\n");
cl_unuse_locked(env, lock);
}
cl_lock_hold_release(env, lock, scope, source);
static const char *names[] = {
[CLM_PHANTOM] = "PHANTOM",
[CLM_READ] = "READ",
- [CLM_WRITE] = "WRITE"
+ [CLM_WRITE] = "WRITE",
+ [CLM_GROUP] = "GROUP"
};
if (0 <= mode && mode < ARRAY_SIZE(names))
return names[mode];