-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include "lov_cl_internal.h"
-/** \addtogroup lov lov @{ */
+/** \addtogroup lov
+ * @{
+ */
/*****************************************************************************
*
ENTRY;
lsl = cl2lovsub_lock(slice);
- LASSERT(list_empty(&lsl->lss_parents));
+ LASSERT(cfs_list_empty(&lsl->lss_parents));
OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
EXIT;
}
EXIT;
}
-static int lovsub_lock_state_one(const struct lu_env *env,
- const struct lovsub_lock *lovsub,
- struct lov_lock *lov)
-{
- struct cl_lock *parent;
- struct cl_lock *child;
- int restart = 0;
-
- ENTRY;
- parent = lov->lls_cl.cls_lock;
- child = lovsub->lss_cl.cls_lock;
-
- if (lovsub->lss_active != parent) {
- lovsub_parent_lock(env, lov);
- if (child->cll_error != 0 && parent->cll_error == 0) {
- /*
- * This is a deadlock case:
- * cl_lock_error(for the parent lock)
- * -> cl_lock_delete
- * -> lov_lock_delete
- * -> cl_lock_enclosure
- * -> cl_lock_mutex_try(for the child lock)
- */
- cl_lock_mutex_put(env, child);
- cl_lock_error(env, parent, child->cll_error);
- restart = 1;
- } else {
- cl_lock_signal(env, parent);
- }
- lovsub_parent_unlock(env, lov);
- }
- RETURN(restart);
-}
-
/**
* Implements cl_lock_operations::clo_state() method for lovsub layer, which
* method is called whenever sub-lock state changes. Propagates state change
{
struct lovsub_lock *sub = cl2lovsub_lock(slice);
struct lov_lock_link *scan;
- int restart = 0;
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
ENTRY;
- do {
- restart = 0;
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- restart = lovsub_lock_state_one(env, sub,
- scan->lll_super);
- if (restart) {
- cl_lock_mutex_get(env, slice->cls_lock);
- break;
- }
+ cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ struct lov_lock *lov = scan->lll_super;
+ struct cl_lock *parent = lov->lls_cl.cls_lock;
+
+ if (sub->lss_active != parent) {
+ lovsub_parent_lock(env, lov);
+ cl_lock_signal(env, parent);
+ lovsub_parent_unlock(env, lov);
}
- } while(restart);
+ }
EXIT;
}
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
- if (!list_empty(&lock->lss_parents)) {
+ if (!cfs_list_empty(&lock->lss_parents)) {
/*
* It is not clear whether all parents have to be asked and
* their estimations summed, or it is enough to ask one. For
* Maps start/end offsets within a stripe, to offsets within a file.
*/
static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
- struct lov_object *obj,
- int stripe, struct cl_lock_descr *out)
+ struct lov_object *lov,
+ int stripe, struct cl_lock_descr *out)
{
- struct lov_stripe_md *lsm = lov_r0(obj)->lo_lsm;
pgoff_t size; /* stripe size in pages */
pgoff_t skip; /* how many pages in every stripe are occupied by
* "other" stripes */
start = in->cld_start;
end = in->cld_end;
- /*
- * XXX join file support.
- */
- if (lsm->lsm_stripe_count > 1) {
- size = cl_index(lov2cl(obj), lsm->lsm_stripe_size);
- skip = (lsm->lsm_stripe_count - 1) * size;
+ if (lov->lo_lsm->lsm_stripe_count > 1) {
+ size = cl_index(lov2cl(lov), lov->lo_lsm->lsm_stripe_size);
+ skip = (lov->lo_lsm->lsm_stripe_count - 1) * size;
/* XXX overflow check here? */
start += start/size * skip + stripe * size;
const struct cl_lock_descr *d, int idx)
{
struct cl_lock *parent;
- struct cl_lock *child;
struct lovsub_object *subobj;
struct cl_lock_descr *pd;
struct cl_lock_descr *parent_descr;
parent_descr = &parent->cll_descr;
LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));
- child = sublock->lss_cl.cls_lock;
subobj = cl2lovsub(sublock->lss_cl.cls_obj);
pd = &lov_env_info(env)->lti_ldescr;
LASSERT(cl_lock_mode_match(d->cld_mode,
s->cls_lock->cll_descr.cld_mode));
- list_for_each_entry(scan, &lock->lss_parents, lll_list) {
+ cfs_list_for_each_entry(scan, &lock->lss_parents, lll_list) {
int rc;
lov = scan->lll_super;
sub = cl2lovsub_lock(slice);
result = 0;
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
parent = scan->lll_super->lls_cl.cls_lock;
result = cl_lock_closure_build(env, parent, closure);
if (result != 0)
static int lovsub_lock_delete_one(const struct lu_env *env,
struct cl_lock *child, struct lov_lock *lov)
{
- struct cl_lock *parent;
+ struct cl_lock *parent;
int result;
ENTRY;
- parent = lov->lls_cl.cls_lock;
- result = 0;
+ parent = lov->lls_cl.cls_lock;
+ if (parent->cll_error)
+ RETURN(0);
+ result = 0;
switch (parent->cll_state) {
- case CLS_NEW:
+ case CLS_ENQUEUED:
+		/* See LU-1355 for the case where a glimpse lock is
+		 * interrupted by a signal */
+ LASSERT(parent->cll_flags & CLF_CANCELLED);
+ break;
case CLS_QUEUING:
- case CLS_ENQUEUED:
case CLS_FREEING:
cl_lock_signal(env, parent);
break;
- case CLS_UNLOCKING:
+ case CLS_INTRANSIT:
/*
* Here lies a problem: a sub-lock is canceled while top-lock
* is being unlocked. Top-lock cannot be moved into CLS_NEW
* to be reused immediately). Nor can we wait for top-lock
* state to change, because this can be synchronous to the
* current thread.
- *
+ *
* We know for sure that lov_lock_unuse() will be called at
* least one more time to finish un-using, so leave a mark on
* the top-lock, that will be seen by the next call to
* lov_lock_unuse().
*/
- lov->lls_unuse_race = 1;
+ if (cl_lock_is_intransit(parent))
+ lov->lls_cancel_race = 1;
break;
case CLS_CACHED:
/*
* enqueues missing sub-lock.
*/
cl_lock_state_set(env, parent, CLS_NEW);
+ /* fall through */
+ case CLS_NEW:
/*
* if last sub-lock is canceled, destroy the top-lock (which
* is now `empty') proactively.
}
break;
case CLS_HELD:
+ CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
default:
- CERROR("Impossible state: %i\n", parent->cll_state);
+ CERROR("Impossible state: %d\n", parent->cll_state);
LBUG();
+ break;
}
RETURN(result);
struct lov_lock_sub *subdata;
restart = 0;
- list_for_each_entry_safe(scan, temp,
- &sub->lss_parents, lll_list) {
+ cfs_list_for_each_entry_safe(scan, temp,
+ &sub->lss_parents, lll_list) {
lov = scan->lll_super;
subdata = &lov->lls_sub[scan->lll_idx];
lovsub_parent_lock(env, lov);
struct lov_lock *lov;
struct lov_lock_link *scan;
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+ cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
lov = scan->lll_super;
(*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
if (lov != NULL)
int result;
ENTRY;
- OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, CFS_ALLOC_IO);
+ OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, __GFP_IO);
if (lsk != NULL) {
CFS_INIT_LIST_HEAD(&lsk->lss_parents);
cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);