X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Flov%2Flovsub_lock.c;h=ab1d9d9b0e858ed782beca1bc7fdcbaaac503ac0;hb=091c8a93c184b2ff75a71ba665dbfcf38e435a6f;hp=c97cb356b3b5e692348adccedb43bb69e491d9c5;hpb=b15f3875f46eec3c5186fe6b84cf2cda7ceb8518;p=fs%2Flustre-release.git

diff --git a/lustre/lov/lovsub_lock.c b/lustre/lov/lovsub_lock.c
index c97cb35..ab1d9d9 100644
--- a/lustre/lov/lovsub_lock.c
+++ b/lustre/lov/lovsub_lock.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,8 +24,10 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -59,7 +59,7 @@ static void lovsub_lock_fini(const struct lu_env *env,
 
         ENTRY;
         lsl = cl2lovsub_lock(slice);
-        LASSERT(list_empty(&lsl->lss_parents));
+        LASSERT(cfs_list_empty(&lsl->lss_parents));
         OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
         EXIT;
 }
@@ -88,40 +88,6 @@ static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
         EXIT;
 }
 
-static int lovsub_lock_state_one(const struct lu_env *env,
-                                 const struct lovsub_lock *lovsub,
-                                 struct lov_lock *lov)
-{
-        struct cl_lock *parent;
-        struct cl_lock *child;
-        int             restart = 0;
-
-        ENTRY;
-        parent = lov->lls_cl.cls_lock;
-        child  = lovsub->lss_cl.cls_lock;
-
-        if (lovsub->lss_active != parent) {
-                lovsub_parent_lock(env, lov);
-                if (child->cll_error != 0 && parent->cll_error == 0) {
-                        /*
-                         * This is a deadlock case:
-                         * cl_lock_error(for the parent lock)
-                         *   -> cl_lock_delete
-                         *     -> lov_lock_delete
-                         *       -> cl_lock_enclosure
-                         *         -> cl_lock_mutex_try(for the child lock)
-                         */
-                        cl_lock_mutex_put(env, child);
-                        cl_lock_error(env, parent, child->cll_error);
-                        restart = 1;
-                } else {
-                        cl_lock_signal(env, parent);
-                }
-                lovsub_parent_unlock(env, lov);
-        }
-        RETURN(restart);
-}
-
 /**
  * Implements cl_lock_operations::clo_state() method for lovsub layer, which
  * method is called whenever sub-lock state changes. Propagates state change
@@ -133,22 +99,20 @@ static void lovsub_lock_state(const struct lu_env *env,
 {
         struct lovsub_lock   *sub  = cl2lovsub_lock(slice);
         struct lov_lock_link *scan;
-        int                   restart = 0;
 
         LASSERT(cl_lock_is_mutexed(slice->cls_lock));
         ENTRY;
 
-        do {
-                restart = 0;
-                list_for_each_entry(scan, &sub->lss_parents, lll_list) {
-                        restart = lovsub_lock_state_one(env, sub,
-                                                        scan->lll_super);
-                        if (restart) {
-                                cl_lock_mutex_get(env, slice->cls_lock);
-                                break;
-                        }
+        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+                struct lov_lock *lov    = scan->lll_super;
+                struct cl_lock  *parent = lov->lls_cl.cls_lock;
+
+                if (sub->lss_active != parent) {
+                        lovsub_parent_lock(env, lov);
+                        cl_lock_signal(env, parent);
+                        lovsub_parent_unlock(env, lov);
                 }
-        } while(restart);
+        }
         EXIT;
 }
 
@@ -167,7 +131,7 @@ static unsigned long lovsub_lock_weigh(const struct lu_env *env,
 
         LASSERT(cl_lock_is_mutexed(slice->cls_lock));
 
-        if (!list_empty(&lock->lss_parents)) {
+        if (!cfs_list_empty(&lock->lss_parents)) {
                 /*
                  * It is not clear whether all parents have to be asked and
                  * their estimations summed, or it is enough to ask one. For
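Note: the two lovsub_lock_state() hunks above drop the restart-based error
propagation (lovsub_lock_state_one() plus its do/while loop) in favour of a
single pass that merely signals each parent top-lock under that parent's
mutex; the child-mutex deadlock the deleted comment described can no longer
arise here. Below is a minimal user-space sketch of the resulting
propagation pattern. All types and helpers are stand-ins, not the real
cl_lock/lov_lock API.

#include <stdio.h>

/* Stand-ins for the cl_lock machinery (illustrative only). */
struct parent_lock {
        const char *name;
        int         signalled;  /* models cl_lock_signal() waking waiters */
};

static void parent_mutex_get(struct parent_lock *p) { (void)p; } /* ~lovsub_parent_lock() */
static void parent_mutex_put(struct parent_lock *p) { (void)p; } /* ~lovsub_parent_unlock() */
static void parent_signal(struct parent_lock *p)    { p->signalled = 1; }

int main(void)
{
        struct parent_lock parents[] = { {"p0", 0}, {"p1", 0}, {"p2", 0} };
        struct parent_lock *active = &parents[1]; /* models sub->lss_active */
        unsigned int i;

        /* One pass over all parents, no restart loop: the parent that is
         * actively operating on this sub-lock (lss_active) already knows
         * about the state change, so only the others are signalled. */
        for (i = 0; i < sizeof(parents) / sizeof(parents[0]); i++) {
                struct parent_lock *p = &parents[i];

                if (p != active) {
                        parent_mutex_get(p);
                        parent_signal(p);
                        parent_mutex_put(p);
                }
        }

        for (i = 0; i < sizeof(parents) / sizeof(parents[0]); i++)
                printf("%s signalled=%d\n", parents[i].name,
                       parents[i].signalled);
        return 0;
}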
@@ -203,9 +167,6 @@ static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
         start = in->cld_start;
         end   = in->cld_end;
 
-        /*
-         * XXX join file support.
-         */
         if (lsm->lsm_stripe_count > 1) {
                 size = cl_index(lov2cl(obj), lsm->lsm_stripe_size);
                 skip = (lsm->lsm_stripe_count - 1) * size;
@@ -244,7 +205,6 @@ int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
                        const struct cl_lock_descr *d, int idx)
 {
         struct cl_lock       *parent;
-        struct cl_lock       *child;
         struct lovsub_object *subobj;
         struct cl_lock_descr *pd;
         struct cl_lock_descr *parent_descr;
@@ -254,7 +214,6 @@ int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
         parent_descr = &parent->cll_descr;
         LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));
 
-        child  = sublock->lss_cl.cls_lock;
         subobj = cl2lovsub(sublock->lss_cl.cls_obj);
         pd     = &lov_env_info(env)->lti_ldescr;
 
@@ -287,7 +246,7 @@ static int lovsub_lock_modify(const struct lu_env *env,
         LASSERT(cl_lock_mode_match(d->cld_mode,
                                    s->cls_lock->cll_descr.cld_mode));
 
-        list_for_each_entry(scan, &lock->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &lock->lss_parents, lll_list) {
                 int rc;
 
                 lov = scan->lll_super;
@@ -314,7 +273,7 @@ static int lovsub_lock_closure(const struct lu_env *env,
 
         sub    = cl2lovsub_lock(slice);
         result = 0;
-        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 parent = scan->lll_super->lls_cl.cls_lock;
                 result = cl_lock_closure_build(env, parent, closure);
                 if (result != 0)
@@ -330,21 +289,26 @@ static int lovsub_lock_closure(const struct lu_env *env,
 static int lovsub_lock_delete_one(const struct lu_env *env,
                                   struct cl_lock *child, struct lov_lock *lov)
 {
-        struct cl_lock *parent;
+        struct cl_lock *parent;
         int             result;
         ENTRY;
 
-        parent = lov->lls_cl.cls_lock;
-        result = 0;
+        parent = lov->lls_cl.cls_lock;
+        if (parent->cll_error)
+                RETURN(0);
 
+        result = 0;
         switch (parent->cll_state) {
-        case CLS_NEW:
+        case CLS_ENQUEUED:
+                /* See LU-1355 for the case that a glimpse lock is
+                 * interrupted by signal */
+                LASSERT(parent->cll_flags & CLF_CANCELLED);
+                break;
         case CLS_QUEUING:
-        case CLS_ENQUEUED:
         case CLS_FREEING:
                 cl_lock_signal(env, parent);
                 break;
-        case CLS_UNLOCKING:
+        case CLS_INTRANSIT:
                 /*
                  * Here lies a problem: a sub-lock is canceled while top-lock
                  * is being unlocked. Top-lock cannot be moved into CLS_NEW
@@ -356,13 +320,14 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
                  * to be reused immediately). Nor can we wait for top-lock
                  * state to change, because this can be synchronous to the
                  * current thread.
-                 * 
+                 *
                  * We know for sure that lov_lock_unuse() will be called at
                  * least one more time to finish un-using, so leave a mark on
                  * the top-lock, that will be seen by the next call to
                  * lov_lock_unuse().
                  */
-                lov->lls_unuse_race = 1;
+                if (cl_lock_is_intransit(parent))
+                        lov->lls_cancel_race = 1;
                 break;
         case CLS_CACHED:
                 /*
@@ -373,6 +338,8 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
                  * enqueues missing sub-lock.
                  */
                 cl_lock_state_set(env, parent, CLS_NEW);
+                /* fall through */
+        case CLS_NEW:
                 /*
                  * if last sub-lock is canceled, destroy the top-lock (which
                  * is now `empty') proactively.
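Note: the lovsub_lock_delete_one() hunks above change three things: the
function now returns early when the parent already carries an error;
CLS_ENQUEUED becomes its own case that asserts CLF_CANCELLED (the LU-1355
signal-interrupted glimpse case); and CLS_UNLOCKING is replaced by
CLS_INTRANSIT, with the race flag renamed from lls_unuse_race to
lls_cancel_race and set only while the parent really is in transit, while
CLS_CACHED now degrades to CLS_NEW and falls through. A compilable sketch
of the reworked dispatch follows; the enum and flag merely mirror the
cl_lock names, everything else is illustrative, not the Lustre API.

#include <assert.h>
#include <stdio.h>

/* Mock states/flags echoing the cl_lock enums referenced above. */
enum mock_state { CLS_NEW, CLS_QUEUING, CLS_ENQUEUED, CLS_HELD,
                  CLS_INTRANSIT, CLS_CACHED, CLS_FREEING };
#define CLF_CANCELLED 0x1

static const char *on_sub_delete(enum mock_state state, unsigned int flags,
                                 int in_transit, int *cancel_race)
{
        switch (state) {
        case CLS_ENQUEUED:
                /* only a cancelled enqueue (e.g. a glimpse interrupted
                 * by a signal, LU-1355) may reach this point */
                assert(flags & CLF_CANCELLED);
                return "signal parent";
        case CLS_QUEUING:
        case CLS_FREEING:
                return "signal parent";
        case CLS_INTRANSIT:
                /* leave a mark for the next lov_lock_unuse() call */
                if (in_transit)
                        *cancel_race = 1;
                return "mark cancel race";
        case CLS_CACHED:
                /* degrade to CLS_NEW first, then fall through: */
        case CLS_NEW:
                return "destroy top-lock if now empty";
        default:
                return "LBUG";
        }
}

int main(void)
{
        int race = 0;

        printf("%s\n", on_sub_delete(CLS_INTRANSIT, 0, 1, &race));
        printf("cancel_race=%d\n", race);
        printf("%s\n", on_sub_delete(CLS_CACHED, 0, 0, &race));
        return 0;
}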
@@ -413,9 +380,11 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
                 }
                 break;
         case CLS_HELD:
+                CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
         default:
-                CERROR("Impossible state: %i\n", parent->cll_state);
+                CERROR("Impossible state: %d\n", parent->cll_state);
                 LBUG();
+                break;
         }
 
         RETURN(result);
@@ -450,8 +419,8 @@ static void lovsub_lock_delete(const struct lu_env *env,
                 struct lov_lock_sub  *subdata;
 
                 restart = 0;
-                list_for_each_entry_safe(scan, temp,
-                                         &sub->lss_parents, lll_list) {
+                cfs_list_for_each_entry_safe(scan, temp,
+                                             &sub->lss_parents, lll_list) {
                         lov     = scan->lll_super;
                         subdata = &lov->lls_sub[scan->lll_idx];
                         lovsub_parent_lock(env, lov);
@@ -476,7 +445,7 @@ static int lovsub_lock_print(const struct lu_env *env, void *cookie,
         struct lov_lock      *lov;
         struct lov_lock_link *scan;
 
-        list_for_each_entry(scan, &sub->lss_parents, lll_list) {
+        cfs_list_for_each_entry(scan, &sub->lss_parents, lll_list) {
                 lov = scan->lll_super;
                 (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
                 if (lov != NULL)
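Note: the final hunks harden the error path (dump the lock with
CL_LOCK_DEBUG() before LBUG() on an unexpected CLS_HELD deletion, use the
conventional %d specifier, add a defensive break after LBUG()) and finish
the list_* to cfs_list_* renaming. The _safe iterator in
lovsub_lock_delete() matters because the loop body unlinks the very entry
it is visiting. A stand-alone sketch of that pattern on a plain
singly-linked list (not libcfs):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int          idx;
        struct node *next;
};

int main(void)
{
        struct node *head = NULL, *scan, *temp;
        int i;

        /* build a small list */
        for (i = 0; i < 4; i++) {
                scan = malloc(sizeof(*scan));
                if (scan == NULL)
                        return 1;
                scan->idx  = i;
                scan->next = head;
                head = scan;
        }

        /* "safe" traversal: cache ->next in temp before the current entry
         * is freed; this is exactly what the two-cursor
         * cfs_list_for_each_entry_safe(scan, temp, ...) form provides */
        for (scan = head; scan != NULL; scan = temp) {
                temp = scan->next;
                printf("deleting node %d\n", scan->idx);
                free(scan);
        }
        return 0;
}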