X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Flov%2Flov_lock.c;h=1faf299b4a44b542acb02b06a80ed7c26e176747;hb=d658d73b5231ba24d2e31315102c6e17dd247364;hp=35548e5889fc2188a08b524dc12a50b57811f53e;hpb=0125d830821f08854adc587f831d06bf30ccbfe8;p=fs%2Flustre-release.git

diff --git a/lustre/lov/lov_lock.c b/lustre/lov/lov_lock.c
index 35548e5..1faf299 100644
--- a/lustre/lov/lov_lock.c
+++ b/lustre/lov/lov_lock.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -157,7 +155,7 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
 
                 parent = lck->lls_cl.cls_lock;
                 lls    = &lck->lls_sub[idx];
-                descr  = &lls->sub_descr;
+                descr  = &lls->sub_got;
 
                 subenv = lov_sublock_env_get(env, parent, lls);
                 if (!IS_ERR(subenv)) {
@@ -271,8 +269,10 @@ static int lov_subresult(int result, int rc)
 
         ENTRY;
 
-        LASSERT(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT);
-        LASSERT(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT);
+        LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
+                 "result = %d", result);
+        LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
+                 "rc = %d\n", rc);
         CLASSERT(CLO_WAIT < CLO_REPEAT);
 
         /* calculate ranks in the ordering above */
@@ -318,7 +318,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
                          * XXX for wide striping smarter algorithm is desirable,
                          * breaking out of the loop, early.
                          */
-                        if (lov_stripe_intersects(r0->lo_lsm, i,
+                        if (lov_stripe_intersects(loo->lo_lsm, i,
                                                   file_start, file_end, &start, &end))
                                 nr++;
                 }
@@ -336,7 +336,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
          * top-lock.
          */
         for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
-                if (lov_stripe_intersects(r0->lo_lsm, i,
+                if (lov_stripe_intersects(loo->lo_lsm, i,
                                           file_start, file_end, &start, &end)) {
                         struct cl_lock_descr *descr;
 
@@ -526,10 +526,13 @@ static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
 
         /* first, try to enqueue a sub-lock ... */
         result = cl_enqueue_try(env, sublock, io, enqflags);
-        if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL))
-                /* if it is enqueued, try to `wait' on it---maybe it's already
-                 * granted */
-                result = cl_wait_try(env, sublock);
+        if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
+                /* if it is enqueued, try to `wait' on it---maybe it's already
+                 * granted */
+                result = cl_wait_try(env, sublock);
+                if (result == CLO_REENQUEUED)
+                        result = CLO_WAIT;
+        }
         /*
          * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
          * parallel, otherwise---enqueue has to wait until sub-lock is granted
@@ -753,17 +756,9 @@ static void lov_lock_cancel(const struct lu_env *env,
 
                         switch(sublock->cll_state) {
                         case CLS_HELD:
-                                rc = cl_unuse_try(subenv->lse_env,
-                                                  sublock);
+                                rc = cl_unuse_try(subenv->lse_env, sublock);
                                 lov_sublock_release(env, lck, i, 0, 0);
                                 break;
-                        case CLS_ENQUEUED:
-                                /* TODO: it's not a good idea to cancel this
-                                 * lock because it's innocent. But it's
-                                 * acceptable. The better way would be to
-                                 * define a new lock method to unhold the
-                                 * dlm lock. */
-                                cl_lock_cancel(env, sublock);
                         default:
                                 lov_sublock_release(env, lck, i, 1, 0);
                                 break;
@@ -929,7 +924,7 @@ static int lock_lock_multi_match()
                 if (sub->sub_lock == NULL)
                         continue;
                 subobj = sub->sub_descr.cld_obj;
-                if (!lov_stripe_intersects(r0->lo_lsm, sub->sub_stripe,
+                if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe,
                                            fstart, fend, &start, &end))
                         continue;
                 subneed->cld_start = cl_index(subobj, start);
@@ -953,7 +948,7 @@ static int lov_lock_stripe_is_matching(const struct lu_env *env,
                                        const struct cl_lock_descr *child,
                                        const struct cl_lock_descr *descr)
 {
-        struct lov_stripe_md *lsm = lov_r0(lov)->lo_lsm;
+        struct lov_stripe_md *lsm = lov->lo_lsm;
         obd_off start;
         obd_off end;
         int result;
@@ -1012,6 +1007,11 @@ static int lov_lock_fits_into(const struct lu_env *env,
 
         ENTRY;
 
+        /* for top lock, it's necessary to match enq flags otherwise it will
+         * run into problem if a sublock is missing and reenqueue. */
+        if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
+                return 0;
+
         if (need->cld_mode == CLM_GROUP)
                 /*
                  * always allow to match group lock.
@@ -1191,6 +1191,41 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
         RETURN(result);
 }
 
+static void lov_empty_lock_fini(const struct lu_env *env,
+                                struct cl_lock_slice *slice)
+{
+        struct lov_lock *lck = cl2lov_lock(slice);
+        OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
+}
+
+static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
+                                lu_printer_t p, const struct cl_lock_slice *slice)
+{
+        (*p)(env, cookie, "empty\n");
+        return 0;
+}
+
+static const struct cl_lock_operations lov_empty_lock_ops = {
+        .clo_fini  = lov_empty_lock_fini,
+        .clo_print = lov_empty_lock_print
+};
+
+int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
+                        struct cl_lock *lock, const struct cl_io *io)
+{
+        struct lov_lock *lck;
+        int result = -ENOMEM;
+
+        ENTRY;
+        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO);
+        if (lck != NULL) {
+                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
+                lck->lls_orig = lock->cll_descr;
+                result = 0;
+        }
+        RETURN(result);
+}
+
 static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
                                                struct cl_lock *parent)
 {
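For context: the final hunk introduces an "empty" cl_lock slice (lov_empty_lock_ops, lov_lock_init_empty) for LOV objects that carry no stripes. Below is a minimal, hypothetical sketch of how a caller could route lock initialization between the empty and raid0 paths; it is not part of this patch, and the dispatch function shown (lov_lock_init_sketch) is an assumption for illustration only.

/* Hypothetical sketch, not part of the patch above: shows one way the new
 * lov_lock_init_empty() could be selected for stripe-less objects while
 * striped objects keep using lov_lock_init_raid0().  The real dispatch
 * lives outside lov_lock.c and may differ. */
static int lov_lock_init_sketch(const struct lu_env *env, struct cl_object *obj,
                                struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_object *lov = cl2lov(obj);

        if (lov->lo_lsm == NULL || lov->lo_lsm->lsm_stripe_count == 0)
                /* no stripes: attach the trivial slice (clo_fini/clo_print only) */
                return lov_lock_init_empty(env, obj, lock, io);

        /* striped object: full raid0 sub-lock machinery */
        return lov_lock_init_raid0(env, obj, lock, io);
}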