X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Flov%2Flovsub_lock.c;h=22f35e60fabd7f877fbeca0ffd810db3bd25c96d;hp=4810da41d425b53346dd26e11a9e3cf79a7d0d7c;hb=db46ab8d95b1c6e040039c65acb0c30641421659;hpb=fa507031d245b08c7f24efed32819daf2aa42ab3

diff --git a/lustre/lov/lovsub_lock.c b/lustre/lov/lovsub_lock.c
index 4810da4..22f35e6 100644
--- a/lustre/lov/lovsub_lock.c
+++ b/lustre/lov/lovsub_lock.c
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -28,6 +26,8 @@
 /*
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -66,26 +66,26 @@ static void lovsub_lock_fini(const struct lu_env *env,
 
 static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
 {
-        struct cl_lock *parent;
-
-        ENTRY;
-        parent = lov->lls_cl.cls_lock;
-        cl_lock_get(parent);
-        lu_ref_add(&parent->cll_reference, "lovsub-parent", cfs_current());
-        cl_lock_mutex_get(env, parent);
-        EXIT;
+	struct cl_lock *parent;
+
+	ENTRY;
+	parent = lov->lls_cl.cls_lock;
+	cl_lock_get(parent);
+	lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
+	cl_lock_mutex_get(env, parent);
+	EXIT;
 }
 
 static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
 {
-        struct cl_lock *parent;
-
-        ENTRY;
-        parent = lov->lls_cl.cls_lock;
-        cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
-        lu_ref_del(&parent->cll_reference, "lovsub-parent", cfs_current());
-        cl_lock_put(env, parent);
-        EXIT;
+	struct cl_lock *parent;
+
+	ENTRY;
+	parent = lov->lls_cl.cls_lock;
+	cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
+	lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
+	cl_lock_put(env, parent);
+	EXIT;
 }
 
 /**
@@ -153,10 +153,9 @@ static unsigned long lovsub_lock_weigh(const struct lu_env *env,
  * Maps start/end offsets within a stripe, to offsets within a file.
  */
 static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
-                                  struct lov_object *obj,
-                                  int stripe, struct cl_lock_descr *out)
+				  struct lov_object *lov,
+				  int stripe, struct cl_lock_descr *out)
 {
-        struct lov_stripe_md *lsm = lov_r0(obj)->lo_lsm;
         pgoff_t size; /* stripe size in pages */
         pgoff_t skip; /* how many pages in every stripe are occupied by
                        * "other" stripes */
@@ -167,9 +166,9 @@ static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
         start = in->cld_start;
         end   = in->cld_end;
 
-        if (lsm->lsm_stripe_count > 1) {
-                size = cl_index(lov2cl(obj), lsm->lsm_stripe_size);
-                skip = (lsm->lsm_stripe_count - 1) * size;
+	if (lov->lo_lsm->lsm_stripe_count > 1) {
+		size = cl_index(lov2cl(lov), lov->lo_lsm->lsm_stripe_size);
+		skip = (lov->lo_lsm->lsm_stripe_count - 1) * size;
 
                 /* XXX overflow check here? */
                 start += start/size * skip + stripe * size;
@@ -205,7 +204,6 @@ int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
                        const struct cl_lock_descr *d, int idx)
 {
         struct cl_lock       *parent;
-        struct cl_lock       *child;
         struct lovsub_object *subobj;
         struct cl_lock_descr *pd;
         struct cl_lock_descr *parent_descr;
@@ -215,7 +213,6 @@ int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
         parent_descr = &parent->cll_descr;
         LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));
 
-        child  = sublock->lss_cl.cls_lock;
         subobj = cl2lovsub(sublock->lss_cl.cls_obj);
 
         pd = &lov_env_info(env)->lti_ldescr;
@@ -223,7 +220,14 @@ int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
         pd->cld_mode = parent_descr->cld_mode;
         pd->cld_gid  = parent_descr->cld_gid;
         lovsub_lock_descr_map(d, subobj->lso_super, subobj->lso_index, pd);
-        lov->lls_sub[idx].sub_got = *d;
+
+	/* LU-3027: only update extent of lock, plus the change in
+	 * lovsub_lock_delete() that lock extent is modified after a sublock
+	 * is canceled, we can make sure that the lock extent won't be updated
+	 * any more. Therefore, lov_lock_fits_into() will always find feasible
+	 * locks */
+	lov->lls_sub[idx].sub_got.cld_start = d->cld_start;
+	lov->lls_sub[idx].sub_got.cld_end = d->cld_end;
         /*
          * Notify top-lock about modification, if lock description changes
          * materially.
@@ -300,7 +304,13 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
                 RETURN(0);
 
         result = 0;
+	lov->lls_ever_canceled = 1;
         switch (parent->cll_state) {
+	case CLS_ENQUEUED:
+		/* See LU-1355 for the case that a glimpse lock is
+		 * interrupted by signal */
+		LASSERT(parent->cll_flags & CLF_CANCELLED);
+		break;
         case CLS_QUEUING:
         case CLS_FREEING:
                 cl_lock_signal(env, parent);
@@ -376,9 +386,9 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
                         }
                 }
                 break;
-        case CLS_ENQUEUED:
         case CLS_HELD:
                 CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
+		/* falling through */
         default:
                 CERROR("Impossible state: %d\n", parent->cll_state);
                 LBUG();
@@ -414,15 +424,12 @@ static void lovsub_lock_delete(const struct lu_env *env,
                 struct lov_lock      *lov;
                 struct lov_lock_link *scan;
                 struct lov_lock_link *temp;
-                struct lov_lock_sub  *subdata;
 
                 restart = 0;
                 cfs_list_for_each_entry_safe(scan, temp,
                                              &sub->lss_parents, lll_list) {
                         lov = scan->lll_super;
-                        subdata = &lov->lls_sub[scan->lll_idx];
                         lovsub_parent_lock(env, lov);
-                        subdata->sub_got = subdata->sub_descr;
                         lov_lock_unlink(env, scan, sub);
                         restart = lovsub_lock_delete_one(env, child, lov);
                         lovsub_parent_unlock(env, lov);
@@ -471,7 +478,7 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
         int result;
 
         ENTRY;
-        OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, CFS_ALLOC_IO);
+	OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, __GFP_IO);
         if (lsk != NULL) {
                 CFS_INIT_LIST_HEAD(&lsk->lss_parents);
                 cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
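For reference, the arithmetic shown in the lovsub_lock_descr_map() hunks maps a page index that is local to one stripe back to the corresponding page index in the striped file: with a stripe size of `size` pages and `skip = (stripe_count - 1) * size`, a stripe-local index `start` becomes `start + start/size * skip + stripe * size`. The standalone sketch below only illustrates that RAID-0 style calculation; it is not Lustre code, and the function name and the local pgoff_t typedef are made up for the example.

#include <stdio.h>

typedef unsigned long pgoff_t;	/* local stand-in for the kernel type */

/*
 * Map a page index relative to one stripe to the page index in the
 * striped file, mirroring the arithmetic in lovsub_lock_descr_map():
 * every full "row" of this stripe is preceded by (stripe_count - 1)
 * rows belonging to the other stripes, plus the fixed offset of this
 * stripe within the first row.
 */
static pgoff_t stripe_index_to_file_index(pgoff_t stripe_local, int stripe,
					  pgoff_t size, int stripe_count)
{
	pgoff_t skip = (pgoff_t)(stripe_count - 1) * size;

	return stripe_local + stripe_local / size * skip +
	       (pgoff_t)stripe * size;
}

int main(void)
{
	/* 4 stripes, 256 pages per stripe (1MB stripes with 4KB pages) */
	const pgoff_t size = 256;
	const int count = 4;

	/* page 300 of stripe 2: one full row of 4*256 pages, plus
	 * 2*256 pages for stripes 0 and 1, plus 300 % 256 = 44 */
	printf("file index = %lu\n",
	       stripe_index_to_file_index(300, 2, size, count));
	/* prints 1580 = 1024 + 512 + 44 */
	return 0;
}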