From: jxiong
Date: Fri, 9 Oct 2009 11:21:30 +0000 (+0000)
Subject: b=20826
X-Git-Tag: v1_9_290~42
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=e3fa506969073cdcbca471c276f09d60fea8fe5e

b=20826
r=wangdi,jay

Rework the patch of bug 20305.
---

diff --git a/lustre/lclient/lcommon_cl.c b/lustre/lclient/lcommon_cl.c
index 87d2b91..dd04f9b 100644
--- a/lustre/lclient/lcommon_cl.c
+++ b/lustre/lclient/lcommon_cl.c
@@ -624,7 +624,7 @@ int ccc_lock_fits_into(const struct lu_env *env,
          * doesn't enqueue CLM_WRITE sub-locks.
          */
         if (cio->cui_glimpse)
-                result = descr->cld_mode == CLM_PHANTOM;
+                result = descr->cld_mode != CLM_WRITE;
 
         /*
          * Also, don't match incomplete write locks for read, otherwise read
diff --git a/lustre/lov/lov_lock.c b/lustre/lov/lov_lock.c
index c65b1a7..e2b1520 100644
--- a/lustre/lov/lov_lock.c
+++ b/lustre/lov/lov_lock.c
@@ -706,7 +706,7 @@ static int lov_lock_wait(const struct lu_env *env,
                         lov_sublock_unlock(env, sub, closure, subenv);
                 }
                 result = lov_subresult(result, rc);
-                if (result < 0)
+                if (result != 0)
                         break;
         }
         cl_lock_closure_fini(closure);
@@ -759,7 +759,7 @@ static int lov_lock_use(const struct lu_env *env,
                         lov_sublock_unlock(env, sub, closure, subenv);
                 }
                 result = lov_subresult(result, rc);
-                if (result < 0)
+                if (result != 0)
                         break;
         }
         cl_lock_closure_fini(closure);
diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c
index b224bdd..2cc82bc 100644
--- a/lustre/osc/osc_lock.c
+++ b/lustre/osc/osc_lock.c
@@ -1574,10 +1574,26 @@ static int osc_lock_fits_into(const struct lu_env *env,
                               const struct cl_lock_descr *need,
                               const struct cl_io *io)
 {
+        struct osc_lock *ols = cl2osc_lock(slice);
+
+        /* If the lock hasn't ever enqueued, it can't be matched because
+         * enqueue process brings in many information which can be used to
+         * determine things such as lockless, CEF_MUST, etc.
+         */
+        if (ols->ols_state < OLS_ENQUEUED)
+                return 0;
 
-        if (need->cld_mode == CLM_PHANTOM)
-                return need->cld_mode == slice->cls_lock->cll_descr.cld_mode;
+        /* Don't match this lock if the lock is able to become lockless lock.
+         * This is because the new lock might be covering a mmap region and
+         * so that it must have a cached at the local side. */
+        if (ols->ols_state < OLS_UPCALL_RECEIVED && ols->ols_locklessable)
+                return 0;
+
+        /* If the lock is going to be canceled, no reason to match it as well */
+        if (ols->ols_state > OLS_RELEASED)
+                return 0;
+
+        /* go for it. */
         return 1;
 }
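
Editor's sketch (not part of the commit): the new osc_lock_fits_into() only lets a cached lock match a new request while the lock sits inside a usable state window, i.e. it has been enqueued, it can no longer turn into a lockless lock, and it is not on its way to cancellation. The standalone C program below models just that state-window check. The enum ordering, the mock_lock struct and mock_fits_into() are assumptions made for illustration; only the state names OLS_ENQUEUED, OLS_UPCALL_RECEIVED and OLS_RELEASED come from the patch itself, and none of this is real Lustre API.

        #include <stdio.h>

        /* Hypothetical ordering of the osc lock states used by the check;
         * the real enum lives in Lustre's osc headers. */
        enum ols_state {
                OLS_NEW,
                OLS_ENQUEUE_WAIT,
                OLS_ENQUEUED,
                OLS_UPCALL_RECEIVED,
                OLS_GRANTED,
                OLS_RELEASED,
                OLS_CANCELLED,
        };

        /* Hypothetical stand-in for struct osc_lock: only the two fields
         * the fits_into check reads. */
        struct mock_lock {
                enum ols_state state;
                int            locklessable;
        };

        /* Mirrors the control flow added by the patch: refuse to match a
         * lock that was never enqueued, that may still become lockless,
         * or that is already being torn down; otherwise accept it. */
        static int mock_fits_into(const struct mock_lock *lock)
        {
                if (lock->state < OLS_ENQUEUED)
                        return 0;
                if (lock->state < OLS_UPCALL_RECEIVED && lock->locklessable)
                        return 0;
                if (lock->state > OLS_RELEASED)
                        return 0;
                return 1;
        }

        int main(void)
        {
                struct mock_lock samples[] = {
                        { OLS_NEW,       0 }, /* too early: never enqueued   */
                        { OLS_ENQUEUED,  1 }, /* could still become lockless */
                        { OLS_GRANTED,   1 }, /* granted: safe to match      */
                        { OLS_CANCELLED, 0 }, /* being canceled: don't match */
                };
                size_t i;

                for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                        printf("state=%d locklessable=%d -> fits=%d\n",
                               samples[i].state, samples[i].locklessable,
                               mock_fits_into(&samples[i]));
                return 0;
        }

The design point the sketch illustrates is that matching decisions depend on information only available after enqueue, which is why the earlier CLM_PHANTOM-based check in this function could be dropped in favor of the state thresholds.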