r=wangdi,jay
Rework the patch of bug 20305.
* doesn't enqueue CLM_WRITE sub-locks.
*/
if (cio->cui_glimpse)
* doesn't enqueue CLM_WRITE sub-locks.
*/
if (cio->cui_glimpse)
- result = descr->cld_mode == CLM_PHANTOM;
+ result = descr->cld_mode != CLM_WRITE;
/*
* Also, don't match incomplete write locks for read, otherwise read
/*
* Also, don't match incomplete write locks for read, otherwise read
lov_sublock_unlock(env, sub, closure, subenv);
}
result = lov_subresult(result, rc);
lov_sublock_unlock(env, sub, closure, subenv);
}
result = lov_subresult(result, rc);
break;
}
cl_lock_closure_fini(closure);
break;
}
cl_lock_closure_fini(closure);
lov_sublock_unlock(env, sub, closure, subenv);
}
result = lov_subresult(result, rc);
lov_sublock_unlock(env, sub, closure, subenv);
}
result = lov_subresult(result, rc);
break;
}
cl_lock_closure_fini(closure);
break;
}
cl_lock_closure_fini(closure);
const struct cl_lock_descr *need,
const struct cl_io *io)
{
const struct cl_lock_descr *need,
const struct cl_io *io)
{
+ struct osc_lock *ols = cl2osc_lock(slice);
+
+ /* If the lock has never been enqueued, it can't be matched because the
+ * enqueue process brings in a lot of information which can be used to
+ * determine things such as lockless, CEF_MUST, etc.
+ */
+ if (ols->ols_state < OLS_ENQUEUED)
+ return 0;
- if (need->cld_mode == CLM_PHANTOM)
- return need->cld_mode == slice->cls_lock->cll_descr.cld_mode;
+ /* Don't match this lock if the lock is able to become lockless lock.
+ * This is because the new lock might be covering a mmap region and
+ * so it must be cached at the local side. */
+ if (ols->ols_state < OLS_UPCALL_RECEIVED && ols->ols_locklessable)
+ return 0;
+
+ /* If the lock is going to be canceled, no reason to match it as well */
+ if (ols->ols_state > OLS_RELEASED)
+ return 0;