* with the DLM lock reply from the server. Copy of osc_update_enqueue()
* logic.
*
- * This can be optimized to not update attributes when lock is a result of a
- * local match.
- *
* Called under lock and resource spin-locks.
*/
static void osc_lock_lvb_update(const struct lu_env *env,
}
static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
- struct lustre_handle *lockh, bool lvb_update)
+ struct lustre_handle *lockh)
{
struct ldlm_lock *dlmlock;
descr->cld_gid = ext->gid;
/* no lvb update for matched lock */
- if (lvb_update) {
+ if (!ldlm_is_lvb_cached(dlmlock)) {
LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
dlmlock, NULL);
+ ldlm_set_lvb_cached(dlmlock);
}
LINVRNT(osc_lock_invariant(oscl));
}
}
if (rc == 0)
- osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);
+ osc_lock_granted(env, oscl, lockh);
/* Error handling, some errors are tolerable. */
if (oscl->ols_locklessable && rc == -EUSERS) {
lock_res_and_lock(dlmlock);
LASSERT(ldlm_is_granted(dlmlock));
- /* there is no osc_lock associated with speculative locks */
+ /* There is no osc_lock associated with speculative locks,
+  * so there is no need to set LDLM_FL_LVB_CACHED. */
osc_lock_lvb_update(env, osc, dlmlock, NULL);
unlock_res_and_lock(dlmlock);
struct ost_lvb *lvb;
struct req_capsule *cap;
struct cl_object *obj = NULL;
+ struct ldlm_resource *res = dlmlock->l_resource;
+ struct ldlm_match_data matchdata = { 0 };
+ union ldlm_policy_data policy;
+ enum ldlm_mode mode = LCK_PW | LCK_GROUP | LCK_PR;
int result;
__u16 refcheck;
if (IS_ERR(env))
GOTO(out, result = PTR_ERR(env));
+ policy.l_extent.start = 0;
+ policy.l_extent.end = LUSTRE_EOF;
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_ast_data != NULL) {
- obj = osc2cl(dlmlock->l_ast_data);
- cl_object_get(obj);
+ matchdata.lmd_mode = &mode;
+ matchdata.lmd_policy = &policy;
+ matchdata.lmd_flags = LDLM_FL_TEST_LOCK | LDLM_FL_CBPENDING;
+ matchdata.lmd_unref = 1;
+ matchdata.lmd_has_ast_data = true;
+
+ LDLM_LOCK_GET(dlmlock);
+
+ /* If any dlmlock has l_ast_data set, we must find it or we risk
+ * missing a size update done under a different lock.
+ */
+ while (dlmlock) {
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ cl_object_get(obj);
+ }
+ unlock_res_and_lock(dlmlock);
+ LDLM_LOCK_PUT(dlmlock);
+
+ dlmlock = NULL;
+
+ if (obj == NULL && res->lr_type == LDLM_EXTENT) {
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_SIZE_DATA))
+ break;
+
+ lock_res(res);
+ dlmlock = search_itree(res, &matchdata);
+ unlock_res(res);
+ }
}
- unlock_res_and_lock(dlmlock);
if (obj != NULL) {
/* Do not grab the mutex of cl_lock for glimpse.
}
result = osc_enqueue_base(exp, resname, &oscl->ols_flags,
policy, &oscl->ols_lvb,
- osc->oo_oinfo->loi_kms_valid,
upcall, cookie,
&oscl->ols_einfo, PTLRPCD_SET, async,
oscl->ols_speculative);