if (!osfs)
GOTO(out, rc = -EPROTO);
- if (mdt_is_sum_statfs_client(req->rq_export))
+ if (mdt_is_sum_statfs_client(req->rq_export) &&
+ lustre_packed_msg_size(req->rq_reqmsg) ==
+ req_capsule_fmt_size(req->rq_reqmsg->lm_magic,
+ &RQF_MDS_STATFS_NEW, RCL_CLIENT)) {
+ req_capsule_extend(info->mti_pill, &RQF_MDS_STATFS_NEW);
reqbody = req_capsule_client_get(info->mti_pill, &RMF_MDT_BODY);
+ }
if (reqbody && reqbody->mbo_valid & OBD_MD_FLAGSTATFS)
msf = &mdt->mdt_sum_osfs;
/* permission check. Make sure the calling process having permission
* to write both files. */
rc = mo_permission(info->mti_env, NULL, mdt_object_child(o1), NULL,
- MAY_WRITE);
+ MAY_WRITE);
if (rc < 0)
GOTO(put, rc);
rc = mo_permission(info->mti_env, NULL, mdt_object_child(o2), NULL,
- MAY_WRITE);
+ MAY_WRITE);
if (rc < 0)
GOTO(put, rc);
{
struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
+ struct ldlm_cb_set_arg *arg = data;
bool commit_async = false;
int rc;
ENTRY;
unlock_res_and_lock(lock);
RETURN(0);
}
- /* There is no lock conflict if l_blocking_lock == NULL,
- * it indicates a blocking ast sent from ldlm_lock_decref_internal
- * when the last reference to a local lock was released */
- if (lock->l_req_mode & (LCK_PW | LCK_EX) &&
- lock->l_blocking_lock != NULL) {
+
+ /* A blocking AST may be sent in two cases: from
+ * ldlm_lock_decref_internal() when the last reference to a local
+ * lock is released, and on a blocking event from
+ * ldlm_work_bl_ast_lock(). The 'data' parameter is l_ast_data in
+ * the first case and the callback argument in the second one;
+ * use that difference to tell the two cases apart.
+ */
+ if (!data || data == lock->l_ast_data || !arg->bl_desc)
+ goto skip_cos_checks;
+
+ if (lock->l_req_mode & (LCK_PW | LCK_EX)) {
if (mdt_cos_is_enabled(mdt)) {
- if (lock->l_client_cookie !=
- lock->l_blocking_lock->l_client_cookie)
+ if (!arg->bl_desc->bl_same_client)
mdt_set_lock_sync(lock);
} else if (mdt_slc_is_enabled(mdt) &&
- ldlm_is_cos_incompat(lock->l_blocking_lock)) {
+ arg->bl_desc->bl_cos_incompat) {
mdt_set_lock_sync(lock);
/*
* we may do extra commit here, but there is a small
*/
commit_async = true;
}
- } else if (lock->l_req_mode == LCK_COS &&
- lock->l_blocking_lock != NULL) {
+ } else if (lock->l_req_mode == LCK_COS) {
commit_async = true;
}
+skip_cos_checks:
rc = ldlm_blocking_ast_nocheck(lock);
if (commit_async) {
}
}
+ /* Other components such as LFSCK may use lockless access and
+ * populate the cache, so it is safer to invalidate it here. */
+ mo_invalidate(info->mti_env, mdt_object_child(o));
+
RETURN(0);
}
if (layout_size > info->mti_mdt->mdt_max_mdsize)
info->mti_mdt->mdt_max_mdsize = layout_size;
}
+ CDEBUG(D_INFO, "%s: layout_size %d\n",
+ mdt_obd_name(info->mti_mdt), layout_size);
}
/*
out:
lhc->mlh_reg_lh.cookie = 0;
- return rc;
+ RETURN(rc);
}
static int mdt_intent_open(enum ldlm_intent_flags it_opc,
obd = class_name2obd(dev);
LASSERT(obd != NULL);
- m->mdt_max_mdsize = MAX_MD_SIZE; /* 4 stripes */
+ m->mdt_max_mdsize = MAX_MD_SIZE_OLD;
m->mdt_opts.mo_evict_tgt_nids = 1;
m->mdt_opts.mo_cos = MDT_COS_DEFAULT;
* archive request into a noop if it's not actually
* dirty.
*/
- if (mfd->mfd_mode & MDS_FMODE_WRITE)
+ if (mfd->mfd_open_flags & MDS_FMODE_WRITE)
rc = mdt_ctxt_add_dirty_flag(&env, info, mfd);
/* Don't unlink orphan on failover umount, LU-184 */