static mdl_mode_t cml_lock_mode(const struct lu_env *env,
struct md_object *mo, mdl_mode_t lm)
{
-#if defined(HAVE_SPLIT_SUPPORT) && defined(CONFIG_PDIROPS)
+#if defined(HAVE_SPLIT_SUPPORT)
struct md_attr *ma = &cmm_env_info(env)->cmi_ma;
int rc, split;
ENTRY;
-
memset(ma, 0, sizeof(*ma));
-
- /*
+
+ /*
* Check only if we need protection from split. If not - mdt
* handles other cases.
*/
rc);
RETURN(MDL_MINMODE);
}
-
+
if (lm == MDL_PW && split == CMM_EXPECT_SPLIT)
RETURN(MDL_EX);
RETURN(MDL_MINMODE);
__u32 start;
__u32 end;
-#ifdef CONFIG_PDIROPS
+#if 1
mode = LCK_PR;
#else
mode = LCK_CR;
static int it_to_lock_mode(struct lookup_intent *it)
{
ENTRY;
-
-#ifdef CONFIG_PDIROPS
+
+#if 1
/* CREAT needs to be tested before open (both could be set) */
if (it->it_op & IT_CREAT)
return LCK_PW;
struct lustre_handle *lockh)
{
struct ldlm_res_id res_id =
- { .name = {fid_seq(fid),
+ { .name = {fid_seq(fid),
fid_oid(fid),
fid_ver(fid)} };
struct obd_device *obd = class_exp2obd(exp);
int flags, void *opaque)
{
struct ldlm_res_id res_id =
- { .name = {fid_seq(fid),
- fid_oid(fid),
+ { .name = {fid_seq(fid),
+ fid_oid(fid),
fid_ver(fid)} };
struct obd_device *obd = class_exp2obd(exp);
int rc;
-
+
ENTRY;
-
+
rc = ldlm_cli_cancel_unused(obd->obd_namespace, &res_id,
flags, opaque);
RETURN(rc);
}
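
As an aside (not part of the patch): the res_id initializers in the hunks above all follow the same convention of packing a FID into the first slots of the LDLM resource name; the MDT-side code later in this patch does the equivalent via fid_build_reg_res_name(). A minimal sketch of that convention, using hypothetical stand-in types since the real struct layouts are outside this excerpt:

#include <string.h>

struct demo_fid    { unsigned long long f_seq; unsigned int f_oid, f_ver; };
struct demo_res_id { unsigned long long name[4]; };

static void demo_fid_to_res_name(const struct demo_fid *fid,
                                 struct demo_res_id *res_id)
{
        memset(res_id, 0, sizeof(*res_id));
        res_id->name[0] = fid->f_seq;  /* fid_seq(fid) */
        res_id->name[1] = fid->f_oid;  /* fid_oid(fid) */
        res_id->name[2] = fid->f_ver;  /* fid_ver(fid) */
        /* Remaining slots stay 0 here; the MDT code further down may also
         * store a directory name hash in a dedicated slot
         * (LUSTRE_RES_ID_HSH_OFF) for PDO locking. */
}
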
int mdc_change_cbdata(struct obd_export *exp,
- const struct lu_fid *fid,
+ const struct lu_fid *fid,
ldlm_iterator_t it, void *data)
{
struct ldlm_res_id res_id = { .name = {0} };
res_id.name[1] = fid_oid(fid);
res_id.name[2] = fid_ver(fid);
- ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace,
+ ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace,
&res_id, it, data);
EXIT;
struct ptlrpc_request *req;
struct obd_device *obddev = class_exp2obd(exp);
struct ldlm_res_id res_id =
- { .name = {fid_seq(&op_data->fid1),
+ { .name = {fid_seq(&op_data->fid1),
fid_oid(&op_data->fid1),
fid_ver(&op_data->fid1)} };
ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_LOOKUP } };
lit->opc = (__u64)it->it_op;
/* pack the intended request */
- mdc_open_pack(req, DLM_INTENT_REC_OFF, op_data,
- it->it_create_mode, 0, it->it_flags,
+ mdc_open_pack(req, DLM_INTENT_REC_OFF, op_data,
+ it->it_create_mode, 0, it->it_flags,
lmm, lmmsize);
/* for remote client, fetch remote perm for current user */
if ((body->valid & (OBD_MD_FLDIREA | OBD_MD_FLEASIZE)) != 0) {
void *eadata;
-
+
/*
* The eadata is opaque; just check that it is there.
* Eventually, obd_unpackmd() will check the contents.
RETURN(-EPROTO);
}
if (body->valid & OBD_MD_FLMODEASIZE) {
- if (obddev->u.cli.cl_max_mds_easize <
+ if (obddev->u.cli.cl_max_mds_easize <
body->max_mdsize) {
- obddev->u.cli.cl_max_mds_easize =
+ obddev->u.cli.cl_max_mds_easize =
body->max_mdsize;
CDEBUG(D_INFO, "maxeasize becomes %d\n",
body->max_mdsize);
body->max_cookiesize);
}
}
-
+
/*
* We save the reply LOV EA in case we have to replay a
* create for recovery. If we didn't allocate a large
RETURN(rc);
}
-/*
+/*
* This long block is all about fixing up the lock and request state
* so that it is correct as of the moment _before_ the operation was
* applied; that way, the VFS will think that everything is normal and
CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
", intent: %s flags %#o\n", op_data->namelen,
- op_data->name, PFID(&op_data->fid2),
+ op_data->name, PFID(&op_data->fid2),
PFID(&op_data->fid1), ldlm_it2str(it->it_op),
it->it_flags);
ldlm_policy_data_t policy;
ldlm_mode_t mode = LCK_CR;
- /* As not all attributes are kept under update lock, e.g.
- owner/group/acls are under lookup lock, we need both
+ /* Not all attributes are kept under the update lock; e.g.
+ owner/group/ACLs are under the lookup lock, so we need both
ibits for GETATTR. */
- /* For CMD, UPDATE lock and LOOKUP lock can not be got
- * at the same for cross-object, so we can not match
- * the 2 lock at the same time FIXME: but how to handle
+ /* For CMD, the UPDATE lock and LOOKUP lock cannot be taken
+ * at the same time for a cross-ref object, so we cannot match
+ * both locks at once. FIXME: how do we handle
* the above situation */
policy.l_inodebits.bits = (it->it_op == IT_GETATTR) ?
MDS_INODELOCK_UPDATE : MDS_INODELOCK_LOOKUP;
if (rc < 0)
RETURN(rc);
memcpy(&it->d.lustre.it_lock_handle, &lockh, sizeof(lockh));
- } else if (!fid_is_sane(&op_data->fid2) ||
+ } else if (!fid_is_sane(&op_data->fid2) ||
!(it->it_flags & O_CHECK_STALE)) {
/* DISP_ENQ_COMPLETE set means there is extra reference on
* request referenced from this intent, saved for subsequent
/* If we were revalidating a fid/name pair, mark the intent in
* case we fail and get called again from lookup */
- if (fid_is_sane(&op_data->fid2) && it->it_flags & O_CHECK_STALE
+ if (fid_is_sane(&op_data->fid2) && it->it_flags & O_CHECK_STALE
&& it->it_op != IT_GETATTR) {
it_set_disposition(it, DISP_ENQ_COMPLETE);
-
+
/* Also: did we find the same inode? */
if (!lu_fid_eq(&op_data->fid2, &mdt_body->fid1))
RETURN(-ESTALE);
ldlm_policy_data_t policy = lock->l_policy_data;
LDLM_DEBUG(lock, "matching against this");
- LASSERTF(fid_res_name_eq(&mdt_body->fid1,
+ LASSERTF(fid_res_name_eq(&mdt_body->fid1,
&lock->l_resource->lr_name),
"Lock res_id: %lu/%lu/%lu, fid: %lu/%lu/%lu.\n",
(unsigned long)lock->l_resource->lr_name.name[0],
rep->lock_policy_res1 |= flag;
}
-#ifdef CONFIG_PDIROPS
+#if 1
static mdl_mode_t mdt_mdl_lock_modes[] = {
[0] = MDL_MINMODE,
[1] = MDL_EX,
static inline mdl_mode_t mdt_ldlm_mode2mdl_mode(ldlm_mode_t mode)
{
int idx = ffs((int)mode);
-
+
LASSERT(idx >= 0);
LASSERT(IS_PO2(mode));
LASSERT(idx < ARRAY_SIZE(mdt_mdl_lock_modes));
static inline ldlm_mode_t mdt_mdl_mode2ldlm_mode(mdl_mode_t mode)
{
int idx = ffs((int)mode);
-
+
LASSERT(idx >= 0);
LASSERT(IS_PO2(mode));
LASSERT(idx < ARRAY_SIZE(mdt_ldlm_lock_modes));
}
}
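
The two conversion helpers above rely on lock modes being either zero or a single bit, so ffs() yields a unique table index (0 for no bit, bit position + 1 otherwise), which is why both helpers assert IS_PO2(mode) and bound-check the index. A small standalone illustration of that indexing scheme, using plain integers rather than the real ldlm/mdl mode values:

#include <strings.h> /* ffs() */
#include <stdio.h>

int main(void)
{
        /* Stand-in translation table, indexed like mdt_mdl_lock_modes[]
         * above: slot 0 is "no mode", slot n is the mode whose only set
         * bit is bit n-1. */
        static const char *names[] = { "MINMODE", "EX", "PW", "PR" };
        int modes[] = { 0, 1 << 0, 1 << 1, 1 << 2 };
        int i;

        for (i = 0; i < 4; i++) {
                int idx = ffs(modes[i]);
                printf("mode 0x%x -> index %d -> %s\n",
                       modes[i], idx, names[idx]);
        }
        return 0;
}
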
-#ifdef CONFIG_PDIROPS
+#if 1
static ldlm_mode_t mdt_lock_pdo_mode(struct mdt_thread_info *info,
struct mdt_object *o,
ldlm_mode_t lm)
*/
LASSERT(lm != LCK_MINMODE);
-
+
if (mdt_object_exists(o) > 0) {
/*
* Ask the underlying layer for its opinion about possible locks.
/* Default locks for non-existing objects. */
mode = MDL_MINMODE;
}
-
+
if (mode != MDL_MINMODE) {
/* Lower layer said what lock mode it would like; use it. */
return mdt_mdl_mode2ldlm_mode(mode);
} else {
- /*
+ /*
* Lower layer does not want to specify locking mode. We do it
* ourselves. No special protection is needed, just flush
* client's cache on modification.
(int)mode);
}
}
-
+
return LCK_MINMODE;
}
#endif
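
The mdt_lock_pdo_mode() logic above is a two-step decision: for an existing object the lower layer is asked for a preferred MDL mode, and only if it answers MDL_MINMODE does the MDT pick a mode itself (that fallback switch is outside this excerpt). A hedged sketch of the control flow, with stand-in enums rather than the real ldlm/mdl types:

/* Stand-in mode values; 0 means "no preference" in both namespaces. */
enum demo_mdl  { DEMO_MDL_MINMODE = 0, DEMO_MDL_CW = 1, DEMO_MDL_EX = 2 };
enum demo_ldlm { DEMO_LCK_MINMODE = 0, DEMO_LCK_CW = 1, DEMO_LCK_EX = 2 };

/* Stands in for mdt_mdl_mode2ldlm_mode(); the real one is table-driven. */
static enum demo_ldlm demo_mdl2ldlm(enum demo_mdl m)
{
        return (enum demo_ldlm)m;
}

/* exists:     does the object exist on this MDT?
 * lower_pref: answer from the lower layer, DEMO_MDL_MINMODE if it has
 *             no opinion (mirrors the mode != MDL_MINMODE test above). */
static enum demo_ldlm demo_pdo_mode(int exists, enum demo_mdl lower_pref)
{
        enum demo_mdl mode = exists ? lower_pref : DEMO_MDL_MINMODE;

        if (mode != DEMO_MDL_MINMODE)
                return demo_mdl2ldlm(mode);

        /* The MDT would now choose a mode from the requested regular lock
         * mode (the switch omitted in the excerpt); this sketch simply
         * declines to take a PDO lock. */
        return DEMO_LCK_MINMODE;
}
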
static int mdt_raw_lookup(struct mdt_thread_info *info,
struct mdt_object *parent,
- const char* name,
+ const char* name,
struct ldlm_reply *ldlm_rep)
{
struct md_object *next = mdt_object_child(info->mti_object);
struct mdt_body *repbody;
int rc;
ENTRY;
-
- if (reqbody->valid != OBD_MD_FLID)
+
+ if (reqbody->valid != OBD_MD_FLID)
RETURN(0);
-
+
/* Only the fid of this object is requested; look it up by name. */
rc = mdo_lookup(info->mti_env, next, name, child_fid);
if (rc != 0) {
repbody = req_capsule_server_get(&info->mti_pill, &RMF_MDT_BODY);
repbody->fid1 = *child_fid;
repbody->valid = OBD_MD_FLID;
-
+
RETURN(1);
}
*/
child_bits &= ~MDS_INODELOCK_LOOKUP;
child_bits |= MDS_INODELOCK_UPDATE;
-
+
rc = mdt_object_lock(info, child, lhc, child_bits,
MDT_LOCAL_LOCK);
}
CERROR("Can't pack response, rc %d\n", rc);
RETURN(err_serious(rc));
}
-
+
if (req_capsule_has_field(pill, &RMF_MDT_BODY, RCL_SERVER)) {
repbody = req_capsule_server_get(pill, &RMF_MDT_BODY);
LASSERT(repbody);
memset(policy, 0, sizeof *policy);
fid_build_reg_res_name(mdt_object_fid(o), res_id);
-
-#ifdef CONFIG_PDIROPS
- /*
+
+#if 1
+ /*
* Take PDO lock on whole directory and build correct @res_id for lock
- * on part of directrory.
+ * on part of directory.
*/
if (lh->mlh_type == MDT_PDO_LOCK && lh->mlh_pdo_hash != 0) {
lh->mlh_pdo_mode = mdt_lock_pdo_mode(info, o, lh->mlh_reg_mode);
if (lh->mlh_pdo_mode != LCK_MINMODE) {
- /*
+ /*
* Do not use LDLM_FL_LOCAL_ONLY for the parallel lock, it
* is never going to be sent to client and we do not
* want it slowed down due to possible cancels.
RETURN(rc);
}
- /*
+ /*
* Finish res_id initialization by marking, with the name hash, the part
* of the directory which is being modified.
*/
res_id->name[LUSTRE_RES_ID_HSH_OFF] = lh->mlh_pdo_hash;
}
#endif
-
+
policy->l_inodebits.bits = ibits;
- /*
+ /*
* Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
* going to be sent to the client. If it is, the mdt_intent_policy() path
* will fix it up and turn the FL_LOCAL flag off.
*/
rc = mdt_fid_lock(ns, &lh->mlh_reg_lh, lh->mlh_reg_mode, policy,
res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB);
-
-#ifdef CONFIG_PDIROPS
+
+#if 1
if (rc && lh->mlh_type == MDT_PDO_LOCK) {
mdt_fid_unlock(&lh->mlh_pdo_lh, lh->mlh_pdo_mode);
lh->mlh_pdo_lh.cookie = 0ull;
}
#endif
-
+
RETURN(rc);
}
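
The locking sequence above is the heart of the PDO scheme this patch enables: a weaker lock in mlh_pdo_lh covers the whole directory (resource named by the FID), and the regular lock in mlh_reg_lh is then taken on a resource whose name additionally carries the hash of the entry being modified, so operations on different names can proceed in parallel. A minimal sketch of that sequence with hypothetical stand-in types (the real enqueue goes through mdt_fid_lock()/LDLM):

struct demo_dir_res   { unsigned long long name[4]; };
struct demo_dir_locks { unsigned long pdo_cookie; unsigned long reg_cookie; };

#define DEMO_HSH_SLOT 3 /* stand-in for LUSTRE_RES_ID_HSH_OFF */

/* Stand-ins for mdt_fid_lock()/mdt_fid_unlock(); always succeed here. */
static int demo_take_lock(const struct demo_dir_res *res, unsigned long *cookie)
{
        (void)res;
        *cookie = 1;
        return 0;
}
static void demo_drop_lock(unsigned long *cookie)
{
        *cookie = 0;
}

static int demo_pdo_lock(struct demo_dir_locks *lh, struct demo_dir_res *res,
                         unsigned long name_hash)
{
        int rc;

        /* 1. PDO lock on the whole directory; res names only the FID. */
        rc = demo_take_lock(res, &lh->pdo_cookie);
        if (rc)
                return rc;

        /* 2. Mark which part of the directory is being modified ... */
        res->name[DEMO_HSH_SLOT] = name_hash;

        /* 3. ... and take the regular lock on that part only. */
        rc = demo_take_lock(res, &lh->reg_cookie);
        if (rc) {
                /* Mirror the error path above: drop the PDO lock. */
                demo_drop_lock(&lh->pdo_cookie);
                lh->pdo_cookie = 0;
        }
        return rc;
}
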
struct ptlrpc_request *req = mdt_info_req(info);
ENTRY;
-#ifdef CONFIG_PDIROPS
+#if 1
if (lustre_handle_is_used(&lh->mlh_pdo_lh)) {
/* Do not save PDO locks to request, just decref. */
mdt_fid_unlock(&lh->mlh_pdo_lh,
lh->mlh_pdo_lh.cookie = 0;
}
#endif
-
+
if (lustre_handle_is_used(&lh->mlh_reg_lh)) {
if (decref) {
mdt_fid_unlock(&lh->mlh_reg_lh,
int rc;
printk(KERN_INFO "Lustre: MetaData Target; info@clusterfs.com\n");
-
+
mdt_num_threads = MDT_NUM_THREADS;
lprocfs_init_vars(mdt, &lvars);
rc = class_register_type(&mdt_obd_device_ops, NULL,
static inline int req_xid_is_last(struct ptlrpc_request *req)
{
struct mdt_client_data *mcd = req->rq_export->exp_mdt_data.med_mcd;
- return (req->rq_xid == mcd->mcd_last_xid ||
+ return (req->rq_xid == mcd->mcd_last_xid ||
req->rq_xid == mcd->mcd_last_close_xid);
}
struct mdt_lock_handle {
/* Lock type: regular (also used for cross-ref) or PDO lock. */
mdl_type_t mlh_type;
-
+
/* Regular lock */
struct lustre_handle mlh_reg_lh;
ldlm_mode_t mlh_reg_mode;
* reduce stack consumption.
*/
struct mdt_thread_info {
- /*
+ /*
* XXX: Part One:
* The following members will be filled explicitly
* with specific data in mdt_thread_info_init().
__u64 mti_transno;
- /*
+ /*
* XXX: Part Two:
* The following members will be filled explicitly
* with zero in mdt_thread_info_init(). These members may be used
*/
__u64 mti_opdata;
- /*
+ /*
* XXX: Part Three:
* The following members will be filled explicitly
- * with zero in mdt_reint_unpack(), because they are only used
+ * with zero in mdt_reint_unpack(), because they are only used
* by reint requests (including mdt_reint_open()).
*/
struct md_create_spec mti_spec;
- /*
+ /*
* XXX: Part Four:
* The following members will _NOT_ be initialized at all.
* DO NOT expect them to contain any valid value.
ci->mc_capa[offset] = capa;
}
-#ifdef CONFIG_PDIROPS
+#if 1
#define MDT_RD_LOCK LCK_PR
#define MDT_WR_LOCK LCK_PW
#define MDT_EX_LOCK LCK_EX