}
static int cml_attr_set(const struct lu_context *ctx, struct md_object *mo,
- const struct md_attr *attr)
+ struct md_attr *attr)
{
int rc;
ENTRY;
}
static int cmr_attr_set(const struct lu_context *ctx, struct md_object *mo,
- const struct md_attr *attr)
+ struct md_attr *attr)
{
RETURN(-EFAULT);
}
struct md_object_operations {
int (*moo_attr_get)(const struct lu_context *ctxt, struct md_object *dt,
struct md_attr *attr);
+
+ /* the attr may be adjusted/fixed in various situations in MDD;
+ * so it is no longer a const.
+ */
int (*moo_attr_set)(const struct lu_context *ctxt, struct md_object *dt,
- const struct md_attr *attr);
+ struct md_attr *attr);
int (*moo_xattr_get)(const struct lu_context *ctxt,
struct md_object *obj,
}
static inline int mo_attr_set(const struct lu_context *cx, struct md_object *m,
- const struct md_attr *at)
+ struct md_attr *at)
{
LASSERT(m->mo_ops->moo_attr_set);
return m->mo_ops->moo_attr_set(cx, m, at);
* and port to
*/
int mdd_fix_attr(const struct lu_context *ctxt, struct mdd_object *obj,
- const struct md_attr *ma)
+ struct md_attr *ma)
{
- struct lu_attr *la = (struct lu_attr*)&ma->ma_attr;
+ struct lu_attr *la = &ma->ma_attr;
struct lu_attr *tmp_la = &mdd_ctx_info(ctxt)->mti_la;
struct dt_object *next = mdd_object_child(obj);
time_t now = CURRENT_SECONDS;
- int rc = 0;
+ int rc;
ENTRY;
rc = next->do_ops->do_attr_get(ctxt, next, tmp_la);
if (((tmp_la->la_mode & (S_ISGID | S_IXGRP)) ==
(S_ISGID | S_IXGRP)) && !S_ISDIR(tmp_la->la_mode)) {
la->la_mode &= ~S_ISGID;
- la->la_valid |= ATTR_MODE;
+ la->la_valid |= LA_MODE;
}
} else if (la->la_valid & LA_MODE) {
int mode = la->la_mode;
/* set attr and LOV EA at once, return updated attr */
static int mdd_attr_set(const struct lu_context *ctxt,
- struct md_object *obj, const struct md_attr *ma)
+ struct md_object *obj, struct md_attr *ma)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct mdd_device *mdd = mdo2mdd(obj);
/* start a log jounal handle if needed */
if (S_ISREG(mdd_object_type(mdd_obj)) &&
ma->ma_attr.la_valid & (LA_UID | LA_GID)) {
- mdd_lov_mdsize(ctxt, mdd, &max_size);
+ max_size = mdd_lov_mdsize(ctxt, mdd);
OBD_ALLOC(lmm, max_size);
if (lmm == NULL)
GOTO(cleanup, rc = -ENOMEM);
struct mdd_object *mdd_sobj = mdd_object_find(ctxt, mdd, lf);
struct mdd_object *mdd_tobj = NULL;
struct thandle *handle;
- int rc, locked = 0;
+ int rc;
ENTRY;
if (tobj)
int *cookie_size)
{
struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
- int rc;
-
ENTRY;
- rc = mdd_lov_mdsize(ctx, mdd, md_size);
- if (rc)
- RETURN(rc);
- rc = mdd_lov_cookiesize(ctx, mdd, cookie_size);
+ *md_size = mdd_lov_mdsize(ctx, mdd);
+ *cookie_size = mdd_lov_cookiesize(ctx, mdd);
- RETURN(rc);
+ RETURN(0);
}
static void __mdd_ref_add(const struct lu_context *ctxt, struct mdd_object *obj,
return lu_object_attr(&obj->mod_obj.mo_lu);
}
-int mdd_lov_mdsize(const struct lu_context *ctxt, struct mdd_device *mdd,
- int *md_size);
-int mdd_lov_cookiesize(const struct lu_context *ctxt, struct mdd_device *mdd,
- int *cookie_size);
+static inline int mdd_lov_mdsize(const struct lu_context *ctxt,
+ struct mdd_device *mdd)
+{
+ struct obd_device *obd = mdd2_obd(mdd);
+ return obd->u.mds.mds_max_mdsize;
+}
+
+static inline int mdd_lov_cookiesize(const struct lu_context *ctxt,
+ struct mdd_device *mdd)
+{
+ struct obd_device *obd = mdd2_obd(mdd);
+ return obd->u.mds.mds_max_cookiesize;
+}
+
#endif
/* get lov ea from parent and set to lov */
struct lov_mds_md *__lmm;
int __lmm_size, returned_lmm_size;
- __lmm_size = mdd2_obd(mdd)->u.mds.mds_max_mdsize;
+ __lmm_size = mdd_lov_mdsize(ctxt, mdd);
OBD_ALLOC(__lmm, __lmm_size);
if (__lmm == NULL)
RETURN(rc);
}
-int mdd_lov_mdsize(const struct lu_context *ctxt, struct mdd_device *mdd,
- int *md_size)
-{
- struct obd_device *obd = mdd2_obd(mdd);
- *md_size = obd->u.mds.mds_max_mdsize;
- RETURN(0);
-}
-
-int mdd_lov_cookiesize(const struct lu_context *ctxt, struct mdd_device *mdd,
- int *cookie_size)
-{
- struct obd_device *obd = mdd2_obd(mdd);
- *cookie_size = obd->u.mds.mds_max_cookiesize;
- RETURN(0);
-}
-
} mdt_opts;
/* lock to pretect epoch and write count
- * because we need not allocate memory, spinlock is fast.
*/
spinlock_t mdt_epoch_lock;
__u64 mdt_io_epoch;