}
#endif
- mdd_txn_param_build(env, mdd, MDD_TXN_LINK_OP);
+ mdd_txn_param_build(env, mdd, MDD_TXN_LINK_OP, 1);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
GOTO(out_pending, rc = PTR_ERR(handle));
LASSERTF(mdd_object_exists(mdd_cobj) > 0, "FID is "DFID"\n",
PFID(mdd_object_fid(mdd_cobj)));
- rc = mdd_log_txn_param_build(env, cobj, ma, MDD_TXN_UNLINK_OP);
+ rc = mdd_log_txn_param_build(env, cobj, ma, MDD_TXN_UNLINK_OP, 1);
if (rc)
RETURN(rc);
}
}
#endif
- mdd_txn_param_build(env, mdd, MDD_TXN_INDEX_INSERT_OP);
+ mdd_txn_param_build(env, mdd, MDD_TXN_INDEX_INSERT_OP, 0);
handle = mdd_trans_start(env, mdo2mdd(pobj));
if (IS_ERR(handle))
GOTO(out_pending, rc = PTR_ERR(handle));
}
}
#endif
- mdd_txn_param_build(env, mdd, MDD_TXN_INDEX_DELETE_OP);
+ mdd_txn_param_build(env, mdd, MDD_TXN_INDEX_DELETE_OP, 0);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
GOTO(out_pending, rc = PTR_ERR(handle));
}
}
#endif
- mdd_txn_param_build(env, mdd, MDD_TXN_RENAME_TGT_OP);
+ if (tobj && mdd_object_exists(mdd_tobj))
+ mdd_log_txn_param_build(env, tobj, ma, MDD_TXN_RENAME_TGT_OP,1);
+ else
+ mdd_txn_param_build(env, mdd, MDD_TXN_RENAME_TGT_OP, 1);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
GOTO(out_pending, rc = PTR_ERR(handle));
if (rc)
RETURN(rc);
- mdd_txn_param_build(env, mdd, MDD_TXN_CREATE_DATA_OP);
+ mdd_create_txn_param_build(env, mdd, lmm, MDD_TXN_CREATE_DATA_OP, 0);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
GOTO(out_free, rc = PTR_ERR(handle));
got_def_acl = 1;
}
- mdd_txn_param_build(env, mdd, MDD_TXN_MKDIR_OP);
+ mdd_create_txn_param_build(env, mdd, lmm, MDD_TXN_MKDIR_OP, 1);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
GOTO(out_free, rc = PTR_ERR(handle));
}
}
#endif
- mdd_txn_param_build(env, mdd, MDD_TXN_RENAME_OP);
+ if (tobj && mdd_object_exists(mdd_tobj))
+ mdd_log_txn_param_build(env, tobj, ma, MDD_TXN_RENAME_OP, 2);
+ else
+ mdd_txn_param_build(env, mdd, MDD_TXN_RENAME_OP, 2);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
GOTO(out_pending, rc = PTR_ERR(handle));
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
*/
/*
* This file is part of Lustre, http://www.lustre.org/
/* mdd_trans.c */
+/* changelog_cnt: number of changelog records the caller intends to write
+ * inside this transaction; each one reserves an extra DTO_LOG_REC credit
+ * (see mdd_txn_param_build). Pass 0 when no changelog record is emitted. */
void mdd_txn_param_build(const struct lu_env *env, struct mdd_device *mdd,
- enum mdd_txn_op);
+ enum mdd_txn_op, int changelog_cnt);
+int mdd_create_txn_param_build(const struct lu_env *env, struct mdd_device *mdd,
+ struct lov_mds_md *lmm, enum mdd_txn_op op,
+ int changelog_cnt);
int mdd_log_txn_param_build(const struct lu_env *env, struct md_object *obj,
- struct md_attr *ma, enum mdd_txn_op);
+ struct md_attr *ma, enum mdd_txn_op,
+ int changelog_cnt);
int mdd_setattr_txn_param_build(const struct lu_env *env, struct md_object *obj,
- struct md_attr *ma, enum mdd_txn_op);
+ struct md_attr *ma, enum mdd_txn_op,
+ int changelog_cnt);
int mdd_lov_destroy(const struct lu_env *env, struct mdd_device *mdd,
struct mdd_object *obj, struct lu_attr *la);
struct thandle *handle;
struct lov_mds_md *lmm = NULL;
struct llog_cookie *logcookies = NULL;
- int rc, lmm_size = 0, cookie_size = 0;
+ int rc, lmm_size = 0, cookie_size = 0, chlog_cnt;
struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
-#ifdef HAVE_QUOTA_SUPPORT
struct obd_device *obd = mdd->mdd_obd_dev;
struct mds_obd *mds = &obd->u.mds;
+#ifdef HAVE_QUOTA_SUPPORT
unsigned int qnids[MAXQUOTAS] = { 0, 0 };
unsigned int qoids[MAXQUOTAS] = { 0, 0 };
int quota_opc = 0, block_count = 0;
ma->ma_attr.la_valid == LA_ATIME && la_copy->la_valid == 0)
RETURN(0);
- mdd_setattr_txn_param_build(env, obj, (struct md_attr *)ma,
- MDD_TXN_ATTR_SET_OP);
- handle = mdd_trans_start(env, mdd);
- if (IS_ERR(handle))
- RETURN(PTR_ERR(handle));
/*TODO: add lock here*/
/* start a log jounal handle if needed */
if (S_ISREG(mdd_object_type(mdd_obj)) &&
lmm_size = mdd_lov_mdsize(env, mdd);
lmm = mdd_max_lmm_get(env, mdd);
if (lmm == NULL)
- GOTO(cleanup, rc = -ENOMEM);
+ GOTO(no_trans, rc = -ENOMEM);
rc = mdd_get_md_locked(env, mdd_obj, lmm, &lmm_size,
XATTR_NAME_LOV);
if (rc < 0)
- GOTO(cleanup, rc);
+ GOTO(no_trans, rc);
}
+ chlog_cnt = 1;
+ if (la_copy->la_valid && !(la_copy->la_valid & LA_FLAGS) && lmm_size) {
+ chlog_cnt += (lmm->lmm_stripe_count >= 0) ?
+ lmm->lmm_stripe_count : mds->mds_lov_desc.ld_tgt_count;
+ }
+ mdd_setattr_txn_param_build(env, obj, (struct md_attr *)ma,
+ MDD_TXN_ATTR_SET_OP, chlog_cnt);
+ handle = mdd_trans_start(env, mdd);
+ if (IS_ERR(handle))
+ GOTO(no_trans, rc = PTR_ERR(handle));
+
if (ma->ma_attr.la_valid & (LA_MTIME | LA_CTIME))
CDEBUG(D_INODE, "setting mtime "LPU64", ctime "LPU64"\n",
ma->ma_attr.la_mtime, ma->ma_attr.la_ctime);
rc = mdd_attr_set_changelog(env, obj, handle,
ma->ma_attr.la_valid);
mdd_trans_stop(env, mdd, rc, handle);
+no_trans:
if (rc == 0 && (lmm != NULL && lmm_size > 0 )) {
/*set obd attr, if needed*/
rc = mdd_lov_setattr_async(env, mdd_obj, lmm, lmm_size,
if (rc)
RETURN(rc);
- mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
+ mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP, 1);
/* security-replated changes may require sync */
if (!strcmp(name, XATTR_NAME_ACL_ACCESS) &&
mdd->mdd_sync_permission == 1)
if (rc)
RETURN(rc);
- mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
+ mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP, 1);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
LASSERT(mdd_object_exists(mdd_obj) > 0);
- rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP);
+ rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP, 0);
if (rc)
RETURN(rc);
}
#endif
- mdd_txn_param_build(env, mdd, MDD_TXN_OBJECT_CREATE_OP);
+ mdd_txn_param_build(env, mdd, MDD_TXN_OBJECT_CREATE_OP, 0);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
GOTO(out_pending, rc = PTR_ERR(handle));
int rc;
ENTRY;
- mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP);
+ mdd_txn_param_build(env, mdd, MDD_TXN_XATTR_SET_OP, 0);
handle = mdd_trans_start(env, mdd);
if (IS_ERR(handle))
RETURN(-ENOMEM);
if (mdd_obj->mod_count == 1 &&
(mdd_obj->mod_flags & (ORPHAN_OBJ | DEAD_OBJ)) != 0) {
again:
- rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP);
+ rc = mdd_log_txn_param_build(env, obj, ma, MDD_TXN_UNLINK_OP, 0);
if (rc)
RETURN(rc);
handle = mdd_trans_start(env, mdo2mdd(obj));
{
struct thandle *th = NULL;
struct mdd_device *mdd = mdo2mdd(&obj->mod_obj);
+ struct md_attr *ma = &mdd_env_info(env)->mti_ma;
int rc = 0;
ENTRY;
- mdd_txn_param_build(env, mdd, MDD_TXN_UNLINK_OP);
+ /* init ma */
+ ma->ma_lmm_size = mdd_lov_mdsize(env, mdd);
+ ma->ma_lmm = mdd_max_lmm_get(env, mdd);
+ ma->ma_cookie_size = mdd_lov_cookiesize(env, mdd);
+ ma->ma_cookie = mdd_max_cookie_get(env, mdd);
+ ma->ma_need = MA_INODE | MA_LOV | MA_COOKIE;
+ ma->ma_valid = 0;
+
+ mdd_log_txn_param_build(env, &obj->mod_obj, ma, MDD_TXN_UNLINK_OP, 0);
th = mdd_trans_start(env, mdd);
if (IS_ERR(th)) {
CERROR("Cannot get thandle\n");
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
*/
/*
* This file is part of Lustre, http://www.lustre.org/
}
void mdd_txn_param_build(const struct lu_env *env, struct mdd_device *mdd,
- enum mdd_txn_op op)
+ enum mdd_txn_op op, int changelog_cnt)
{
LASSERT(0 <= op && op < MDD_TXN_LAST_OP);
txn_param_init(&mdd_env_info(env)->mti_param,
mdd->mdd_tod[op].mod_credits);
+ if (changelog_cnt > 0) {
+ txn_param_credit_add(&mdd_env_info(env)->mti_param,
+ changelog_cnt * dto_txn_credits[DTO_LOG_REC]);
+ }
+}
+
+/*
+ * Build transaction credits for a create-type operation
+ * (MDD_TXN_CREATE_DATA_OP or MDD_TXN_MKDIR_OP).
+ *
+ * During recovery, a replayed create carrying striping data (\a lmm) may
+ * update lov_objid, so one extra credit per stripe is reserved on top of
+ * the base credits; outside recovery, or with no \a lmm, only the base
+ * credits plus \a changelog_cnt changelog credits are reserved.
+ *
+ * Always returns 0.
+ */
+int mdd_create_txn_param_build(const struct lu_env *env, struct mdd_device *mdd,
+ struct lov_mds_md *lmm, enum mdd_txn_op op,
+ int changelog_cnt)
+{
+ int stripes = 0;
+ ENTRY;
+
+ LASSERT(op == MDD_TXN_CREATE_DATA_OP || op == MDD_TXN_MKDIR_OP);
+
+ if (lmm == NULL)
+ GOTO(out, 0);
+ /* only a replayed create request will cause a lov_objid update */
+ if (!mdd->mdd_obd_dev->obd_recovering)
+ GOTO(out, 0);
+
+ /* add possible orphan unlink rec credits used in lov_objid update;
+ * lmm is in on-disk (little-endian) format, hence le32_to_cpu */
+ if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1) {
+ stripes = le32_to_cpu(((struct lov_mds_md_v1*)lmm)
+ ->lmm_stripe_count);
+ } else if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V3){
+ stripes = le32_to_cpu(((struct lov_mds_md_v3*)lmm)
+ ->lmm_stripe_count);
+ } else {
+ /* corrupt or unsupported striping data: fail loudly */
+ CERROR("Unknown lmm type %X\n", le32_to_cpu(lmm->lmm_magic));
+ LBUG();
+ }
+out:
+ mdd_txn_param_build(env, mdd, op, stripes + changelog_cnt);
+ RETURN(0);
}
int mdd_log_txn_param_build(const struct lu_env *env, struct md_object *obj,
- struct md_attr *ma, enum mdd_txn_op op)
+ struct md_attr *ma, enum mdd_txn_op op,
+ int changelog_cnt)
{
struct mdd_device *mdd = mdo2mdd(&md2mdd_obj(obj)->mod_obj);
- int rc, stripe = 0;
+ int rc, stripe = 0;
ENTRY;
+ /* directories carry no stripes: only base + changelog credits */
if (S_ISDIR(lu_object_attr(&obj->mo_lu)))
- RETURN(0);
+ GOTO(out, rc = 0);
- LASSERT(op == MDD_TXN_UNLINK_OP || op == MDD_TXN_RENAME_OP);
+ LASSERT(op == MDD_TXN_UNLINK_OP || op == MDD_TXN_RENAME_OP ||
+ op == MDD_TXN_RENAME_TGT_OP);
rc = mdd_lmm_get_locked(env, md2mdd_obj(obj), ma);
if (rc || !(ma->ma_valid & MA_LOV))
- GOTO(out, rc);
+ GOTO(out, rc);
LASSERTF(le32_to_cpu(ma->ma_lmm->lmm_magic) == LOV_MAGIC_V1 ||
le32_to_cpu(ma->ma_lmm->lmm_magic) == LOV_MAGIC_V3,
else
stripe = le32_to_cpu(ma->ma_lmm->lmm_stripe_count);
- log_credits = stripe * dto_txn_credits[DTO_LOG_REC];
- txn_param_credit_add(&mdd_env_info(env)->mti_param, log_credits);
+out:
+ /* one DTO_LOG_REC credit per unlink-log stripe and per changelog rec,
+ * folded into the base credits in a single param build */
+ mdd_txn_param_build(env, mdd, op, stripe + changelog_cnt);
+
RETURN(rc);
}
int mdd_setattr_txn_param_build(const struct lu_env *env, struct md_object *obj,
- struct md_attr *ma, enum mdd_txn_op op)
+ struct md_attr *ma, enum mdd_txn_op op,
+ int changelog_cnt)
{
struct mdd_device *mdd = mdo2mdd(&md2mdd_obj(obj)->mod_obj);
ENTRY;
- mdd_txn_param_build(env, mdd, op);
+ mdd_txn_param_build(env, mdd, op, changelog_cnt);
if (ma->ma_attr.la_valid & (LA_UID | LA_GID))
txn_param_credit_add(&mdd_env_info(env)->mti_param,
dto_txn_credits[DTO_ATTR_SET_CHOWN]);