* GPL HEADER END
*/
/*
- * Copyright (c) 2014, Intel Corporation.
+ * Copyright (c) 2014, 2015, Intel Corporation.
*/
/*
* lustre/target/update_trans.c
list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
struct sub_thandle_cookie *stc;
- CDEBUG(mask, "st %p obd %s committed %d sub_th %p\n",
+ CDEBUG(mask, "st %p obd %s committed %d stopped %d sub_th %p\n",
st, st->st_dt->dd_lu_dev.ld_obd->obd_name,
- st->st_committed, st->st_sub_th);
+ st->st_committed, st->st_stopped, st->st_sub_th);
list_for_each_entry(stc, &st->st_cookie_list, stc_list) {
CDEBUG(mask, " cookie "DOSTID": %u\n",
* for example if the the OSP is used to connect to OST */
ctxt = llog_get_context(dt->dd_lu_dev.ld_obd,
LLOG_UPDATELOG_ORIG_CTXT);
- LASSERT(ctxt != NULL);
/* Not ready to record updates yet. */
- if (ctxt->loc_handle == NULL)
- GOTO(out_put, rc = 0);
+ if (ctxt == NULL || ctxt->loc_handle == NULL) {
+ llog_ctxt_put(ctxt);
+ return 0;
+ }
rc = llog_declare_add(env, ctxt->loc_handle,
&record->lur_hdr, sub_th);
ctxt = llog_get_context(dt->dd_lu_dev.ld_obd,
LLOG_UPDATELOG_ORIG_CTXT);
- LASSERT(ctxt != NULL);
-
- /* Not ready to record updates yet, usually happens
- * in error handler path */
- if (ctxt->loc_handle == NULL)
- GOTO(llog_put, rc = 0);
+ /* If ctxt == NULL, then it means updates on OST (only happens
+ * during migration), and we do not track those updates for now */
+ /* If ctxt->loc_handle == NULL, then it does not need to record
+ * update, usually happens in error handler path */
+ if (ctxt == NULL || ctxt->loc_handle == NULL) {
+ llog_ctxt_put(ctxt);
+ RETURN(0);
+ }
/* Since the cross-MDT updates will includes both local
* and remote updates, the update ops count must > 1 */
return st;
}
-/**
- * sub thandle commit callback
- *
- * Mark the sub thandle to be committed and if all sub thandle are committed
- * notify the top thandle.
- *
- * \param[in] env execution environment
- * \param[in] sub_th sub thandle being committed
- * \param[in] cb commit callback
- * \param[in] err trans result
- */
-static void sub_trans_commit_cb(struct lu_env *env,
- struct thandle *sub_th,
- struct dt_txn_commit_cb *cb, int err)
+static void sub_trans_commit_cb_internal(struct top_multiple_thandle *tmt,
+ struct thandle *sub_th, int err)
{
struct sub_thandle *st;
- struct top_multiple_thandle *tmt = cb->dcb_data;
bool all_committed = true;
- ENTRY;
/* Check if all sub thandles are committed */
+ spin_lock(&tmt->tmt_sub_lock);
list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
if (st->st_sub_th == sub_th) {
st->st_committed = 1;
if (!st->st_committed)
all_committed = false;
}
+ spin_unlock(&tmt->tmt_sub_lock);
if (tmt->tmt_result == 0)
tmt->tmt_result = err;
RETURN_EXIT;
}
+/**
+ * sub thandle commit callback
+ *
+ * Mark the sub thandle to be committed and if all sub thandle are committed
+ * notify the top thandle.
+ *
+ * \param[in] env execution environment
+ * \param[in] sub_th sub thandle being committed
+ * \param[in] cb commit callback
+ * \param[in] err trans result
+ */
+static void sub_trans_commit_cb(struct lu_env *env,
+				struct thandle *sub_th,
+				struct dt_txn_commit_cb *cb, int err)
+{
+	struct top_multiple_thandle *tmt = cb->dcb_data;
+
+	/* Thin dt_txn_commit_cb adapter: unpack tmt from the callback data
+	 * and delegate to the internal helper, which is also called directly
+	 * from the trans-stop path when the sub transaction never started
+	 * and thus no commit callback will fire. */
+	sub_trans_commit_cb_internal(tmt, sub_th, err);
+}
+
static void sub_thandle_register_commit_cb(struct sub_thandle *st,
struct top_multiple_thandle *tmt)
{
st->st_sub_th = sub_th;
sub_th->th_wait_submit = 1;
+ sub_thandle_register_stop_cb(st, top_th->tt_multiple_thandle);
return 0;
}
{
struct target_distribute_txn_data *tdtd;
struct dt_device *dt = new->tmt_master_sub_dt;
+ struct sub_thandle *st;
LASSERT(dt != NULL);
tdtd = dt2lu_dev(dt)->ld_site->ls_tgt->lut_tdtd;
new->tmt_batchid = tdtd->tdtd_batchid++;
list_add_tail(&new->tmt_commit_list, &tdtd->tdtd_list);
spin_unlock(&tdtd->tdtd_batchid_lock);
+ list_for_each_entry(st, &new->tmt_sub_thandle_list, st_sub_list) {
+ if (st->st_sub_th != NULL)
+ sub_thandle_register_commit_cb(st, new);
+ }
top_multiple_thandle_get(new);
top_multiple_thandle_dump(new, D_INFO);
}
struct dt_device *dt = new->tmt_master_sub_dt;
struct top_multiple_thandle *tmt;
struct target_distribute_txn_data *tdtd;
+ struct sub_thandle *st;
bool at_head = false;
LASSERT(dt != NULL);
list_add(&new->tmt_commit_list, &tdtd->tdtd_list);
}
spin_unlock(&tdtd->tdtd_batchid_lock);
+
+ list_for_each_entry(st, &new->tmt_sub_thandle_list, st_sub_list) {
+ if (st->st_sub_th != NULL)
+ sub_thandle_register_commit_cb(st, new);
+ }
+
top_multiple_thandle_get(new);
top_multiple_thandle_dump(new, D_INFO);
if (new->tmt_committed && at_head)
ENTRY;
if (tmt == NULL) {
+ if (th->th_sync)
+ top_th->tt_master_sub_thandle->th_sync = th->th_sync;
+ if (th->th_local)
+ top_th->tt_master_sub_thandle->th_local = th->th_local;
+ top_th->tt_master_sub_thandle->th_tags = th->th_tags;
rc = dt_trans_start(env, top_th->tt_master_sub_thandle->th_dev,
top_th->tt_master_sub_thandle);
RETURN(rc);
list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
if (st->st_sub_th == NULL)
continue;
- st->st_sub_th->th_sync = th->th_sync;
- st->st_sub_th->th_local = th->th_local;
+ if (th->th_sync)
+ st->st_sub_th->th_sync = th->th_sync;
+ if (th->th_local)
+ st->st_sub_th->th_local = th->th_local;
st->st_sub_th->th_tags = th->th_tags;
rc = dt_trans_start(env, st->st_sub_th->th_dev,
st->st_sub_th);
if (rc != 0)
GOTO(out, rc);
- sub_thandle_register_stop_cb(st, tmt);
- sub_thandle_register_commit_cb(st, tmt);
+ LASSERT(st->st_started == 0);
+ st->st_started = 1;
}
out:
th->th_result = rc;
if (likely(top_th->tt_multiple_thandle == NULL)) {
LASSERT(master_dev != NULL);
+
+ if (th->th_sync)
+ top_th->tt_master_sub_thandle->th_sync = th->th_sync;
+ if (th->th_local)
+ top_th->tt_master_sub_thandle->th_local = th->th_local;
+ top_th->tt_master_sub_thandle->th_tags = th->th_tags;
rc = dt_trans_stop(env, master_dev,
top_th->tt_master_sub_thandle);
OBD_FREE_PTR(top_th);
CERROR("%s: cannot prepare updates: rc = %d\n",
master_dev->dd_lu_dev.ld_obd->obd_name, rc);
th->th_result = rc;
+ write_updates = false;
GOTO(stop_master_trans, rc);
}
CERROR("%s: write updates failed: rc = %d\n",
master_dev->dd_lu_dev.ld_obd->obd_name, rc);
th->th_result = rc;
+ write_updates = false;
GOTO(stop_master_trans, rc);
}
}
/* Step 2: Stop the transaction on the master MDT, and fill the
* master transno in the update logs to other MDT. */
if (master_st != NULL && master_st->st_sub_th != NULL) {
- master_st->st_sub_th->th_local = th->th_local;
- master_st->st_sub_th->th_sync = th->th_sync;
+ if (th->th_local)
+ master_st->st_sub_th->th_local = th->th_local;
+ if (th->th_sync)
+ master_st->st_sub_th->th_sync = th->th_sync;
master_st->st_sub_th->th_tags = th->th_tags;
master_st->st_sub_th->th_result = th->th_result;
rc = dt_trans_stop(env, master_st->st_dt, master_st->st_sub_th);
+	/* If the master sub transaction was never started (no updates were
+	 * written), invoke the commit callback directly here; otherwise the
+	 * callback is invoked through osd(osp)_trans_commit_cb() */
+ if (!master_st->st_started &&
+ !list_empty(&tmt->tmt_commit_list))
+ sub_trans_commit_cb_internal(tmt,
+ master_st->st_sub_th, rc);
if (rc < 0) {
th->th_result = rc;
GOTO(stop_other_trans, rc);
if (st == master_st || st->st_sub_th == NULL)
continue;
- st->st_sub_th->th_sync = th->th_sync;
- st->st_sub_th->th_local = th->th_local;
+ if (th->th_sync)
+ st->st_sub_th->th_sync = th->th_sync;
+ if (th->th_local)
+ st->st_sub_th->th_local = th->th_local;
st->st_sub_th->th_tags = th->th_tags;
st->st_sub_th->th_result = th->th_result;
rc = dt_trans_stop(env, st->st_sub_th->th_dev,
INIT_LIST_HEAD(&tmt->tmt_sub_thandle_list);
INIT_LIST_HEAD(&tmt->tmt_commit_list);
atomic_set(&tmt->tmt_refcount, 1);
-
+ spin_lock_init(&tmt->tmt_sub_lock);
init_waitqueue_head(&tmt->tmt_stop_waitq);
+
top_th->tt_multiple_thandle = tmt;
return 0;
st->st_sub_th = sub_th;
sub_th->th_top = &top_th->tt_super;
+ sub_thandle_register_stop_cb(st, top_th->tt_multiple_thandle);
return st;
}
struct dt_device *sub_dt)
{
struct sub_thandle *st = NULL;
+ struct sub_thandle *master_st = NULL;
struct top_thandle *top_th;
struct thandle *sub_th = NULL;
int rc = 0;
/* Add master sub th to the top trans list */
tmt->tmt_master_sub_dt =
top_th->tt_master_sub_thandle->th_dev;
- st = create_sub_thandle_with_thandle(top_th,
- top_th->tt_master_sub_thandle);
- if (IS_ERR(st))
- GOTO(stop_trans, rc = PTR_ERR(st));
+ master_st = create_sub_thandle_with_thandle(top_th,
+ top_th->tt_master_sub_thandle);
+ if (IS_ERR(master_st)) {
+ rc = PTR_ERR(master_st);
+ master_st = NULL;
+ GOTO(stop_trans, rc);
+ }
}
/* create and init sub th to the top trans list */
st = create_sub_thandle_with_thandle(top_th, sub_th);
+ if (IS_ERR(st)) {
+ rc = PTR_ERR(st);
+ st = NULL;
+ GOTO(stop_trans, rc);
+ }
st->st_sub_th->th_wait_submit = 1;
stop_trans:
if (rc < 0) {
- if (st != NULL)
- OBD_FREE_PTR(st);
+ if (master_st != NULL) {
+ list_del(&master_st->st_sub_list);
+ OBD_FREE_PTR(master_st);
+ }
sub_th->th_result = rc;
dt_trans_stop(env, sub_dt, sub_th);
sub_th = ERR_PTR(rc);
obd = st->st_dt->dd_lu_dev.ld_obd;
ctxt = llog_get_context(obd, LLOG_UPDATELOG_ORIG_CTXT);
- LASSERT(ctxt);
+ if (ctxt == NULL)
+ continue;
list_for_each_entry(stc, &st->st_cookie_list, stc_list) {
cookie = &stc->stc_cookie;
if (fid_is_zero(&cookie->lgc_lgl.lgl_oi.oi_fid))
tdtd->tdtd_committed_batchid);
/* update globally committed on a storage */
if (batchid > tdtd->tdtd_committed_batchid) {
- distribute_txn_commit_batchid_update(&env, tdtd,
+ rc = distribute_txn_commit_batchid_update(&env, tdtd,
batchid);
- spin_lock(&tdtd->tdtd_batchid_lock);
- if (batchid > tdtd->tdtd_batchid) {
- /* This might happen during recovery,
- * batchid is initialized as last transno,
- * and the batchid in the update records
- * on other MDTs might be bigger than
- * the batchid, so we need update it to
- * avoid duplicate batchid. */
- CDEBUG(D_HA, "%s update batchid from "LPU64
- " to "LPU64"\n",
- tdtd->tdtd_lut->lut_obd->obd_name,
- tdtd->tdtd_batchid, batchid);
- tdtd->tdtd_batchid = batchid;
- }
- spin_unlock(&tdtd->tdtd_batchid_lock);
+ if (rc == 0)
+ batchid = 0;
}
/* cancel the records for committed batchid's */
/* XXX: should we postpone cancel's till the end of recovery? */
int rc;
ENTRY;
- spin_lock_init(&tdtd->tdtd_batchid_lock);
INIT_LIST_HEAD(&tdtd->tdtd_list);
+ INIT_LIST_HEAD(&tdtd->tdtd_replay_finish_list);
+ INIT_LIST_HEAD(&tdtd->tdtd_replay_list);
+ spin_lock_init(&tdtd->tdtd_batchid_lock);
+ spin_lock_init(&tdtd->tdtd_replay_list_lock);
+ tdtd->tdtd_replay_handler = distribute_txn_replay_handle;
+ tdtd->tdtd_replay_ready = 0;
tdtd->tdtd_batchid = lut->lut_last_transno + 1;
init_waitqueue_head(&lut->lut_tdtd_commit_thread.t_ctl_waitq);
init_waitqueue_head(&tdtd->tdtd_commit_thread_waitq);
+ init_waitqueue_head(&tdtd->tdtd_recovery_threads_waitq);
atomic_set(&tdtd->tdtd_refcount, 0);
+ atomic_set(&tdtd->tdtd_recovery_threads_count, 0);
tdtd->tdtd_lut = lut;
rc = distribute_txn_commit_batchid_init(env, tdtd);