* GPL HEADER END
*/
/*
- * Copyright (c) 2014, Intel Corporation.
+ * Copyright (c) 2014, 2015, Intel Corporation.
*/
/*
* lustre/target/update_trans.c
list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
struct sub_thandle_cookie *stc;
- CDEBUG(mask, "st %p obd %s committed %d sub_th %p\n",
+ CDEBUG(mask, "st %p obd %s committed %d stopped %d sub_th %p\n",
st, st->st_dt->dd_lu_dev.ld_obd->obd_name,
- st->st_committed, st->st_sub_th);
+ st->st_committed, st->st_stopped, st->st_sub_th);
list_for_each_entry(stc, &st->st_cookie_list, stc_list) {
CDEBUG(mask, " cookie "DOSTID": %u\n",
return st;
}
-/**
- * sub thandle commit callback
- *
- * Mark the sub thandle to be committed and if all sub thandle are committed
- * notify the top thandle.
- *
- * \param[in] env execution environment
- * \param[in] sub_th sub thandle being committed
- * \param[in] cb commit callback
- * \param[in] err trans result
- */
-static void sub_trans_commit_cb(struct lu_env *env,
- struct thandle *sub_th,
- struct dt_txn_commit_cb *cb, int err)
+static void sub_trans_commit_cb_internal(struct top_multiple_thandle *tmt,
+ struct thandle *sub_th, int err)
{
struct sub_thandle *st;
- struct top_multiple_thandle *tmt = cb->dcb_data;
bool all_committed = true;
- ENTRY;
/* Check if all sub thandles are committed */
spin_lock(&tmt->tmt_sub_lock);
RETURN_EXIT;
}
+/**
+ * sub thandle commit callback
+ *
+ * Mark the sub thandle as committed and, if all sub thandles are committed,
+ * notify the top thandle.
+ *
+ * \param[in] env execution environment
+ * \param[in] sub_th sub thandle being committed
+ * \param[in] cb commit callback
+ * \param[in] err trans result
+ */
+static void sub_trans_commit_cb(struct lu_env *env,
+ struct thandle *sub_th,
+ struct dt_txn_commit_cb *cb, int err)
+{
+ struct top_multiple_thandle *tmt = cb->dcb_data;
+
+ sub_trans_commit_cb_internal(tmt, sub_th, err);
+}
+
static void sub_thandle_register_commit_cb(struct sub_thandle *st,
struct top_multiple_thandle *tmt)
{
st->st_sub_th = sub_th;
sub_th->th_wait_submit = 1;
+ sub_thandle_register_stop_cb(st, top_th->tt_multiple_thandle);
return 0;
}
{
struct target_distribute_txn_data *tdtd;
struct dt_device *dt = new->tmt_master_sub_dt;
+ struct sub_thandle *st;
LASSERT(dt != NULL);
tdtd = dt2lu_dev(dt)->ld_site->ls_tgt->lut_tdtd;
new->tmt_batchid = tdtd->tdtd_batchid++;
list_add_tail(&new->tmt_commit_list, &tdtd->tdtd_list);
spin_unlock(&tdtd->tdtd_batchid_lock);
+ list_for_each_entry(st, &new->tmt_sub_thandle_list, st_sub_list) {
+ if (st->st_sub_th != NULL)
+ sub_thandle_register_commit_cb(st, new);
+ }
top_multiple_thandle_get(new);
top_multiple_thandle_dump(new, D_INFO);
}
struct dt_device *dt = new->tmt_master_sub_dt;
struct top_multiple_thandle *tmt;
struct target_distribute_txn_data *tdtd;
+ struct sub_thandle *st;
bool at_head = false;
LASSERT(dt != NULL);
list_add(&new->tmt_commit_list, &tdtd->tdtd_list);
}
spin_unlock(&tdtd->tdtd_batchid_lock);
+
+ list_for_each_entry(st, &new->tmt_sub_thandle_list, st_sub_list) {
+ if (st->st_sub_th != NULL)
+ sub_thandle_register_commit_cb(st, new);
+ }
+
top_multiple_thandle_get(new);
top_multiple_thandle_dump(new, D_INFO);
if (new->tmt_committed && at_head)
if (rc != 0)
GOTO(out, rc);
- sub_thandle_register_stop_cb(st, tmt);
- sub_thandle_register_commit_cb(st, tmt);
+ LASSERT(st->st_started == 0);
+ st->st_started = 1;
}
out:
th->th_result = rc;
CERROR("%s: cannot prepare updates: rc = %d\n",
master_dev->dd_lu_dev.ld_obd->obd_name, rc);
th->th_result = rc;
+ write_updates = false;
GOTO(stop_master_trans, rc);
}
CERROR("%s: write updates failed: rc = %d\n",
master_dev->dd_lu_dev.ld_obd->obd_name, rc);
th->th_result = rc;
+ write_updates = false;
GOTO(stop_master_trans, rc);
}
}
master_st->st_sub_th->th_tags = th->th_tags;
master_st->st_sub_th->th_result = th->th_result;
rc = dt_trans_stop(env, master_st->st_dt, master_st->st_sub_th);
+ /* If it does not write_updates, then we call submit callback
+ * here, otherwise callback is done through
+ * osd(osp)_trans_commit_cb() */
+ if (!master_st->st_started &&
+ !list_empty(&tmt->tmt_commit_list))
+ sub_trans_commit_cb_internal(tmt,
+ master_st->st_sub_th, rc);
if (rc < 0) {
th->th_result = rc;
GOTO(stop_other_trans, rc);
st->st_sub_th = sub_th;
sub_th->th_top = &top_th->tt_super;
+ sub_thandle_register_stop_cb(st, top_th->tt_multiple_thandle);
return st;
}
init_waitqueue_head(&lut->lut_tdtd_commit_thread.t_ctl_waitq);
init_waitqueue_head(&tdtd->tdtd_commit_thread_waitq);
+ init_waitqueue_head(&tdtd->tdtd_recovery_threads_waitq);
atomic_set(&tdtd->tdtd_refcount, 0);
+ atomic_set(&tdtd->tdtd_recovery_threads_count, 0);
tdtd->tdtd_lut = lut;
rc = distribute_txn_commit_batchid_init(env, tdtd);