* GPL HEADER END
*/
/*
- * Copyright (c) 2014, Intel Corporation.
+ * Copyright (c) 2015, 2017, Intel Corporation.
*/
/*
* lustre/target/update_trans.c
struct sub_thandle *st;
LASSERT(tmt->tmt_magic == TOP_THANDLE_MAGIC);
- CDEBUG(mask, "%s tmt %p refcount %d committed %d result %d"
- "batchid "LPU64"\n",
+ CDEBUG(mask, "%s tmt %p refcount %d committed %d result %d batchid %llu\n",
tmt->tmt_master_sub_dt ?
tmt->tmt_master_sub_dt->dd_lu_dev.ld_obd->obd_name :
"NULL",
list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
struct sub_thandle_cookie *stc;
- CDEBUG(mask, "st %p obd %s committed %d sub_th %p\n",
+ CDEBUG(mask, "st %p obd %s committed %d started %d stopped %d "
+ "result %d sub_th %p\n",
st, st->st_dt->dd_lu_dev.ld_obd->obd_name,
- st->st_committed, st->st_sub_th);
+ st->st_committed, st->st_started, st->st_stopped,
+ st->st_result, st->st_sub_th);
list_for_each_entry(stc, &st->st_cookie_list, stc_list) {
- CDEBUG(mask, " cookie "DOSTID": %u\n",
- POSTID(&stc->stc_cookie.lgc_lgl.lgl_oi),
+ CDEBUG(mask, " cookie "DFID".%u\n",
+ PFID(&stc->stc_cookie.lgc_lgl.lgl_oi.oi_fid),
stc->stc_cookie.lgc_index);
}
}
* for example if the the OSP is used to connect to OST */
ctxt = llog_get_context(dt->dd_lu_dev.ld_obd,
LLOG_UPDATELOG_ORIG_CTXT);
- LASSERT(ctxt != NULL);
/* Not ready to record updates yet. */
- if (ctxt->loc_handle == NULL)
- GOTO(out_put, rc = 0);
+ if (ctxt == NULL || ctxt->loc_handle == NULL) {
+ llog_ctxt_put(ctxt);
+ return 0;
+ }
rc = llog_declare_add(env, ctxt->loc_handle,
&record->lur_hdr, sub_th);
struct llog_update_record *record,
struct sub_thandle *sub_th)
{
- struct dt_device *dt = sub_th->st_dt;
- struct llog_ctxt *ctxt;
- int rc;
+ struct dt_device *dt = sub_th->st_dt;
+ struct llog_ctxt *ctxt;
struct llog_update_record *lur = NULL;
- struct update_params *params = NULL;
- __u32 update_count = 0;
- __u32 param_count = 0;
- __u32 last_update_count = 0;
- __u32 last_param_count = 0;
- void *src;
- void *start;
- void *next;
+ __u32 update_count = 0;
+ __u32 param_count = 0;
+ __u32 last_update_count = 0;
+ __u32 last_param_count = 0;
+ char *start;
+ char *cur;
+ char *next;
struct sub_thandle_cookie *stc;
+ size_t reclen;
+ bool eof = false;
+ int rc;
ENTRY;
ctxt = llog_get_context(dt->dd_lu_dev.ld_obd,
LLOG_UPDATELOG_ORIG_CTXT);
- LASSERT(ctxt != NULL);
-
- /* Not ready to record updates yet, usually happens
- * in error handler path */
- if (ctxt->loc_handle == NULL)
- GOTO(llog_put, rc = 0);
+ /* If ctxt == NULL, then it means updates on OST (only happens
+ * during migration), and we do not track those updates for now */
+ /* If ctxt->loc_handle == NULL, then it does not need to record
+ * update, usually happens in error handler path */
+ if (ctxt == NULL || ctxt->loc_handle == NULL) {
+ llog_ctxt_put(ctxt);
+ RETURN(0);
+ }
/* Since the cross-MDT updates will includes both local
* and remote updates, the update ops count must > 1 */
"lrh_len %u record_size %zu\n", record->lur_hdr.lrh_len,
llog_update_record_size(record));
- if (likely(record->lur_hdr.lrh_len <= ctxt->loc_chunk_size)) {
+ /*
+ * If its size > llog chunk_size, then write current chunk to the update
+ * llog, NB the padding should >= LLOG_MIN_REC_SIZE.
+ *
+ * So check padding length is either >= LLOG_MIN_REC_SIZE or is 0
+ * (record length just matches the chunk size).
+ */
+
+ reclen = record->lur_hdr.lrh_len;
+ if (reclen + LLOG_MIN_REC_SIZE <= ctxt->loc_chunk_size ||
+ reclen == ctxt->loc_chunk_size) {
OBD_ALLOC_PTR(stc);
if (stc == NULL)
GOTO(llog_put, rc = -ENOMEM);
rc = llog_add(env, ctxt->loc_handle, &record->lur_hdr,
&stc->stc_cookie, sub_th->st_sub_th);
- CDEBUG(D_INFO, "%s: Add update log "DOSTID":%u: rc = %d\n",
+ CDEBUG(D_INFO, "%s: Add update log "DFID".%u: rc = %d\n",
dt->dd_lu_dev.ld_obd->obd_name,
- POSTID(&stc->stc_cookie.lgc_lgl.lgl_oi),
+ PFID(&stc->stc_cookie.lgc_lgl.lgl_oi.oi_fid),
stc->stc_cookie.lgc_index, rc);
if (rc > 0) {
memcpy(lur, &record->lur_hdr, sizeof(record->lur_hdr));
lur->lur_update_rec.ur_update_count = 0;
lur->lur_update_rec.ur_param_count = 0;
- src = &record->lur_update_rec.ur_ops;
- start = next = src;
- lur->lur_hdr.lrh_len = llog_update_record_size(lur);
- params = update_records_get_params(&record->lur_update_rec);
+ start = (char *)&record->lur_update_rec.ur_ops;
+ cur = next = start;
do {
- size_t rec_len;
-
- if (update_count < record->lur_update_rec.ur_update_count) {
- next = update_op_next_op((struct update_op *)src);
- } else {
- if (param_count == 0)
- next = update_records_get_params(
- &record->lur_update_rec);
- else
- next = (char *)src +
- object_update_param_size(
- (struct object_update_param *)src);
+ if (update_count < record->lur_update_rec.ur_update_count)
+ next = (char *)update_op_next_op(
+ (struct update_op *)cur);
+ else if (param_count < record->lur_update_rec.ur_param_count)
+ next = (char *)update_param_next_param(
+ (struct update_param *)cur);
+ else
+ eof = true;
+
+ reclen = __llog_update_record_size(
+ __update_records_size(next - start));
+ if ((reclen + LLOG_MIN_REC_SIZE <= ctxt->loc_chunk_size ||
+ reclen == ctxt->loc_chunk_size) &&
+ !eof) {
+ cur = next;
+
+ if (update_count <
+ record->lur_update_rec.ur_update_count)
+ update_count++;
+ else if (param_count <
+ record->lur_update_rec.ur_param_count)
+ param_count++;
+ continue;
}
- rec_len = cfs_size_round((unsigned long)(next - src));
- /* If its size > llog chunk_size, then write current chunk to
- * the update llog. */
- if (lur->lur_hdr.lrh_len + rec_len + LLOG_MIN_REC_SIZE >
- ctxt->loc_chunk_size ||
- param_count == record->lur_update_rec.ur_param_count) {
- lur->lur_update_rec.ur_update_count =
- update_count > last_update_count ?
- update_count - last_update_count : 0;
- lur->lur_update_rec.ur_param_count = param_count -
- last_param_count;
-
- memcpy(&lur->lur_update_rec.ur_ops, start,
- (unsigned long)(src - start));
- if (last_update_count != 0)
- lur->lur_update_rec.ur_flags |=
- UPDATE_RECORD_CONTINUE;
-
- update_records_dump(&lur->lur_update_rec, D_INFO, true);
- lur->lur_hdr.lrh_len = llog_update_record_size(lur);
- LASSERT(lur->lur_hdr.lrh_len <= ctxt->loc_chunk_size);
-
- OBD_ALLOC_PTR(stc);
- if (stc == NULL)
- GOTO(llog_put, rc = -ENOMEM);
- INIT_LIST_HEAD(&stc->stc_list);
-
- rc = llog_add(env, ctxt->loc_handle,
- &lur->lur_hdr,
- &stc->stc_cookie, sub_th->st_sub_th);
-
- CDEBUG(D_INFO, "%s: Add update log "DOSTID":%u"
- " rc = %d\n", dt->dd_lu_dev.ld_obd->obd_name,
- POSTID(&stc->stc_cookie.lgc_lgl.lgl_oi),
- stc->stc_cookie.lgc_index, rc);
-
- if (rc > 0) {
- list_add(&stc->stc_list,
- &sub_th->st_cookie_list);
- rc = 0;
- } else {
- OBD_FREE_PTR(stc);
- GOTO(llog_put, rc);
- }
+ lur->lur_update_rec.ur_update_count = update_count -
+ last_update_count;
+ lur->lur_update_rec.ur_param_count = param_count -
+ last_param_count;
+ memcpy(&lur->lur_update_rec.ur_ops, start, cur - start);
+ lur->lur_hdr.lrh_len = llog_update_record_size(lur);
- last_update_count = update_count;
- last_param_count = param_count;
- start = src;
- lur->lur_update_rec.ur_update_count = 0;
- lur->lur_update_rec.ur_param_count = 0;
- lur->lur_hdr.lrh_len = llog_update_record_size(lur);
+ LASSERT(lur->lur_hdr.lrh_len ==
+ __llog_update_record_size(
+ __update_records_size(cur - start)));
+ LASSERT(lur->lur_hdr.lrh_len <= ctxt->loc_chunk_size);
+
+ update_records_dump(&lur->lur_update_rec, D_INFO, true);
+
+ OBD_ALLOC_PTR(stc);
+ if (stc == NULL)
+ GOTO(llog_put, rc = -ENOMEM);
+ INIT_LIST_HEAD(&stc->stc_list);
+
+ rc = llog_add(env, ctxt->loc_handle, &lur->lur_hdr,
+ &stc->stc_cookie, sub_th->st_sub_th);
+
+ CDEBUG(D_INFO, "%s: Add update log "DFID".%u: rc = %d\n",
+ dt->dd_lu_dev.ld_obd->obd_name,
+ PFID(&stc->stc_cookie.lgc_lgl.lgl_oi.oi_fid),
+ stc->stc_cookie.lgc_index, rc);
+
+ if (rc > 0) {
+ list_add(&stc->stc_list, &sub_th->st_cookie_list);
+ rc = 0;
+ } else {
+ OBD_FREE_PTR(stc);
+ GOTO(llog_put, rc);
}
- src = next;
- lur->lur_hdr.lrh_len += cfs_size_round(rec_len);
- if (update_count < record->lur_update_rec.ur_update_count)
- update_count++;
- else if (param_count < record->lur_update_rec.ur_param_count)
- param_count++;
- else
- break;
- } while (1);
+ last_update_count = update_count;
+ last_param_count = param_count;
+ start = cur;
+ lur->lur_update_rec.ur_update_count = 0;
+ lur->lur_update_rec.ur_param_count = 0;
+ lur->lur_update_rec.ur_flags |= UPDATE_RECORD_CONTINUE;
+ } while (!eof);
llog_put:
if (lur != NULL)
return 0;
}
-static inline int
-distribute_txn_commit_thread_running(struct lu_target *lut)
-{
- return lut->lut_tdtd_commit_thread.t_flags & SVC_RUNNING;
-}
-
-static inline int
-distribute_txn_commit_thread_stopped(struct lu_target *lut)
-{
- return lut->lut_tdtd_commit_thread.t_flags & SVC_STOPPED;
-}
-
/**
* Top thandle commit callback
*
top_multiple_thandle_dump(tmt, D_HA);
tmt->tmt_committed = 1;
lut = dt2lu_dev(tmt->tmt_master_sub_dt)->ld_site->ls_tgt;
- if (distribute_txn_commit_thread_running(lut))
- wake_up(&lut->lut_tdtd->tdtd_commit_thread_waitq);
+ if (lut->lut_tdtd && lut->lut_tdtd->tdtd_commit_task)
+ wake_up_process(lut->lut_tdtd->tdtd_commit_task);
+
RETURN_EXIT;
}
return st;
}
-/**
- * sub thandle commit callback
- *
- * Mark the sub thandle to be committed and if all sub thandle are committed
- * notify the top thandle.
- *
- * \param[in] env execution environment
- * \param[in] sub_th sub thandle being committed
- * \param[in] cb commit callback
- * \param[in] err trans result
- */
-static void sub_trans_commit_cb(struct lu_env *env,
- struct thandle *sub_th,
- struct dt_txn_commit_cb *cb, int err)
+static void sub_trans_commit_cb_internal(struct top_multiple_thandle *tmt,
+ struct thandle *sub_th, int err)
{
struct sub_thandle *st;
- struct top_multiple_thandle *tmt = cb->dcb_data;
bool all_committed = true;
- ENTRY;
/* Check if all sub thandles are committed */
+ spin_lock(&tmt->tmt_sub_lock);
list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
if (st->st_sub_th == sub_th) {
st->st_committed = 1;
if (!st->st_committed)
all_committed = false;
}
+ spin_unlock(&tmt->tmt_sub_lock);
if (tmt->tmt_result == 0)
tmt->tmt_result = err;
RETURN_EXIT;
}
+/**
+ * sub thandle commit callback
+ *
+ * Mark the sub thandle to be committed and if all sub thandle are committed
+ * notify the top thandle.
+ *
+ * \param[in] env execution environment
+ * \param[in] sub_th sub thandle being committed
+ * \param[in] cb commit callback
+ * \param[in] err trans result
+ */
+static void sub_trans_commit_cb(struct lu_env *env,
+ struct thandle *sub_th,
+ struct dt_txn_commit_cb *cb, int err)
+{
+ struct top_multiple_thandle *tmt = cb->dcb_data;
+
+ sub_trans_commit_cb_internal(tmt, sub_th, err);
+}
+
static void sub_thandle_register_commit_cb(struct sub_thandle *st,
struct top_multiple_thandle *tmt)
{
struct top_multiple_thandle *tmt = cb->dcb_data;
ENTRY;
+ spin_lock(&tmt->tmt_sub_lock);
list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
if (st->st_stopped)
continue;
break;
}
}
+ spin_unlock(&tmt->tmt_sub_lock);
wake_up(&tmt->tmt_stop_waitq);
RETURN_EXIT;
st->st_sub_th = sub_th;
sub_th->th_wait_submit = 1;
+ sub_thandle_register_stop_cb(st, top_th->tt_multiple_thandle);
return 0;
}
child_th->th_top = &top_th->tt_super;
child_th->th_wait_submit = 1;
top_th->tt_master_sub_thandle = child_th;
-
- top_th->tt_super.th_tags |= child_th->th_tags;
}
return &top_th->tt_super;
}
{
struct target_distribute_txn_data *tdtd;
struct dt_device *dt = new->tmt_master_sub_dt;
+ struct sub_thandle *st;
LASSERT(dt != NULL);
tdtd = dt2lu_dev(dt)->ld_site->ls_tgt->lut_tdtd;
new->tmt_batchid = tdtd->tdtd_batchid++;
list_add_tail(&new->tmt_commit_list, &tdtd->tdtd_list);
spin_unlock(&tdtd->tdtd_batchid_lock);
+ list_for_each_entry(st, &new->tmt_sub_thandle_list, st_sub_list) {
+ if (st->st_sub_th != NULL)
+ sub_thandle_register_commit_cb(st, new);
+ }
top_multiple_thandle_get(new);
top_multiple_thandle_dump(new, D_INFO);
}
struct dt_device *dt = new->tmt_master_sub_dt;
struct top_multiple_thandle *tmt;
struct target_distribute_txn_data *tdtd;
+ struct sub_thandle *st;
bool at_head = false;
LASSERT(dt != NULL);
list_add(&new->tmt_commit_list, &tdtd->tdtd_list);
}
spin_unlock(&tdtd->tdtd_batchid_lock);
+
+ list_for_each_entry(st, &new->tmt_sub_thandle_list, st_sub_list) {
+ if (st->st_sub_th != NULL)
+ sub_thandle_register_commit_cb(st, new);
+ }
+
top_multiple_thandle_get(new);
top_multiple_thandle_dump(new, D_INFO);
- if (new->tmt_committed && at_head)
- wake_up(&tdtd->tdtd_commit_thread_waitq);
+ if (new->tmt_committed && at_head && tdtd->tdtd_commit_task)
+ wake_up_process(tdtd->tdtd_commit_task);
}
/**
ENTRY;
if (tmt == NULL) {
+ if (th->th_sync)
+ top_th->tt_master_sub_thandle->th_sync = th->th_sync;
+ if (th->th_local)
+ top_th->tt_master_sub_thandle->th_local = th->th_local;
rc = dt_trans_start(env, top_th->tt_master_sub_thandle->th_dev,
top_th->tt_master_sub_thandle);
RETURN(rc);
list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
if (st->st_sub_th == NULL)
continue;
- st->st_sub_th->th_sync = th->th_sync;
- st->st_sub_th->th_local = th->th_local;
- st->st_sub_th->th_tags = th->th_tags;
+ if (th->th_sync)
+ st->st_sub_th->th_sync = th->th_sync;
+ if (th->th_local)
+ st->st_sub_th->th_local = th->th_local;
rc = dt_trans_start(env, st->st_sub_th->th_dev,
st->st_sub_th);
if (rc != 0)
GOTO(out, rc);
- sub_thandle_register_stop_cb(st, tmt);
- sub_thandle_register_commit_cb(st, tmt);
+ LASSERT(st->st_started == 0);
+ st->st_started = 1;
}
out:
th->th_result = rc;
*/
static int top_trans_wait_result(struct top_thandle *top_th)
{
- struct l_wait_info lwi = {0};
-
- l_wait_event(top_th->tt_multiple_thandle->tmt_stop_waitq,
- top_trans_is_stopped(top_th), &lwi);
+ wait_event_idle(top_th->tt_multiple_thandle->tmt_stop_waitq,
+ top_trans_is_stopped(top_th));
RETURN(top_th->tt_super.th_result);
}
if (likely(top_th->tt_multiple_thandle == NULL)) {
LASSERT(master_dev != NULL);
+
+ if (th->th_sync)
+ top_th->tt_master_sub_thandle->th_sync = th->th_sync;
+ if (th->th_local)
+ top_th->tt_master_sub_thandle->th_local = th->th_local;
rc = dt_trans_stop(env, master_dev,
top_th->tt_master_sub_thandle);
OBD_FREE_PTR(top_th);
CERROR("%s: cannot prepare updates: rc = %d\n",
master_dev->dd_lu_dev.ld_obd->obd_name, rc);
th->th_result = rc;
+ write_updates = false;
GOTO(stop_master_trans, rc);
}
CERROR("%s: write updates failed: rc = %d\n",
master_dev->dd_lu_dev.ld_obd->obd_name, rc);
th->th_result = rc;
+ write_updates = false;
GOTO(stop_master_trans, rc);
}
}
/* Step 2: Stop the transaction on the master MDT, and fill the
* master transno in the update logs to other MDT. */
if (master_st != NULL && master_st->st_sub_th != NULL) {
- master_st->st_sub_th->th_local = th->th_local;
- master_st->st_sub_th->th_sync = th->th_sync;
- master_st->st_sub_th->th_tags = th->th_tags;
+ if (th->th_local)
+ master_st->st_sub_th->th_local = th->th_local;
+ if (th->th_sync)
+ master_st->st_sub_th->th_sync = th->th_sync;
master_st->st_sub_th->th_result = th->th_result;
rc = dt_trans_stop(env, master_st->st_dt, master_st->st_sub_th);
+ /* If it does not write_updates, then we call submit callback
+ * here, otherwise callback is done through
+ * osd(osp)_trans_commit_cb() */
+ if (!master_st->st_started &&
+ !list_empty(&tmt->tmt_commit_list))
+ sub_trans_commit_cb_internal(tmt,
+ master_st->st_sub_th, rc);
if (rc < 0) {
+ CERROR("%s: stop trans failed: rc = %d\n",
+ master_dev->dd_lu_dev.ld_obd->obd_name, rc);
th->th_result = rc;
GOTO(stop_other_trans, rc);
} else if (tur != NULL && tur->tur_update_records != NULL) {
/* Step 3: write updates to other MDTs */
if (write_updates) {
struct llog_update_record *lur;
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_OUT_OBJECT_MISS)) {
+ if (cfs_fail_val == 1) {
+ long timeout = cfs_time_seconds(1) / 10;
+
+ OBD_RACE(OBD_FAIL_OUT_OBJECT_MISS);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(timeout);
+ cfs_fail_loc = 0;
+ }
+ cfs_fail_val++;
+ }
/* Stop callback of master will add more updates and also update
* master transno, so merge the parameters and updates into one
rc = sub_updates_write(env, lur, st);
if (rc < 0) {
+ CERROR("%s: write updates failed: rc = %d\n",
+ st->st_dt->dd_lu_dev.ld_obd->obd_name,
+ rc);
th->th_result = rc;
break;
}
if (st == master_st || st->st_sub_th == NULL)
continue;
- st->st_sub_th->th_sync = th->th_sync;
- st->st_sub_th->th_local = th->th_local;
- st->st_sub_th->th_tags = th->th_tags;
+ if (th->th_sync)
+ st->st_sub_th->th_sync = th->th_sync;
+ if (th->th_local)
+ st->st_sub_th->th_local = th->th_local;
st->st_sub_th->th_result = th->th_result;
rc = dt_trans_stop(env, st->st_sub_th->th_dev,
st->st_sub_th);
- if (unlikely(rc < 0 && th->th_result == 0))
- th->th_result = rc;
+ if (rc < 0) {
+ CERROR("%s: stop trans failed: rc = %d\n",
+ st->st_dt->dd_lu_dev.ld_obd->obd_name, rc);
+ if (th->th_result == 0)
+ th->th_result = rc;
+ }
}
rc = top_trans_wait_result(top_th);
INIT_LIST_HEAD(&tmt->tmt_sub_thandle_list);
INIT_LIST_HEAD(&tmt->tmt_commit_list);
atomic_set(&tmt->tmt_refcount, 1);
-
+ spin_lock_init(&tmt->tmt_sub_lock);
init_waitqueue_head(&tmt->tmt_stop_waitq);
+
top_th->tt_multiple_thandle = tmt;
return 0;
st->st_sub_th = sub_th;
sub_th->th_top = &top_th->tt_super;
+ sub_thandle_register_stop_cb(st, top_th->tt_multiple_thandle);
return st;
}
struct dt_device *sub_dt)
{
struct sub_thandle *st = NULL;
+ struct sub_thandle *master_st = NULL;
struct top_thandle *top_th;
struct thandle *sub_th = NULL;
int rc = 0;
/* Add master sub th to the top trans list */
tmt->tmt_master_sub_dt =
top_th->tt_master_sub_thandle->th_dev;
- st = create_sub_thandle_with_thandle(top_th,
- top_th->tt_master_sub_thandle);
- if (IS_ERR(st))
- GOTO(stop_trans, rc = PTR_ERR(st));
+ master_st = create_sub_thandle_with_thandle(top_th,
+ top_th->tt_master_sub_thandle);
+ if (IS_ERR(master_st)) {
+ rc = PTR_ERR(master_st);
+ master_st = NULL;
+ GOTO(stop_trans, rc);
+ }
}
/* create and init sub th to the top trans list */
st = create_sub_thandle_with_thandle(top_th, sub_th);
+ if (IS_ERR(st)) {
+ rc = PTR_ERR(st);
+ st = NULL;
+ GOTO(stop_trans, rc);
+ }
st->st_sub_th->th_wait_submit = 1;
stop_trans:
if (rc < 0) {
- if (st != NULL)
- OBD_FREE_PTR(st);
+ if (master_st != NULL) {
+ list_del(&master_st->st_sub_list);
+ OBD_FREE_PTR(master_st);
+ }
sub_th->th_result = rc;
dt_trans_stop(env, sub_dt, sub_th);
sub_th = ERR_PTR(rc);
obd = st->st_dt->dd_lu_dev.ld_obd;
ctxt = llog_get_context(obd, LLOG_UPDATELOG_ORIG_CTXT);
- LASSERT(ctxt);
+ if (ctxt == NULL)
+ continue;
list_for_each_entry(stc, &st->st_cookie_list, stc_list) {
cookie = &stc->stc_cookie;
if (fid_is_zero(&cookie->lgc_lgl.lgl_oi.oi_fid))
rc = llog_cat_cancel_records(env, ctxt->loc_handle, 1,
cookie);
CDEBUG(D_HA, "%s: batchid %llu cancel update log "
- DOSTID ".%u : rc = %d\n", obd->obd_name,
+ DFID".%u: rc = %d\n", obd->obd_name,
tmt->tmt_batchid,
- POSTID(&cookie->lgc_lgl.lgl_oi),
+ PFID(&cookie->lgc_lgl.lgl_oi.oi_fid),
cookie->lgc_index, rc);
}
RETURN(0);
}
-/**
- * Check if there are committed transaction
- *
- * Check if there are committed transaction in the distribute transaction
- * list, then cancel the update records for those committed transaction.
- * Because the distribute transaction in the list are sorted by batchid,
- * and cancellation will be done by batchid order, so we only check the first
- * the transaction(with lowest batchid) in the list.
- *
- * \param[in] lod lod device where cancel thread is
- *
- * \retval true if it is ready
- * \retval false if it is not ready
- */
-static bool tdtd_ready_for_cancel_log(struct target_distribute_txn_data *tdtd)
-{
- struct top_multiple_thandle *tmt = NULL;
- struct obd_device *obd = tdtd->tdtd_lut->lut_obd;
- bool ready = false;
-
- spin_lock(&tdtd->tdtd_batchid_lock);
- if (!list_empty(&tdtd->tdtd_list)) {
- tmt = list_entry(tdtd->tdtd_list.next,
- struct top_multiple_thandle, tmt_commit_list);
- if (tmt->tmt_committed &&
- (!obd->obd_recovering || (obd->obd_recovering &&
- tmt->tmt_batchid <= tdtd->tdtd_committed_batchid)))
- ready = true;
- }
- spin_unlock(&tdtd->tdtd_batchid_lock);
-
- return ready;
-}
-
struct distribute_txn_bid_data {
struct dt_txn_commit_cb dtbd_cb;
struct target_distribute_txn_data *dtbd_tdtd;
struct distribute_txn_bid_data *dtbd = NULL;
struct target_distribute_txn_data *tdtd;
- dtbd = container_of0(cb, struct distribute_txn_bid_data, dtbd_cb);
+ dtbd = container_of(cb, struct distribute_txn_bid_data, dtbd_cb);
tdtd = dtbd->dtbd_tdtd;
CDEBUG(D_HA, "%s: %llu batchid updated\n",
!tdtd->tdtd_lut->lut_obd->obd_no_transno)
tdtd->tdtd_committed_batchid = dtbd->dtbd_batchid;
spin_unlock(&tdtd->tdtd_batchid_lock);
- atomic_dec(&tdtd->tdtd_refcount);
- wake_up(&tdtd->tdtd_commit_thread_waitq);
+ if (atomic_dec_and_test(&tdtd->tdtd_refcount))
+ wake_up_process(tdtd->tdtd_commit_task);
OBD_FREE_PTR(dtbd);
}
th = dt_trans_create(env, tdtd->tdtd_lut->lut_bottom);
if (IS_ERR(th)) {
+ atomic_dec(&tdtd->tdtd_refcount);
OBD_FREE_PTR(dtbd);
RETURN(PTR_ERR(th));
}
rc = dt_record_write(env, tdtd->tdtd_batchid_obj, &buf,
&off, th);
- CDEBUG(D_INFO, "%s: update batchid "LPU64": rc = %d\n",
+ CDEBUG(D_INFO, "%s: update batchid %llu: rc = %d\n",
tdtd->tdtd_lut->lut_obd->obd_name, batchid, rc);
stop:
dt_trans_stop(env, tdtd->tdtd_lut->lut_bottom, th);
- if (rc < 0)
+ if (rc < 0) {
+ atomic_dec(&tdtd->tdtd_refcount);
OBD_FREE_PTR(dtbd);
+ }
RETURN(rc);
}
out_put:
if (rc < 0 && dt_obj != NULL) {
- lu_object_put(env, &dt_obj->do_lu);
+ dt_object_put(env, dt_obj);
tdtd->tdtd_batchid_obj = NULL;
}
return rc;
}
+#ifndef TASK_IDLE
+#define TASK_IDLE TASK_INTERRUPTIBLE
+#endif
+
/**
* manage the distribute transaction thread
*
static int distribute_txn_commit_thread(void *_arg)
{
struct target_distribute_txn_data *tdtd = _arg;
- struct lu_target *lut = tdtd->tdtd_lut;
- struct ptlrpc_thread *thread = &lut->lut_tdtd_commit_thread;
- struct l_wait_info lwi = { 0 };
- struct lu_env env;
- struct list_head list;
+ struct lu_env *env = &tdtd->tdtd_env;
+ LIST_HEAD(list);
int rc;
struct top_multiple_thandle *tmt;
struct top_multiple_thandle *tmp;
ENTRY;
- rc = lu_env_init(&env, LCT_LOCAL | LCT_MD_THREAD);
- if (rc != 0)
- RETURN(rc);
-
- spin_lock(&tdtd->tdtd_batchid_lock);
- thread->t_flags = SVC_RUNNING;
- spin_unlock(&tdtd->tdtd_batchid_lock);
- wake_up(&thread->t_ctl_waitq);
- INIT_LIST_HEAD(&list);
- CDEBUG(D_HA, "%s: start commit thread committed batchid "LPU64"\n",
+ CDEBUG(D_HA, "%s: start commit thread committed batchid %llu\n",
tdtd->tdtd_lut->lut_obd->obd_name,
tdtd->tdtd_committed_batchid);
- while (distribute_txn_commit_thread_running(lut)) {
+ while (({set_current_state(TASK_IDLE);
+ !kthread_should_stop(); })) {
spin_lock(&tdtd->tdtd_batchid_lock);
list_for_each_entry_safe(tmt, tmp, &tdtd->tdtd_list,
tmt_commit_list) {
* the recoverying is done, unless the update records
* batchid < committed_batchid. */
if (tmt->tmt_batchid <= tdtd->tdtd_committed_batchid) {
+ __set_current_state(TASK_RUNNING);
list_move_tail(&tmt->tmt_commit_list, &list);
} else if (!tdtd->tdtd_lut->lut_obd->obd_recovering) {
+ __set_current_state(TASK_RUNNING);
LASSERTF(tmt->tmt_batchid >= batchid,
- "tmt %p tmt_batchid: "LPU64", batchid "
- LPU64"\n", tmt, tmt->tmt_batchid,
+ "tmt %p tmt_batchid: %llu, batchid "
+ "%llu\n", tmt, tmt->tmt_batchid,
batchid);
/* There are three types of distribution
* transaction result
}
spin_unlock(&tdtd->tdtd_batchid_lock);
- CDEBUG(D_HA, "%s: batchid: "LPU64" committed batchid "
- LPU64"\n", tdtd->tdtd_lut->lut_obd->obd_name, batchid,
+ CDEBUG(D_HA, "%s: batchid: %llu committed batchid "
+ "%llu\n", tdtd->tdtd_lut->lut_obd->obd_name, batchid,
tdtd->tdtd_committed_batchid);
/* update globally committed on a storage */
if (batchid > tdtd->tdtd_committed_batchid) {
- distribute_txn_commit_batchid_update(&env, tdtd,
+ rc = distribute_txn_commit_batchid_update(env, tdtd,
batchid);
- spin_lock(&tdtd->tdtd_batchid_lock);
- if (batchid > tdtd->tdtd_batchid) {
- /* This might happen during recovery,
- * batchid is initialized as last transno,
- * and the batchid in the update records
- * on other MDTs might be bigger than
- * the batchid, so we need update it to
- * avoid duplicate batchid. */
- CDEBUG(D_HA, "%s update batchid from "LPU64
- " to "LPU64"\n",
- tdtd->tdtd_lut->lut_obd->obd_name,
- tdtd->tdtd_batchid, batchid);
- tdtd->tdtd_batchid = batchid;
- }
- spin_unlock(&tdtd->tdtd_batchid_lock);
+ if (rc == 0)
+ batchid = 0;
}
/* cancel the records for committed batchid's */
/* XXX: should we postpone cancel's till the end of recovery? */
list_for_each_entry_safe(tmt, tmp, &list, tmt_commit_list) {
if (tmt->tmt_batchid > committed)
break;
+ __set_current_state(TASK_RUNNING);
list_del_init(&tmt->tmt_commit_list);
if (tmt->tmt_result <= 0)
- distribute_txn_cancel_records(&env, tmt);
+ distribute_txn_cancel_records(env, tmt);
top_multiple_thandle_put(tmt);
}
- l_wait_event(tdtd->tdtd_commit_thread_waitq,
- !distribute_txn_commit_thread_running(lut) ||
- committed < tdtd->tdtd_committed_batchid ||
- tdtd_ready_for_cancel_log(tdtd), &lwi);
- };
+ if (current->state)
+ schedule();
+
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_OUT_OBJECT_MISS)) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(5));
+ }
+ }
- l_wait_event(tdtd->tdtd_commit_thread_waitq,
- atomic_read(&tdtd->tdtd_refcount) == 0, &lwi);
+ while (({set_current_state(TASK_IDLE);
+ atomic_read(&tdtd->tdtd_refcount) != 0; }))
+ schedule();
+ __set_current_state(TASK_RUNNING);
spin_lock(&tdtd->tdtd_batchid_lock);
list_for_each_entry_safe(tmt, tmp, &tdtd->tdtd_list,
top_multiple_thandle_dump(tmt, D_HA);
top_multiple_thandle_put(tmt);
}
-
- thread->t_flags = SVC_STOPPED;
- lu_env_fini(&env);
- wake_up(&thread->t_ctl_waitq);
-
RETURN(0);
}
__u32 index)
{
struct task_struct *task;
- struct l_wait_info lwi = { 0 };
int rc;
ENTRY;
- spin_lock_init(&tdtd->tdtd_batchid_lock);
INIT_LIST_HEAD(&tdtd->tdtd_list);
+ INIT_LIST_HEAD(&tdtd->tdtd_replay_finish_list);
+ INIT_LIST_HEAD(&tdtd->tdtd_replay_list);
+ spin_lock_init(&tdtd->tdtd_batchid_lock);
+ spin_lock_init(&tdtd->tdtd_replay_list_lock);
+ tdtd->tdtd_replay_handler = distribute_txn_replay_handle;
+ tdtd->tdtd_replay_ready = 0;
tdtd->tdtd_batchid = lut->lut_last_transno + 1;
- init_waitqueue_head(&lut->lut_tdtd_commit_thread.t_ctl_waitq);
- init_waitqueue_head(&tdtd->tdtd_commit_thread_waitq);
+ init_waitqueue_head(&tdtd->tdtd_recovery_threads_waitq);
atomic_set(&tdtd->tdtd_refcount, 0);
+ atomic_set(&tdtd->tdtd_recovery_threads_count, 0);
tdtd->tdtd_lut = lut;
+ if (lut->lut_bottom->dd_rdonly)
+ RETURN(0);
+
rc = distribute_txn_commit_batchid_init(env, tdtd);
if (rc != 0)
RETURN(rc);
- task = kthread_run(distribute_txn_commit_thread, tdtd, "tdtd-%u",
- index);
- if (IS_ERR(task))
+ rc = lu_env_init(&tdtd->tdtd_env, LCT_LOCAL | LCT_MD_THREAD);
+ if (rc)
+ RETURN(rc);
+
+ task = kthread_create(distribute_txn_commit_thread, tdtd, "dist_txn-%u",
+ index);
+ if (IS_ERR(task)) {
+ lu_env_fini(&tdtd->tdtd_env);
RETURN(PTR_ERR(task));
+ }
+ tdtd->tdtd_commit_task = task;
+ wake_up_process(task);
- l_wait_event(lut->lut_tdtd_commit_thread.t_ctl_waitq,
- distribute_txn_commit_thread_running(lut) ||
- distribute_txn_commit_thread_stopped(lut), &lwi);
RETURN(0);
}
EXPORT_SYMBOL(distribute_txn_init);
void distribute_txn_fini(const struct lu_env *env,
struct target_distribute_txn_data *tdtd)
{
- struct lu_target *lut = tdtd->tdtd_lut;
+ struct top_multiple_thandle *tmt;
+ LIST_HEAD(list);
/* Stop cancel thread */
- if (lut == NULL || !distribute_txn_commit_thread_running(lut))
+ if (!tdtd->tdtd_commit_task)
return;
+ kthread_stop(tdtd->tdtd_commit_task);
+ tdtd->tdtd_commit_task = NULL;
+
spin_lock(&tdtd->tdtd_batchid_lock);
- lut->lut_tdtd_commit_thread.t_flags = SVC_STOPPING;
+ list_splice_init(&tdtd->tdtd_list, &list);
spin_unlock(&tdtd->tdtd_batchid_lock);
- wake_up(&tdtd->tdtd_commit_thread_waitq);
- wait_event(lut->lut_tdtd_commit_thread.t_ctl_waitq,
- lut->lut_tdtd_commit_thread.t_flags & SVC_STOPPED);
+
+ CDEBUG(D_INFO, "%s stopping distribute txn commit thread.\n",
+ tdtd->tdtd_lut->lut_obd->obd_name);
+ while ((tmt = list_first_entry_or_null(&list,
+ struct top_multiple_thandle,
+ tmt_commit_list)) != NULL) {
+ list_del_init(&tmt->tmt_commit_list);
+ top_multiple_thandle_dump(tmt, D_HA);
+ top_multiple_thandle_put(tmt);
+ }
+
+ lu_env_fini(&tdtd->tdtd_env);
dtrq_list_destroy(tdtd);
if (tdtd->tdtd_batchid_obj != NULL) {
- lu_object_put(env, &tdtd->tdtd_batchid_obj->do_lu);
+ dt_object_put(env, tdtd->tdtd_batchid_obj);
tdtd->tdtd_batchid_obj = NULL;
}
}