4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2015, 2017, Intel Corporation.
26 * lustre/target/update_trans.c
28 * This file implements the update distribute transaction API.
30 * To manage the cross-MDT operation (distribute operation) transaction,
31 * the transaction is also separated into two layers on the MD stack: the top
32 * transaction and its sub transactions.
34 * During the distribute operation, the top transaction is created in the LOD
35 * layer and represents the whole operation. Sub transactions are created by
36 * each OSD or OSP. Top transaction start/stop triggers the start/stop of all
37 * of its sub transactions. The top transaction (the whole operation) is
38 * committed only after all of its sub transactions are committed.
40 * There are three kinds of transactions:
41 * 1. Local transaction: all updates are in a single local OSD.
42 * 2. Remote transaction: all updates are only in the remote OSD,
43 *    i.e. locally all updates are in the OSP.
44 * 3. Mixed transaction: updates are both in the local OSD and remote OSDs.
47 * Author: Di Wang <di.wang@intel.com>
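 *
 * A minimal, illustrative sketch of how a caller (e.g. LOD) drives a
 * distribute transaction with the API implemented below; error handling and
 * the actual update declaration/execution steps are omitted, and "remote_dt"
 * is just a placeholder name for one participating dt_device:
 *
 *	th = top_trans_create(env, master_dev);
 *	sub_th = thandle_get_sub_by_dt(env, th, remote_dt);
 *	... declare updates against th and its sub thandles ...
 *	rc = top_trans_start(env, master_dev, th);
 *	... execute the declared updates ...
 *	rc = top_trans_stop(env, master_dev, th);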
50 #define DEBUG_SUBSYSTEM S_CLASS
52 #include <linux/kthread.h>
53 #include <lu_target.h>
54 #include <lustre_log.h>
55 #include <lustre_update.h>
57 #include <obd_class.h>
58 #include <tgt_internal.h>
62 * Dump top multiple thandle
64 * Dump the top multiple thandle and all of its sub thandles to the debug log.
66 * \param[in] mask debug mask
67 * \param[in] tmt top_multiple_thandle to be dumped
69 static void top_multiple_thandle_dump(struct top_multiple_thandle *tmt,
72 struct sub_thandle *st;
74 LASSERT(tmt->tmt_magic == TOP_THANDLE_MAGIC);
75 CDEBUG(mask, "%s tmt %p refcount %d committed %d result %d batchid %llu\n",
76 tmt->tmt_master_sub_dt ?
77 tmt->tmt_master_sub_dt->dd_lu_dev.ld_obd->obd_name :
79 tmt, atomic_read(&tmt->tmt_refcount), tmt->tmt_committed,
80 tmt->tmt_result, tmt->tmt_batchid);
82 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
83 struct sub_thandle_cookie *stc;
85 CDEBUG(mask, "st %p obd %s committed %d stopped %d sub_th %p\n",
86 st, st->st_dt->dd_lu_dev.ld_obd->obd_name,
87 st->st_committed, st->st_stopped, st->st_sub_th);
89 list_for_each_entry(stc, &st->st_cookie_list, stc_list) {
90 CDEBUG(mask, " cookie "DFID".%u\n",
91 PFID(&stc->stc_cookie.lgc_lgl.lgl_oi.oi_fid),
92 stc->stc_cookie.lgc_index);
98 * Declare write update to sub device
100 * Declare the writing of update llog records to the sub device during the distribute
103 * \param[in] env execution environment
104 * \param[in] record update records being written
105 * \param[in] sub_th sub transaction handle
106 * \param[in] record_size total update record size
108 * \retval 0 if the declaration succeeds
109 * \retval negative errno if the declaration fails
111 static int sub_declare_updates_write(const struct lu_env *env,
112 struct llog_update_record *record,
113 struct thandle *sub_th, size_t record_size)
115 struct llog_ctxt *ctxt;
116 struct dt_device *dt = sub_th->th_dev;
117 int left = record_size;
120 /* If ctxt is NULL, there is no need to write updates,
121 * for example if the OSP is used to connect to an OST */
122 ctxt = llog_get_context(dt->dd_lu_dev.ld_obd,
123 LLOG_UPDATELOG_ORIG_CTXT);
125 /* Not ready to record updates yet. */
126 if (ctxt == NULL || ctxt->loc_handle == NULL) {
131 rc = llog_declare_add(env, ctxt->loc_handle,
132 &record->lur_hdr, sub_th);
136 while (left > ctxt->loc_chunk_size) {
137 rc = llog_declare_add(env, ctxt->loc_handle,
138 &record->lur_hdr, sub_th);
142 left -= ctxt->loc_chunk_size;
152 * Write update to sub device
154 * Write the llog update record to the sub device during the distribute
155 * transaction. If it succeeds, the llog cookie of the record is added to
156 * the sub thandle's st_cookie_list.
158 * \param[in] env execution environment
159 * \param[in] record update record being written
160 * \param[in] sub_th sub thandle the record is written for
163 * \retval 1 if writing succeeds
164 * \retval negative errno if writing fails
166 static int sub_updates_write(const struct lu_env *env,
167 struct llog_update_record *record,
168 struct sub_thandle *sub_th)
170 struct dt_device *dt = sub_th->st_dt;
171 struct llog_ctxt *ctxt;
172 struct llog_update_record *lur = NULL;
173 __u32 update_count = 0;
174 __u32 param_count = 0;
175 __u32 last_update_count = 0;
176 __u32 last_param_count = 0;
180 struct sub_thandle_cookie *stc;
186 ctxt = llog_get_context(dt->dd_lu_dev.ld_obd,
187 LLOG_UPDATELOG_ORIG_CTXT);
188 /* If ctxt == NULL, the updates are on an OST (this only happens
189 * during migration), and we do not track those updates for now */
190 /* If ctxt->loc_handle == NULL, there is no need to record the
191 * update; this usually happens in the error handling path */
192 if (ctxt == NULL || ctxt->loc_handle == NULL) {
197 /* Since the cross-MDT updates will include both local
198 * and remote updates, the update ops count must be > 1 */
199 LASSERT(record->lur_update_rec.ur_update_count > 1);
200 LASSERTF(record->lur_hdr.lrh_len == llog_update_record_size(record),
201 "lrh_len %u record_size %zu\n", record->lur_hdr.lrh_len,
202 llog_update_record_size(record));
205 * If its size > llog chunk_size, the record must be split into chunks
206 * when written to the update llog; NB the padding must be >= LLOG_MIN_REC_SIZE.
208 * So check that the padding length is either >= LLOG_MIN_REC_SIZE or 0
209 * (the record length exactly matches the chunk size).
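 *
 * For example (illustrative numbers only): with a loc_chunk_size of 32768
 * bytes, a record with lrh_len = 32000 leaves 768 bytes of padding, which
 * should easily be >= LLOG_MIN_REC_SIZE, so it is written below as a single
 * record; a record with lrh_len = 32760 would leave only 8 bytes, too small
 * for a padding record, so it falls through to the splitting path instead.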
212 reclen = record->lur_hdr.lrh_len;
213 if (reclen + LLOG_MIN_REC_SIZE <= ctxt->loc_chunk_size ||
214 reclen == ctxt->loc_chunk_size) {
217 GOTO(llog_put, rc = -ENOMEM);
218 INIT_LIST_HEAD(&stc->stc_list);
220 rc = llog_add(env, ctxt->loc_handle, &record->lur_hdr,
221 &stc->stc_cookie, sub_th->st_sub_th);
223 CDEBUG(D_INFO, "%s: Add update log "DFID".%u: rc = %d\n",
224 dt->dd_lu_dev.ld_obd->obd_name,
225 PFID(&stc->stc_cookie.lgc_lgl.lgl_oi.oi_fid),
226 stc->stc_cookie.lgc_index, rc);
229 list_add(&stc->stc_list, &sub_th->st_cookie_list);
238 /* Split the record into chunk_size-sized update records */
239 OBD_ALLOC_LARGE(lur, ctxt->loc_chunk_size);
241 GOTO(llog_put, rc = -ENOMEM);
243 memcpy(lur, &record->lur_hdr, sizeof(record->lur_hdr));
244 lur->lur_update_rec.ur_update_count = 0;
245 lur->lur_update_rec.ur_param_count = 0;
246 start = (char *)&record->lur_update_rec.ur_ops;
249 if (update_count < record->lur_update_rec.ur_update_count)
250 next = (char *)update_op_next_op(
251 (struct update_op *)cur);
252 else if (param_count < record->lur_update_rec.ur_param_count)
253 next = (char *)update_param_next_param(
254 (struct update_param *)cur);
258 reclen = __llog_update_record_size(
259 __update_records_size(next - start));
260 if ((reclen + LLOG_MIN_REC_SIZE <= ctxt->loc_chunk_size ||
261 reclen == ctxt->loc_chunk_size) &&
266 record->lur_update_rec.ur_update_count)
268 else if (param_count <
269 record->lur_update_rec.ur_param_count)
274 lur->lur_update_rec.ur_update_count = update_count -
276 lur->lur_update_rec.ur_param_count = param_count -
278 memcpy(&lur->lur_update_rec.ur_ops, start, cur - start);
279 lur->lur_hdr.lrh_len = llog_update_record_size(lur);
281 LASSERT(lur->lur_hdr.lrh_len ==
282 __llog_update_record_size(
283 __update_records_size(cur - start)));
284 LASSERT(lur->lur_hdr.lrh_len <= ctxt->loc_chunk_size);
286 update_records_dump(&lur->lur_update_rec, D_INFO, true);
290 GOTO(llog_put, rc = -ENOMEM);
291 INIT_LIST_HEAD(&stc->stc_list);
293 rc = llog_add(env, ctxt->loc_handle, &lur->lur_hdr,
294 &stc->stc_cookie, sub_th->st_sub_th);
296 CDEBUG(D_INFO, "%s: Add update log "DFID".%u: rc = %d\n",
297 dt->dd_lu_dev.ld_obd->obd_name,
298 PFID(&stc->stc_cookie.lgc_lgl.lgl_oi.oi_fid),
299 stc->stc_cookie.lgc_index, rc);
302 list_add(&stc->stc_list, &sub_th->st_cookie_list);
309 last_update_count = update_count;
310 last_param_count = param_count;
312 lur->lur_update_rec.ur_update_count = 0;
313 lur->lur_update_rec.ur_param_count = 0;
314 lur->lur_update_rec.ur_flags |= UPDATE_RECORD_CONTINUE;
319 OBD_FREE_LARGE(lur, ctxt->loc_chunk_size);
326 * Prepare the update records.
328 * Merge params and ops into the update records, then initialize
331 * During the transaction execution phase, parameters and update ops
332 * are collected in two different buffers (see lod_updates_pack()); at
333 * transaction stop they need to be merged into one buffer,
334 * so that they can be written to the update log.
336 * \param[in] env execution environment
337 * \param[in] tmt top_multiple_thandle for distribute txn
339 * \retval 0 if merging succeeds.
340 * \retval negative errno if merging fails.
342 static int prepare_writing_updates(const struct lu_env *env,
343 struct top_multiple_thandle *tmt)
345 struct thandle_update_records *tur = tmt->tmt_update_records;
346 struct llog_update_record *lur;
347 struct update_params *params;
351 if (tur == NULL || tur->tur_update_records == NULL ||
352 tur->tur_update_params == NULL)
355 lur = tur->tur_update_records;
356 /* Extend the update records buffer if needed */
357 params_size = update_params_size(tur->tur_update_params,
358 tur->tur_update_param_count);
359 LASSERT(lur->lur_update_rec.ur_param_count == 0);
360 update_size = llog_update_record_size(lur);
361 if (cfs_size_round(update_size + params_size) >
362 tur->tur_update_records_buf_size) {
365 rc = tur_update_records_extend(tur,
366 cfs_size_round(update_size + params_size));
370 lur = tur->tur_update_records;
373 params = update_records_get_params(&lur->lur_update_rec);
374 memcpy(params, tur->tur_update_params, params_size);
376 lur->lur_update_rec.ur_param_count = tur->tur_update_param_count;
377 lur->lur_update_rec.ur_batchid = tmt->tmt_batchid;
378 /* Init update record header */
379 lur->lur_hdr.lrh_len = llog_update_record_size(lur);
380 lur->lur_hdr.lrh_type = UPDATE_REC;
382 /* Dump updates for debugging purpose */
383 update_records_dump(&lur->lur_update_rec, D_INFO, true);
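	/*
	 * Resulting buffer layout, as a sketch of the merge done above (the
	 * exact on-disk framing is defined by the llog code):
	 *
	 *	+---------+-----------------------------------+-------------+
	 *	| lur_hdr | lur_update_rec: ur_update_count,  | params from |
	 *	| (llog   | ur_param_count, ur_batchid, ...,   | tur_update_ |
	 *	| header) | ur_ops[]                           | params      |
	 *	+---------+-----------------------------------+-------------+
	 *
	 * update_records_get_params() returns the parameter area of the
	 * record (following the packed update ops), which is where the
	 * parameter block is copied, and lrh_len is then recomputed with
	 * llog_update_record_size().
	 */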
389 distribute_txn_commit_thread_running(struct lu_target *lut)
391 return lut->lut_tdtd_commit_thread.t_flags & SVC_RUNNING;
395 distribute_txn_commit_thread_stopped(struct lu_target *lut)
397 return lut->lut_tdtd_commit_thread.t_flags & SVC_STOPPED;
401 * Top thandle commit callback
403 * This callback is called when all of the sub transactions are committed.
405 * \param[in] tmt top multiple thandle which has been committed.
407 static void top_trans_committed_cb(struct top_multiple_thandle *tmt)
409 struct lu_target *lut;
412 LASSERT(atomic_read(&tmt->tmt_refcount) > 0);
414 top_multiple_thandle_dump(tmt, D_HA);
415 tmt->tmt_committed = 1;
416 lut = dt2lu_dev(tmt->tmt_master_sub_dt)->ld_site->ls_tgt;
417 if (distribute_txn_commit_thread_running(lut))
418 wake_up(&lut->lut_tdtd->tdtd_commit_thread_waitq);
422 struct sub_thandle *lookup_sub_thandle(struct top_multiple_thandle *tmt,
423 struct dt_device *dt_dev)
425 struct sub_thandle *st;
427 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
428 if (st->st_dt == dt_dev)
433 EXPORT_SYMBOL(lookup_sub_thandle);
435 struct sub_thandle *create_sub_thandle(struct top_multiple_thandle *tmt,
436 struct dt_device *dt_dev)
438 struct sub_thandle *st;
442 RETURN(ERR_PTR(-ENOMEM));
444 INIT_LIST_HEAD(&st->st_sub_list);
445 INIT_LIST_HEAD(&st->st_cookie_list);
448 list_add(&st->st_sub_list, &tmt->tmt_sub_thandle_list);
452 static void sub_trans_commit_cb_internal(struct top_multiple_thandle *tmt,
453 struct thandle *sub_th, int err)
455 struct sub_thandle *st;
456 bool all_committed = true;
458 /* Check if all sub thandles are committed */
459 spin_lock(&tmt->tmt_sub_lock);
460 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
461 if (st->st_sub_th == sub_th) {
462 st->st_committed = 1;
465 if (!st->st_committed)
466 all_committed = false;
468 spin_unlock(&tmt->tmt_sub_lock);
470 if (tmt->tmt_result == 0)
471 tmt->tmt_result = err;
474 top_trans_committed_cb(tmt);
476 top_multiple_thandle_dump(tmt, D_INFO);
477 top_multiple_thandle_put(tmt);
482 * Sub thandle commit callback
484 * Mark the sub thandle as committed and, if all sub thandles are committed,
485 * notify the top thandle.
487 * \param[in] env execution environment
488 * \param[in] sub_th sub thandle being committed
489 * \param[in] cb commit callback
490 * \param[in] err trans result
492 static void sub_trans_commit_cb(struct lu_env *env,
493 struct thandle *sub_th,
494 struct dt_txn_commit_cb *cb, int err)
496 struct top_multiple_thandle *tmt = cb->dcb_data;
498 sub_trans_commit_cb_internal(tmt, sub_th, err);
501 static void sub_thandle_register_commit_cb(struct sub_thandle *st,
502 struct top_multiple_thandle *tmt)
504 LASSERT(st->st_sub_th != NULL);
505 top_multiple_thandle_get(tmt);
506 st->st_commit_dcb.dcb_func = sub_trans_commit_cb;
507 st->st_commit_dcb.dcb_data = tmt;
508 INIT_LIST_HEAD(&st->st_commit_dcb.dcb_linkage);
509 dt_trans_cb_add(st->st_sub_th, &st->st_commit_dcb);
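/*
 * Commit-tracking flow implied by the registration above (sketch): when a sub
 * transaction commits on its device, st_commit_dcb fires sub_trans_commit_cb(),
 * which calls sub_trans_commit_cb_internal() to mark the sub thandle committed
 * and to record the first error in tmt_result; once every sub thandle is
 * committed, top_trans_committed_cb() marks the whole tmt as committed and
 * wakes the commit thread, so the committed batchid can be written to disk and
 * the update log records cancelled.
 */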
513 * Sub thandle stop callback
515 * After a sub thandle is stopped, this callback is called to notify
518 * \param[in] sub_th sub thandle which has been stopped
519 * \param[in] err result of the sub transaction
521 static void sub_trans_stop_cb(struct lu_env *env,
522 struct thandle *sub_th,
523 struct dt_txn_commit_cb *cb, int err)
525 struct sub_thandle *st;
526 struct top_multiple_thandle *tmt = cb->dcb_data;
529 spin_lock(&tmt->tmt_sub_lock);
530 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
534 if (st->st_dt == sub_th->th_dev) {
540 spin_unlock(&tmt->tmt_sub_lock);
542 wake_up(&tmt->tmt_stop_waitq);
546 static void sub_thandle_register_stop_cb(struct sub_thandle *st,
547 struct top_multiple_thandle *tmt)
549 st->st_stop_dcb.dcb_func = sub_trans_stop_cb;
550 st->st_stop_dcb.dcb_data = tmt;
551 st->st_stop_dcb.dcb_flags = DCB_TRANS_STOP;
552 INIT_LIST_HEAD(&st->st_stop_dcb.dcb_linkage);
553 dt_trans_cb_add(st->st_sub_th, &st->st_stop_dcb);
559 * Create transaction handle for sub_thandle
561 * \param[in] env execution environment
562 * \param[in] top_th top thandle
563 * \param[in] st sub_thandle
565 * \retval 0 if creation succeeds.
566 * \retval negative errno if creation fails.
568 int sub_thandle_trans_create(const struct lu_env *env,
569 struct top_thandle *top_th,
570 struct sub_thandle *st)
572 struct thandle *sub_th;
574 sub_th = dt_trans_create(env, st->st_dt);
576 return PTR_ERR(sub_th);
578 sub_th->th_top = &top_th->tt_super;
579 st->st_sub_th = sub_th;
581 sub_th->th_wait_submit = 1;
582 sub_thandle_register_stop_cb(st, top_th->tt_multiple_thandle);
587 * Create the top transaction.
589 * Create the top transaction on the master device. It will create a top
590 * thandle and a sub thandle on the master device.
592 * \param[in] env execution environment
593 * \param[in] master_dev master device on which the top thandle will be created
595 * \retval pointer to the created thandle.
596 * \retval ERR_PTR(errno) if creation failed.
599 top_trans_create(const struct lu_env *env, struct dt_device *master_dev)
601 struct top_thandle *top_th;
602 struct thandle *child_th;
604 OBD_ALLOC_GFP(top_th, sizeof(*top_th), __GFP_IO);
606 return ERR_PTR(-ENOMEM);
608 top_th->tt_super.th_top = &top_th->tt_super;
610 if (master_dev != NULL) {
611 child_th = dt_trans_create(env, master_dev);
612 if (IS_ERR(child_th)) {
613 OBD_FREE_PTR(top_th);
617 child_th->th_top = &top_th->tt_super;
618 child_th->th_wait_submit = 1;
619 top_th->tt_master_sub_thandle = child_th;
621 return &top_th->tt_super;
623 EXPORT_SYMBOL(top_trans_create);
626 * Declare write update transaction
628 * If there are updates recorded in this transaction, declare writing the
629 * update record to the update llog on each involved target.
631 * \param[in] env execution environment
632 * \param[in] tmt top multiple transaction handle
634 * \retval 0 if writing succeeds
635 * \retval negative errno if writing fails
637 static int declare_updates_write(const struct lu_env *env,
638 struct top_multiple_thandle *tmt)
640 struct llog_update_record *record;
641 struct sub_thandle *st;
644 record = tmt->tmt_update_records->tur_update_records;
645 /* Declare the update write for all other targets */
646 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
647 if (st->st_sub_th == NULL)
650 rc = sub_declare_updates_write(env, record, st->st_sub_th,
651 tmt->tmt_record_size);
660 * Assign batchid to the distribute transaction.
662 * Assign a batchid to the distribute transaction and add it to the distribute transaction list.
664 * \param[in] new the distribute transaction to be assigned a batchid
666 static void distribute_txn_assign_batchid(struct top_multiple_thandle *new)
668 struct target_distribute_txn_data *tdtd;
669 struct dt_device *dt = new->tmt_master_sub_dt;
670 struct sub_thandle *st;
673 tdtd = dt2lu_dev(dt)->ld_site->ls_tgt->lut_tdtd;
674 spin_lock(&tdtd->tdtd_batchid_lock);
675 new->tmt_batchid = tdtd->tdtd_batchid++;
676 list_add_tail(&new->tmt_commit_list, &tdtd->tdtd_list);
677 spin_unlock(&tdtd->tdtd_batchid_lock);
678 list_for_each_entry(st, &new->tmt_sub_thandle_list, st_sub_list) {
679 if (st->st_sub_th != NULL)
680 sub_thandle_register_commit_cb(st, new);
682 top_multiple_thandle_get(new);
683 top_multiple_thandle_dump(new, D_INFO);
687 * Insert distribute transaction into the distribute txn list.
689 * Insert the distribute transaction into the distribute txn list, keeping the list sorted by batchid.
691 * \param[in] new the distribute txn to be inserted.
693 void distribute_txn_insert_by_batchid(struct top_multiple_thandle *new)
695 struct dt_device *dt = new->tmt_master_sub_dt;
696 struct top_multiple_thandle *tmt;
697 struct target_distribute_txn_data *tdtd;
698 struct sub_thandle *st;
699 bool at_head = false;
702 tdtd = dt2lu_dev(dt)->ld_site->ls_tgt->lut_tdtd;
704 spin_lock(&tdtd->tdtd_batchid_lock);
705 list_for_each_entry_reverse(tmt, &tdtd->tdtd_list, tmt_commit_list) {
706 if (new->tmt_batchid > tmt->tmt_batchid) {
707 list_add(&new->tmt_commit_list, &tmt->tmt_commit_list);
711 if (list_empty(&new->tmt_commit_list)) {
713 list_add(&new->tmt_commit_list, &tdtd->tdtd_list);
715 spin_unlock(&tdtd->tdtd_batchid_lock);
717 list_for_each_entry(st, &new->tmt_sub_thandle_list, st_sub_list) {
718 if (st->st_sub_th != NULL)
719 sub_thandle_register_commit_cb(st, new);
722 top_multiple_thandle_get(new);
723 top_multiple_thandle_dump(new, D_INFO);
724 if (new->tmt_committed && at_head)
725 wake_up(&tdtd->tdtd_commit_thread_waitq);
729 * Prepare cross-MDT operation.
731 * Create the update record buffer to record updates for cross-MDT operation,
732 * add master sub transaction to tt_sub_trans_list, and declare the update
735 * During updates packing, all of the parameters are packed into
736 * tur_update_params, and the updates are packed into tur_update_records.
737 * Then, at transaction stop, parameters and updates are merged
738 * into one updates buffer.
740 * The master thandle is also added to the sub_th list, so it is
741 * easy to track the commit status.
743 * \param[in] env execution environment
744 * \param[in] tmt top multiple transaction handle
746 * \retval 0 if preparation succeeds.
747 * \retval negative errno if preparation fails.
749 static int prepare_multiple_node_trans(const struct lu_env *env,
750 struct top_multiple_thandle *tmt)
752 struct thandle_update_records *tur;
756 if (tmt->tmt_update_records == NULL) {
757 tur = &update_env_info(env)->uti_tur;
758 rc = check_and_prepare_update_record(env, tur);
762 tmt->tmt_update_records = tur;
763 distribute_txn_assign_batchid(tmt);
766 rc = declare_updates_write(env, tmt);
772 * Start the top transaction.
774 * Start all of its sub transactions, including the master sub transaction.
776 * \param[in] env execution environment
777 * \param[in] master_dev master device on which the top thandle is started
778 * \param[in] th top thandle
780 * \retval 0 if transaction start succeeds.
781 * \retval negative errno if start fails.
783 int top_trans_start(const struct lu_env *env, struct dt_device *master_dev,
786 struct top_thandle *top_th = container_of(th, struct top_thandle,
788 struct sub_thandle *st;
789 struct top_multiple_thandle *tmt = top_th->tt_multiple_thandle;
795 top_th->tt_master_sub_thandle->th_sync = th->th_sync;
797 top_th->tt_master_sub_thandle->th_local = th->th_local;
798 rc = dt_trans_start(env, top_th->tt_master_sub_thandle->th_dev,
799 top_th->tt_master_sub_thandle);
803 tmt = top_th->tt_multiple_thandle;
804 rc = prepare_multiple_node_trans(env, tmt);
808 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
809 if (st->st_sub_th == NULL)
812 st->st_sub_th->th_sync = th->th_sync;
814 st->st_sub_th->th_local = th->th_local;
815 rc = dt_trans_start(env, st->st_sub_th->th_dev,
820 LASSERT(st->st_started == 0);
827 EXPORT_SYMBOL(top_trans_start);
830 * Check whether we need to write the updates record
832 * Check if the updates for the top_thandle need to be written
833 * to all targets. The updates are written only if the transaction
834 * succeeds and the update count is >= 2, since a cross-MDT operation
835 * always includes at least one local and one remote update.
836 * \param[in] top_th top thandle.
838 * \retval true if it needs to write updates
839 * \retval false if it does not need to write updates
841 static bool top_check_write_updates(struct top_thandle *top_th)
843 struct top_multiple_thandle *tmt;
844 struct thandle_update_records *tur;
846 /* Do not write updates to records if the transaction fails */
847 if (top_th->tt_super.th_result != 0)
850 tmt = top_th->tt_multiple_thandle;
854 tur = tmt->tmt_update_records;
858 /* Hmm, bogus update records: a cross-MDT operation
859 * should include both local and remote updates, so the
860 * update count should be >= 2 */
861 if (tur->tur_update_records == NULL ||
862 tur->tur_update_records->lur_update_rec.ur_update_count <= 1)
869 * Check if top transaction is stopped
871 * Check if the top transaction is stopped; the top transaction is stopped
872 * only after all of its sub transactions are stopped.
874 * \param [in] top_th top thandle
876 * \retval true if the top transaction is stopped.
877 * \retval false if the top transaction is not stopped.
879 static bool top_trans_is_stopped(struct top_thandle *top_th)
881 struct top_multiple_thandle *tmt;
882 struct sub_thandle *st;
883 bool all_stopped = true;
885 tmt = top_th->tt_multiple_thandle;
886 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
887 if (!st->st_stopped && st->st_sub_th != NULL) {
892 if (st->st_result != 0 &&
893 top_th->tt_super.th_result == 0)
894 top_th->tt_super.th_result = st->st_result;
901 * Wait for the result of the top transaction
903 * Wait until all sub transactions get their results.
905 * \param [in] top_th top thandle.
907 * \retval the result of top thandle.
909 static int top_trans_wait_result(struct top_thandle *top_th)
911 struct l_wait_info lwi = {0};
913 l_wait_event(top_th->tt_multiple_thandle->tmt_stop_waitq,
914 top_trans_is_stopped(top_th), &lwi);
916 RETURN(top_th->tt_super.th_result);
920 * Stop the top transaction.
922 * Stop the transaction on the master device first, then stop transactions
923 * on other sub devices.
925 * \param[in] env execution environment
926 * \param[in] master_dev master device of the top thandle
927 * \param[in] th top thandle
929 * \retval 0 if stop transaction succeeds.
930 * \retval negative errno if stop transaction fails.
932 int top_trans_stop(const struct lu_env *env, struct dt_device *master_dev,
935 struct top_thandle *top_th = container_of(th, struct top_thandle,
937 struct sub_thandle *st;
938 struct sub_thandle *master_st;
939 struct top_multiple_thandle *tmt;
940 struct thandle_update_records *tur;
941 bool write_updates = false;
945 if (likely(top_th->tt_multiple_thandle == NULL)) {
946 LASSERT(master_dev != NULL);
949 top_th->tt_master_sub_thandle->th_sync = th->th_sync;
951 top_th->tt_master_sub_thandle->th_local = th->th_local;
952 rc = dt_trans_stop(env, master_dev,
953 top_th->tt_master_sub_thandle);
954 OBD_FREE_PTR(top_th);
958 tmt = top_th->tt_multiple_thandle;
959 tur = tmt->tmt_update_records;
961 /* Note: we need to stop the master thandle first; the stop
962 * callback then fills the master transno into the update logs,
963 * which are then sent to the other MDTs */
964 /* get the master sub thandle */
965 master_st = lookup_sub_thandle(tmt, tmt->tmt_master_sub_dt);
966 write_updates = top_check_write_updates(top_th);
968 /* Step 1: write the updates log on Master MDT */
969 if (master_st != NULL && master_st->st_sub_th != NULL &&
971 struct llog_update_record *lur;
973 /* Merge the parameters and updates into one buffer */
974 rc = prepare_writing_updates(env, tmt);
976 CERROR("%s: cannot prepare updates: rc = %d\n",
977 master_dev->dd_lu_dev.ld_obd->obd_name, rc);
979 write_updates = false;
980 GOTO(stop_master_trans, rc);
983 lur = tur->tur_update_records;
984 /* Write updates to the master MDT */
985 rc = sub_updates_write(env, lur, master_st);
987 /* Clean up the common parameters in the update records;
988 * the master transno callback might add more parameters,
989 * and we need to merge the update records again in the
991 if (tur->tur_update_params != NULL)
992 lur->lur_update_rec.ur_param_count = 0;
995 CERROR("%s: write updates failed: rc = %d\n",
996 master_dev->dd_lu_dev.ld_obd->obd_name, rc);
998 write_updates = false;
999 GOTO(stop_master_trans, rc);
1004 /* Step 2: Stop the transaction on the master MDT, and fill the
1005 * master transno into the update logs sent to the other MDTs. */
1006 if (master_st != NULL && master_st->st_sub_th != NULL) {
1008 master_st->st_sub_th->th_local = th->th_local;
1010 master_st->st_sub_th->th_sync = th->th_sync;
1011 master_st->st_sub_th->th_result = th->th_result;
1012 rc = dt_trans_stop(env, master_st->st_dt, master_st->st_sub_th);
1013 /* If the updates are not written, then we call the commit callback
1014 * here; otherwise the callback is done through
1015 * osd(osp)_trans_commit_cb() */
1016 if (!master_st->st_started &&
1017 !list_empty(&tmt->tmt_commit_list))
1018 sub_trans_commit_cb_internal(tmt,
1019 master_st->st_sub_th, rc);
1022 GOTO(stop_other_trans, rc);
1023 } else if (tur != NULL && tur->tur_update_records != NULL) {
1024 struct llog_update_record *lur;
1026 lur = tur->tur_update_records;
1027 if (lur->lur_update_rec.ur_master_transno == 0)
1028 /* Update master transno after master stop
1030 lur->lur_update_rec.ur_master_transno =
1031 tgt_th_info(env)->tti_transno;
1035 /* Step 3: write updates to other MDTs */
1036 if (write_updates) {
1037 struct llog_update_record *lur;
1039 /* The stop callback of the master will add more updates and also update
1040 * the master transno, so merge the parameters and updates into one
1042 rc = prepare_writing_updates(env, tmt);
1044 CERROR("%s: prepare updates failed: rc = %d\n",
1045 master_dev->dd_lu_dev.ld_obd->obd_name, rc);
1047 GOTO(stop_other_trans, rc);
1049 lur = tur->tur_update_records;
1050 list_for_each_entry(st, &tmt->tmt_sub_thandle_list,
1052 if (st->st_sub_th == NULL || st == master_st ||
1053 st->st_sub_th->th_result < 0)
1056 rc = sub_updates_write(env, lur, st);
1065 /* Step 4: Stop the transaction on other MDTs */
1066 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
1067 if (st == master_st || st->st_sub_th == NULL)
1071 st->st_sub_th->th_sync = th->th_sync;
1073 st->st_sub_th->th_local = th->th_local;
1074 st->st_sub_th->th_result = th->th_result;
1075 rc = dt_trans_stop(env, st->st_sub_th->th_dev,
1077 if (unlikely(rc < 0 && th->th_result == 0))
1081 rc = top_trans_wait_result(top_th);
1083 tmt->tmt_result = rc;
1085 /* Balance the refcount taken in top_trans_create. Note: if it is NOT a
1086 * multiple-node transaction, the top transaction will be destroyed here. */
1087 top_multiple_thandle_put(tmt);
1088 OBD_FREE_PTR(top_th);
1091 EXPORT_SYMBOL(top_trans_stop);
1094 * Create top_multiple_thandle for top_thandle
1096 * Create the top_multiple_thandle to manage the multiple-node transaction
1097 * for top_thandle, and it also needs to add master sub thandle to the
1098 * sub trans list now.
1100 * \param[in] env execution environment
1101 * \param[in] top_th the top thandle
1103 * \retval 0 if creation succeeds
1104 * \retval negative errno if creation fails
1106 int top_trans_create_tmt(const struct lu_env *env,
1107 struct top_thandle *top_th)
1109 struct top_multiple_thandle *tmt;
1115 tmt->tmt_magic = TOP_THANDLE_MAGIC;
1116 INIT_LIST_HEAD(&tmt->tmt_sub_thandle_list);
1117 INIT_LIST_HEAD(&tmt->tmt_commit_list);
1118 atomic_set(&tmt->tmt_refcount, 1);
1119 spin_lock_init(&tmt->tmt_sub_lock);
1120 init_waitqueue_head(&tmt->tmt_stop_waitq);
1122 top_th->tt_multiple_thandle = tmt;
1127 static struct sub_thandle *
1128 create_sub_thandle_with_thandle(struct top_thandle *top_th,
1129 struct thandle *sub_th)
1131 struct sub_thandle *st;
1133 /* create a sub thandle, initialize it and add it to the top trans list */
1134 st = create_sub_thandle(top_th->tt_multiple_thandle,
1139 st->st_sub_th = sub_th;
1141 sub_th->th_top = &top_th->tt_super;
1142 sub_thandle_register_stop_cb(st, top_th->tt_multiple_thandle);
1149 * Get sub thandle from the top thandle according to the sub dt_device.
1151 * \param[in] env execution environment
1152 * \param[in] th thandle on the top layer.
1153 * \param[in] sub_dt sub dt_device used to get sub transaction
1155 * \retval thandle of the sub transaction on success
1156 * \retval ERR_PTR(errno) on failure
1158 struct thandle *thandle_get_sub_by_dt(const struct lu_env *env,
1160 struct dt_device *sub_dt)
1162 struct sub_thandle *st = NULL;
1163 struct sub_thandle *master_st = NULL;
1164 struct top_thandle *top_th;
1165 struct thandle *sub_th = NULL;
1169 top_th = container_of(th, struct top_thandle, tt_super);
1171 if (likely(sub_dt == top_th->tt_master_sub_thandle->th_dev))
1172 RETURN(top_th->tt_master_sub_thandle);
1174 if (top_th->tt_multiple_thandle != NULL) {
1175 st = lookup_sub_thandle(top_th->tt_multiple_thandle, sub_dt);
1177 RETURN(st->st_sub_th);
1180 sub_th = dt_trans_create(env, sub_dt);
1184 /* Create top_multiple_thandle if necessary */
1185 if (top_th->tt_multiple_thandle == NULL) {
1186 struct top_multiple_thandle *tmt;
1188 rc = top_trans_create_tmt(env, top_th);
1190 GOTO(stop_trans, rc);
1192 tmt = top_th->tt_multiple_thandle;
1194 /* Add master sub th to the top trans list */
1195 tmt->tmt_master_sub_dt =
1196 top_th->tt_master_sub_thandle->th_dev;
1197 master_st = create_sub_thandle_with_thandle(top_th,
1198 top_th->tt_master_sub_thandle);
1199 if (IS_ERR(master_st)) {
1200 rc = PTR_ERR(master_st);
1202 GOTO(stop_trans, rc);
1206 /* create a sub thandle, initialize it and add it to the top trans list */
1207 st = create_sub_thandle_with_thandle(top_th, sub_th);
1211 GOTO(stop_trans, rc);
1213 st->st_sub_th->th_wait_submit = 1;
1216 if (master_st != NULL) {
1217 list_del(&master_st->st_sub_list);
1218 OBD_FREE_PTR(master_st);
1220 sub_th->th_result = rc;
1221 dt_trans_stop(env, sub_dt, sub_th);
1222 sub_th = ERR_PTR(rc);
1227 EXPORT_SYMBOL(thandle_get_sub_by_dt);
1230 * Top multiple thandle destroy
1232 * Destroy the top multiple thandle and all of its sub thandles.
1234 * \param[in] tmt top_multiple_thandle to be destroyed.
1236 void top_multiple_thandle_destroy(struct top_multiple_thandle *tmt)
1238 struct sub_thandle *st;
1239 struct sub_thandle *tmp;
1241 LASSERT(tmt->tmt_magic == TOP_THANDLE_MAGIC);
1242 list_for_each_entry_safe(st, tmp, &tmt->tmt_sub_thandle_list,
1244 struct sub_thandle_cookie *stc;
1245 struct sub_thandle_cookie *tmp;
1247 list_del(&st->st_sub_list);
1248 list_for_each_entry_safe(stc, tmp, &st->st_cookie_list,
1250 list_del(&stc->stc_list);
1257 EXPORT_SYMBOL(top_multiple_thandle_destroy);
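/*
 * Reference counting sketch for a top_multiple_thandle (based on the code in
 * this file; top_multiple_thandle_get/put themselves are defined elsewhere and
 * put is assumed to call the destroy function above when the count hits zero):
 *
 *	top_trans_create_tmt()                   refcount = 1
 *	distribute_txn_assign_batchid() /
 *	distribute_txn_insert_by_batchid()       +1 for the tdtd commit list
 *	sub_thandle_register_commit_cb()         +1 per registered callback
 *	sub_trans_commit_cb_internal()           -1 per committed sub thandle
 *	top_trans_stop()                         -1 for the creation reference
 *	distribute_txn_commit_thread()           -1 after cancelling the records
 */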
1260 * Cancel the update log on MDTs
1262 * Cancel the update log records on the MDTs for the given committed distribute transaction.
1264 * \param[in] env execution environment
1265 * \param[in] tmt the top multiple thandle whose update records
1266 * will be cancelled.
1268 * \retval 0 if cancellation succeeds.
1269 * \retval negative errno if cancellation fails.
1271 static int distribute_txn_cancel_records(const struct lu_env *env,
1272 struct top_multiple_thandle *tmt)
1274 struct sub_thandle *st;
1277 top_multiple_thandle_dump(tmt, D_INFO);
1278 /* Cancel update logs on other MDTs */
1279 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
1280 struct llog_ctxt *ctxt;
1281 struct obd_device *obd;
1282 struct llog_cookie *cookie;
1283 struct sub_thandle_cookie *stc;
1286 obd = st->st_dt->dd_lu_dev.ld_obd;
1287 ctxt = llog_get_context(obd, LLOG_UPDATELOG_ORIG_CTXT);
1290 list_for_each_entry(stc, &st->st_cookie_list, stc_list) {
1291 cookie = &stc->stc_cookie;
1292 if (fid_is_zero(&cookie->lgc_lgl.lgl_oi.oi_fid))
1295 rc = llog_cat_cancel_records(env, ctxt->loc_handle, 1,
1297 CDEBUG(D_HA, "%s: batchid %llu cancel update log "
1298 DFID".%u: rc = %d\n", obd->obd_name,
1300 PFID(&cookie->lgc_lgl.lgl_oi.oi_fid),
1301 cookie->lgc_index, rc);
1304 llog_ctxt_put(ctxt);
1311 * Check if there are committed transactions
1313 * Check if there are committed transactions in the distribute transaction
1314 * list; the update records of those committed transactions can then be cancelled.
1315 * Because the distribute transactions in the list are sorted by batchid,
1316 * and cancellation is done in batchid order, we only check the first
1317 * transaction (the one with the lowest batchid) in the list.
1319 * \param[in] tdtd distribute txn data where the commit thread runs
1321 * \retval true if it is ready
1322 * \retval false if it is not ready
1324 static bool tdtd_ready_for_cancel_log(struct target_distribute_txn_data *tdtd)
1326 struct top_multiple_thandle *tmt = NULL;
1327 struct obd_device *obd = tdtd->tdtd_lut->lut_obd;
1330 spin_lock(&tdtd->tdtd_batchid_lock);
1331 if (!list_empty(&tdtd->tdtd_list)) {
1332 tmt = list_entry(tdtd->tdtd_list.next,
1333 struct top_multiple_thandle, tmt_commit_list);
1334 if (tmt->tmt_committed &&
1335 (!obd->obd_recovering || (obd->obd_recovering &&
1336 tmt->tmt_batchid <= tdtd->tdtd_committed_batchid)))
1339 spin_unlock(&tdtd->tdtd_batchid_lock);
1344 struct distribute_txn_bid_data {
1345 struct dt_txn_commit_cb dtbd_cb;
1346 struct target_distribute_txn_data *dtbd_tdtd;
1351 * Callback for updating the commit batchid
1353 * Update the commit batchid, then wake up the commit thread to cancel the
1356 * \param[in] env execution environment
1357 * \param[in] th thandle which updated the commit batchid
1358 * \param[in] cb commit callback
1359 * \param[in] err result of the thandle
1361 static void distribute_txn_batchid_cb(struct lu_env *env,
1363 struct dt_txn_commit_cb *cb,
1366 struct distribute_txn_bid_data *dtbd = NULL;
1367 struct target_distribute_txn_data *tdtd;
1369 dtbd = container_of0(cb, struct distribute_txn_bid_data, dtbd_cb);
1370 tdtd = dtbd->dtbd_tdtd;
1372 CDEBUG(D_HA, "%s: %llu batchid updated\n",
1373 tdtd->tdtd_lut->lut_obd->obd_name, dtbd->dtbd_batchid);
1374 spin_lock(&tdtd->tdtd_batchid_lock);
1375 if (dtbd->dtbd_batchid > tdtd->tdtd_committed_batchid &&
1376 !tdtd->tdtd_lut->lut_obd->obd_no_transno)
1377 tdtd->tdtd_committed_batchid = dtbd->dtbd_batchid;
1378 spin_unlock(&tdtd->tdtd_batchid_lock);
1379 atomic_dec(&tdtd->tdtd_refcount);
1380 wake_up(&tdtd->tdtd_commit_thread_waitq);
1386 * Update the commit batchid on disk
1388 * Update the commit batchid on disk; after this update is committed, the
1389 * cancellation of the update records can start.
1391 * \param[in] env execution environment
1392 * \param[in] tdtd distribute transaction structure
1393 * \param[in] batchid commit batchid to be updated
1395 * \retval 0 if update succeeds.
1396 * \retval negative errno if update fails.
1399 distribute_txn_commit_batchid_update(const struct lu_env *env,
1400 struct target_distribute_txn_data *tdtd,
1403 struct distribute_txn_bid_data *dtbd = NULL;
1411 OBD_ALLOC_PTR(dtbd);
1414 dtbd->dtbd_batchid = batchid;
1415 dtbd->dtbd_tdtd = tdtd;
1416 dtbd->dtbd_cb.dcb_func = distribute_txn_batchid_cb;
1417 atomic_inc(&tdtd->tdtd_refcount);
1419 th = dt_trans_create(env, tdtd->tdtd_lut->lut_bottom);
1421 atomic_dec(&tdtd->tdtd_refcount);
1423 RETURN(PTR_ERR(th));
1426 tmp = cpu_to_le64(batchid);
1428 buf.lb_len = sizeof(tmp);
1431 rc = dt_declare_record_write(env, tdtd->tdtd_batchid_obj, &buf, off,
1436 rc = dt_trans_start_local(env, tdtd->tdtd_lut->lut_bottom, th);
1440 rc = dt_trans_cb_add(th, &dtbd->dtbd_cb);
1444 rc = dt_record_write(env, tdtd->tdtd_batchid_obj, &buf,
1447 CDEBUG(D_INFO, "%s: update batchid %llu: rc = %d\n",
1448 tdtd->tdtd_lut->lut_obd->obd_name, batchid, rc);
1451 dt_trans_stop(env, tdtd->tdtd_lut->lut_bottom, th);
1453 atomic_dec(&tdtd->tdtd_refcount);
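/*
 * Persistence sketch for the committed batchid (based on the code above):
 *
 *	tmp = cpu_to_le64(batchid);            stored as one little-endian __u64
 *	dt_record_write(..., tdtd_batchid_obj) written in its own local
 *	                                       transaction on lut_bottom
 *	distribute_txn_batchid_cb()            advances tdtd_committed_batchid
 *	                                       only once that write has committed
 *
 * Since the commit thread only cancels update records whose batchid is
 * <= tdtd_committed_batchid, records are cancelled only after the batchid
 * covering them has been durably recorded.
 */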
1460 * Init commit batchid for distribute transaction.
1462 * Initialize the batchid object and get commit batchid from the object.
1464 * \param[in] env execution environment
1465 * \param[in] tdtd distribute transaction whose batchid is initialized.
1467 * \retval 0 if initialization succeeds.
1468 * \retval negative errno if initialization fails.
1471 distribute_txn_commit_batchid_init(const struct lu_env *env,
1472 struct target_distribute_txn_data *tdtd)
1474 struct tgt_thread_info *tti = tgt_th_info(env);
1475 struct lu_target *lut = tdtd->tdtd_lut;
1476 struct lu_attr *attr = &tti->tti_attr;
1477 struct lu_fid *fid = &tti->tti_fid1;
1478 struct dt_object_format *dof = &tti->tti_u.update.tti_update_dof;
1479 struct dt_object *dt_obj = NULL;
1486 memset(attr, 0, sizeof(*attr));
1487 attr->la_valid = LA_MODE;
1488 attr->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
1489 dof->dof_type = dt_mode_to_dft(S_IFREG);
1491 lu_local_obj_fid(fid, BATCHID_COMMITTED_OID);
1493 dt_obj = dt_find_or_create(env, lut->lut_bottom, fid, dof,
1495 if (IS_ERR(dt_obj)) {
1496 rc = PTR_ERR(dt_obj);
1501 tdtd->tdtd_batchid_obj = dt_obj;
1504 buf.lb_len = sizeof(tmp);
1506 rc = dt_read(env, dt_obj, &buf, &off);
1507 if (rc < 0 || (rc < buf.lb_len && rc > 0)) {
1508 CERROR("%s can't read last committed batchid: rc = %d\n",
1509 tdtd->tdtd_lut->lut_obd->obd_name, rc);
1513 } else if (rc == buf.lb_len) {
1514 tdtd->tdtd_committed_batchid = le64_to_cpu(tmp);
1515 CDEBUG(D_HA, "%s: committed batchid %llu\n",
1516 tdtd->tdtd_lut->lut_obd->obd_name,
1517 tdtd->tdtd_committed_batchid);
1522 if (rc < 0 && dt_obj != NULL) {
1523 dt_object_put(env, dt_obj);
1524 tdtd->tdtd_batchid_obj = NULL;
1530 * Manage the distribute transaction commit thread
1532 * Distribute transactions are linked into the list; once a distribute
1533 * transaction is committed, the thread first updates the last committed
1534 * batchid, and after that update is committed, it cancels the update records.
1536 * \param[in] _arg argument for commit thread
1538 * \retval 0 if thread is running successfully
1539 * \retval negative errno if the thread can not be run.
1541 static int distribute_txn_commit_thread(void *_arg)
1543 struct target_distribute_txn_data *tdtd = _arg;
1544 struct lu_target *lut = tdtd->tdtd_lut;
1545 struct ptlrpc_thread *thread = &lut->lut_tdtd_commit_thread;
1546 struct l_wait_info lwi = { 0 };
1548 struct list_head list;
1550 struct top_multiple_thandle *tmt;
1551 struct top_multiple_thandle *tmp;
1552 __u64 batchid = 0, committed;
1556 rc = lu_env_init(&env, LCT_LOCAL | LCT_MD_THREAD);
1560 spin_lock(&tdtd->tdtd_batchid_lock);
1561 thread->t_flags = SVC_RUNNING;
1562 spin_unlock(&tdtd->tdtd_batchid_lock);
1563 wake_up(&thread->t_ctl_waitq);
1564 INIT_LIST_HEAD(&list);
1566 CDEBUG(D_HA, "%s: start commit thread committed batchid %llu\n",
1567 tdtd->tdtd_lut->lut_obd->obd_name,
1568 tdtd->tdtd_committed_batchid);
1570 while (distribute_txn_commit_thread_running(lut)) {
1571 spin_lock(&tdtd->tdtd_batchid_lock);
1572 list_for_each_entry_safe(tmt, tmp, &tdtd->tdtd_list,
1574 if (tmt->tmt_committed == 0)
1577 /* Note: right now, replay is based on the master MDT
1578 * transno, but cancellation is based on batchid,
1579 * so we do not try to cancel the update log until
1580 * recovery is done, unless the update record's
1581 * batchid <= committed_batchid. */
1582 if (tmt->tmt_batchid <= tdtd->tdtd_committed_batchid) {
1583 list_move_tail(&tmt->tmt_commit_list, &list);
1584 } else if (!tdtd->tdtd_lut->lut_obd->obd_recovering) {
1585 LASSERTF(tmt->tmt_batchid >= batchid,
1586 "tmt %p tmt_batchid: %llu, batchid "
1587 "%llu\n", tmt, tmt->tmt_batchid,
1589 /* There are three types of distribute
1590 * transaction result:
1592 * 1. If tmt_result < 0, the distribute
1593 * transaction failed, which should
1594 * be rare, because once the declare phase succeeds,
1595 * the operation should succeed anyway. Note that in
1596 * this case we still update the batchid, so
1597 * cancellation can proceed.
1599 * 2. If tmt_result == 0, the distribute
1600 * transaction succeeded, and we
1601 * will update the batchid.
1603 * 3. If tmt_result > 0, the distribute
1604 * transaction is not yet committed on every
1605 * node, but we need to release this tmt before
1606 * that, which usually happens during umount.
1608 if (tmt->tmt_result <= 0)
1609 batchid = tmt->tmt_batchid;
1610 list_move_tail(&tmt->tmt_commit_list, &list);
1613 spin_unlock(&tdtd->tdtd_batchid_lock);
1615 CDEBUG(D_HA, "%s: batchid: %llu committed batchid "
1616 "%llu\n", tdtd->tdtd_lut->lut_obd->obd_name, batchid,
1617 tdtd->tdtd_committed_batchid);
1618 /* update the globally committed batchid on storage */
1619 if (batchid > tdtd->tdtd_committed_batchid) {
1620 rc = distribute_txn_commit_batchid_update(&env, tdtd,
1625 /* cancel the records for committed batchids */
1626 /* XXX: should we postpone cancels till the end of recovery? */
1627 committed = tdtd->tdtd_committed_batchid;
1628 list_for_each_entry_safe(tmt, tmp, &list, tmt_commit_list) {
1629 if (tmt->tmt_batchid > committed)
1631 list_del_init(&tmt->tmt_commit_list);
1632 if (tmt->tmt_result <= 0)
1633 distribute_txn_cancel_records(&env, tmt);
1634 top_multiple_thandle_put(tmt);
1637 l_wait_event(tdtd->tdtd_commit_thread_waitq,
1638 !distribute_txn_commit_thread_running(lut) ||
1639 committed < tdtd->tdtd_committed_batchid ||
1640 tdtd_ready_for_cancel_log(tdtd), &lwi);
1643 l_wait_event(tdtd->tdtd_commit_thread_waitq,
1644 atomic_read(&tdtd->tdtd_refcount) == 0, &lwi);
1646 spin_lock(&tdtd->tdtd_batchid_lock);
1647 list_for_each_entry_safe(tmt, tmp, &tdtd->tdtd_list,
1649 list_move_tail(&tmt->tmt_commit_list, &list);
1650 spin_unlock(&tdtd->tdtd_batchid_lock);
1652 CDEBUG(D_INFO, "%s stopping distribute txn commit thread.\n",
1653 tdtd->tdtd_lut->lut_obd->obd_name);
1654 list_for_each_entry_safe(tmt, tmp, &list, tmt_commit_list) {
1655 list_del_init(&tmt->tmt_commit_list);
1656 top_multiple_thandle_dump(tmt, D_HA);
1657 top_multiple_thandle_put(tmt);
1660 thread->t_flags = SVC_STOPPED;
1662 wake_up(&thread->t_ctl_waitq);
1668 * Start the distribute transaction commit thread
1670 * Initialize the distribute transaction data on the target and start the
1671 * commit thread, which cancels the update llogs once the transactions commit.
1672 * \param[in] tdtd distribute txn data to be initialized and whose commit thread is started.
1674 * \retval 0 if the thread is started successfully.
1675 * \retval negative errno if the thread cannot be started.
1678 int distribute_txn_init(const struct lu_env *env,
1679 struct lu_target *lut,
1680 struct target_distribute_txn_data *tdtd,
1683 struct task_struct *task;
1684 struct l_wait_info lwi = { 0 };
1688 INIT_LIST_HEAD(&tdtd->tdtd_list);
1689 INIT_LIST_HEAD(&tdtd->tdtd_replay_finish_list);
1690 INIT_LIST_HEAD(&tdtd->tdtd_replay_list);
1691 spin_lock_init(&tdtd->tdtd_batchid_lock);
1692 spin_lock_init(&tdtd->tdtd_replay_list_lock);
1693 tdtd->tdtd_replay_handler = distribute_txn_replay_handle;
1694 tdtd->tdtd_replay_ready = 0;
1696 tdtd->tdtd_batchid = lut->lut_last_transno + 1;
1698 init_waitqueue_head(&lut->lut_tdtd_commit_thread.t_ctl_waitq);
1699 init_waitqueue_head(&tdtd->tdtd_commit_thread_waitq);
1700 init_waitqueue_head(&tdtd->tdtd_recovery_threads_waitq);
1701 atomic_set(&tdtd->tdtd_refcount, 0);
1702 atomic_set(&tdtd->tdtd_recovery_threads_count, 0);
1704 tdtd->tdtd_lut = lut;
1705 if (lut->lut_bottom->dd_rdonly)
1708 rc = distribute_txn_commit_batchid_init(env, tdtd);
1712 task = kthread_run(distribute_txn_commit_thread, tdtd, "dist_txn-%u",
1715 RETURN(PTR_ERR(task));
1717 l_wait_event(lut->lut_tdtd_commit_thread.t_ctl_waitq,
1718 distribute_txn_commit_thread_running(lut) ||
1719 distribute_txn_commit_thread_stopped(lut), &lwi);
1722 EXPORT_SYMBOL(distribute_txn_init);
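/*
 * Illustrative setup/cleanup pairing (a sketch only; the tail of the
 * distribute_txn_init() argument list is not shown above and is elided here
 * as well):
 *
 *	rc = distribute_txn_init(env, lut, tdtd, ...);
 *	if (rc != 0)
 *		GOTO(out, rc);
 *	...
 *	distribute_txn_fini(env, tdtd);
 */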
1725 * Stop the distribute transaction commit thread
1727 * Stop the commit thread and also destroy
1728 * all of the transactions remaining in the list.
1730 * \param[in] env execution environment
1731 * \param[in] tdtd distribute txn data whose commit thread is stopped.
1732 void distribute_txn_fini(const struct lu_env *env,
1733 struct target_distribute_txn_data *tdtd)
1735 struct lu_target *lut = tdtd->tdtd_lut;
1737 /* Stop cancel thread */
1738 if (lut == NULL || !distribute_txn_commit_thread_running(lut))
1741 spin_lock(&tdtd->tdtd_batchid_lock);
1742 lut->lut_tdtd_commit_thread.t_flags = SVC_STOPPING;
1743 spin_unlock(&tdtd->tdtd_batchid_lock);
1744 wake_up(&tdtd->tdtd_commit_thread_waitq);
1745 wait_event(lut->lut_tdtd_commit_thread.t_ctl_waitq,
1746 lut->lut_tdtd_commit_thread.t_flags & SVC_STOPPED);
1748 dtrq_list_destroy(tdtd);
1749 if (tdtd->tdtd_batchid_obj != NULL) {
1750 dt_object_put(env, tdtd->tdtd_batchid_obj);
1751 tdtd->tdtd_batchid_obj = NULL;
1754 EXPORT_SYMBOL(distribute_txn_fini);