4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2014, Intel Corporation.
26 * lustre/target/update_trans.c
28 * This file implements the update distribute transaction API.
30 * To manage the cross-MDT operation (distribute operation) transaction,
31 * the transaction is also split into two layers on the MD stack: the top
32 * transaction and sub transactions.
34 * During the distribute operation, the top transaction is created in the LOD
35 * layer and represents the whole operation. A sub transaction is created by
36 * each OSD or OSP. Top transaction start/stop triggers start/stop of all of
37 * its sub transactions. The top transaction (the whole operation) is committed
38 * only after all of its sub transactions are committed.
40 * There are three kinds of transactions:
41 * 1. Local transaction: all updates are in a single local OSD.
42 * 2. Remote transaction: all updates are only in the remote OSD,
43 * i.e. locally all updates are in the OSP.
44 * 3. Mixed transaction: updates are in both the local OSD and remote OSDs.
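 *
 * A rough sketch of the intended calling sequence, based on the functions
 * below (the real callers live in LOD/OSP and may differ in detail;
 * remote_dt is just a placeholder for a remote target's dt_device):
 *
 *	th = top_trans_create(env, master_dev);
 *	sub_th = thandle_get_sub_by_dt(env, th, remote_dt);
 *	... declare updates against th and sub_th ...
 *	top_trans_start(env, master_dev, th);
 *	... execute the updates ...
 *	top_trans_stop(env, master_dev, th);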
47 * Author: Di Wang <di.wang@intel.com>
50 #define DEBUG_SUBSYSTEM S_CLASS
52 #include <linux/kthread.h>
53 #include <lu_target.h>
54 #include <lustre_log.h>
55 #include <lustre_update.h>
57 #include <obd_class.h>
58 #include <tgt_internal.h>
62 * Dump top multiple thandle
64 * Dump the top multiple thandle and all of its sub thandles to the debug log.
66 * \param[in] mask	debug mask
67 * \param[in] tmt	top_multiple_thandle to be dumped
69 static void top_multiple_thandle_dump(struct top_multiple_thandle *tmt,
72 struct sub_thandle *st;
74 LASSERT(tmt->tmt_magic == TOP_THANDLE_MAGIC);
75 CDEBUG(mask, "%s tmt %p refcount %d committed %d result %d"
77 tmt->tmt_master_sub_dt ?
78 tmt->tmt_master_sub_dt->dd_lu_dev.ld_obd->obd_name :
80 tmt, atomic_read(&tmt->tmt_refcount), tmt->tmt_committed,
81 tmt->tmt_result, tmt->tmt_batchid);
83 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
84 struct sub_thandle_cookie *stc;
86 CDEBUG(mask, "st %p obd %s committed %d sub_th %p\n",
87 st, st->st_dt->dd_lu_dev.ld_obd->obd_name,
88 st->st_committed, st->st_sub_th);
90 list_for_each_entry(stc, &st->st_cookie_list, stc_list) {
91 CDEBUG(mask, " cookie "DOSTID": %u\n",
92 POSTID(&stc->stc_cookie.lgc_lgl.lgl_oi),
93 stc->stc_cookie.lgc_index);
99 * Declare the write of updates to the sub device
101 * Declare the writing of update llog records to the sub device during the distribute transaction.
104 * \param[in] env execution environment
105 * \param[in] record update records being written
106 * \param[in] sub_th sub transaction handle
107 * \param[in] record_size total update record size
109 * \retval 0 if writing succeeds
110 * \retval negative errno if writing fails
112 static int sub_declare_updates_write(const struct lu_env *env,
113 struct llog_update_record *record,
114 struct thandle *sub_th, size_t record_size)
116 struct llog_ctxt *ctxt;
117 struct dt_device *dt = sub_th->th_dev;
118 int left = record_size;
121 /* If ctxt is NULL, it means there is no need to write the updates,
122 * for example if the OSP is used to connect to an OST */
123 ctxt = llog_get_context(dt->dd_lu_dev.ld_obd,
124 LLOG_UPDATELOG_ORIG_CTXT);
126 /* Not ready to record updates yet. */
127 if (ctxt == NULL || ctxt->loc_handle == NULL) {
132 rc = llog_declare_add(env, ctxt->loc_handle,
133 &record->lur_hdr, sub_th);
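/* A record larger than one llog chunk will be written as several llog
 * records (see sub_updates_write() below), so declare one extra
 * llog_declare_add() per remaining chunk of the record. */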
137 while (left > ctxt->loc_chunk_size) {
138 rc = llog_declare_add(env, ctxt->loc_handle,
139 &record->lur_hdr, sub_th);
143 left -= ctxt->loc_chunk_size;
153 * Write updates to the sub device
155 * Write the llog update record to the sub device during the distribute
156 * transaction. If it succeeds, the llog cookie of each written record is
157 * added to the sub thandle's st_cookie_list.
159 * \param[in] env execution environment
160 * \param[in] record update records being written
161 * \param[in] sub_th sub transaction handle
164 * \retval 1 if writing succeeds
165 * \retval negative errno if writing fails
167 static int sub_updates_write(const struct lu_env *env,
168 struct llog_update_record *record,
169 struct sub_thandle *sub_th)
171 struct dt_device *dt = sub_th->st_dt;
172 struct llog_ctxt *ctxt;
174 struct llog_update_record *lur = NULL;
175 struct update_params *params = NULL;
176 __u32 update_count = 0;
177 __u32 param_count = 0;
178 __u32 last_update_count = 0;
179 __u32 last_param_count = 0;
183 struct sub_thandle_cookie *stc;
186 ctxt = llog_get_context(dt->dd_lu_dev.ld_obd,
187 LLOG_UPDATELOG_ORIG_CTXT);
188 /* If ctxt == NULL, it means the updates are on an OST (this only happens
189 * during migration), and we do not track those updates for now */
190 /* If ctxt->loc_handle == NULL, there is no need to record the
191 * updates; this usually happens on the error handler path */
192 if (ctxt == NULL || ctxt->loc_handle == NULL) {
197 /* Since cross-MDT updates include both local
198 * and remote updates, the update ops count must be > 1 */
199 LASSERT(record->lur_update_rec.ur_update_count > 1);
200 LASSERTF(record->lur_hdr.lrh_len == llog_update_record_size(record),
201 "lrh_len %u record_size %zu\n", record->lur_hdr.lrh_len,
202 llog_update_record_size(record));
204 if (likely(record->lur_hdr.lrh_len <= ctxt->loc_chunk_size)) {
207 GOTO(llog_put, rc = -ENOMEM);
208 INIT_LIST_HEAD(&stc->stc_list);
210 rc = llog_add(env, ctxt->loc_handle, &record->lur_hdr,
211 &stc->stc_cookie, sub_th->st_sub_th);
213 CDEBUG(D_INFO, "%s: Add update log "DOSTID":%u: rc = %d\n",
214 dt->dd_lu_dev.ld_obd->obd_name,
215 POSTID(&stc->stc_cookie.lgc_lgl.lgl_oi),
216 stc->stc_cookie.lgc_index, rc);
219 list_add(&stc->stc_list, &sub_th->st_cookie_list);
228 /* Split the record into chunk_size-sized update records */
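/* Roughly: walk the packed update ops first and then the params; when
 * adding the next entry would overflow the llog chunk size, copy the
 * accumulated span into 'lur', write it out as its own llog record
 * (continuation chunks carry the UPDATE_RECORD_CONTINUE flag) and
 * start a new chunk. */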
229 OBD_ALLOC_LARGE(lur, ctxt->loc_chunk_size);
231 GOTO(llog_put, rc = -ENOMEM);
233 memcpy(lur, &record->lur_hdr, sizeof(record->lur_hdr));
234 lur->lur_update_rec.ur_update_count = 0;
235 lur->lur_update_rec.ur_param_count = 0;
236 src = &record->lur_update_rec.ur_ops;
238 lur->lur_hdr.lrh_len = llog_update_record_size(lur);
239 params = update_records_get_params(&record->lur_update_rec);
243 if (update_count < record->lur_update_rec.ur_update_count) {
244 next = update_op_next_op((struct update_op *)src);
246 if (param_count == 0)
247 next = update_records_get_params(
248 &record->lur_update_rec);
251 object_update_param_size(
252 (struct object_update_param *)src);
255 rec_len = cfs_size_round((unsigned long)(next - src));
256 /* If its size > llog chunk_size, then write current chunk to
257 * the update llog. */
258 if (lur->lur_hdr.lrh_len + rec_len + LLOG_MIN_REC_SIZE >
259 ctxt->loc_chunk_size ||
260 param_count == record->lur_update_rec.ur_param_count) {
261 lur->lur_update_rec.ur_update_count =
262 update_count > last_update_count ?
263 update_count - last_update_count : 0;
264 lur->lur_update_rec.ur_param_count = param_count -
267 memcpy(&lur->lur_update_rec.ur_ops, start,
268 (unsigned long)(src - start));
269 if (last_update_count != 0)
270 lur->lur_update_rec.ur_flags |=
271 UPDATE_RECORD_CONTINUE;
273 update_records_dump(&lur->lur_update_rec, D_INFO, true);
274 lur->lur_hdr.lrh_len = llog_update_record_size(lur);
275 LASSERT(lur->lur_hdr.lrh_len <= ctxt->loc_chunk_size);
279 GOTO(llog_put, rc = -ENOMEM);
280 INIT_LIST_HEAD(&stc->stc_list);
282 rc = llog_add(env, ctxt->loc_handle,
284 &stc->stc_cookie, sub_th->st_sub_th);
286 CDEBUG(D_INFO, "%s: Add update log "DOSTID":%u"
287 " rc = %d\n", dt->dd_lu_dev.ld_obd->obd_name,
288 POSTID(&stc->stc_cookie.lgc_lgl.lgl_oi),
289 stc->stc_cookie.lgc_index, rc);
292 list_add(&stc->stc_list,
293 &sub_th->st_cookie_list);
300 last_update_count = update_count;
301 last_param_count = param_count;
303 lur->lur_update_rec.ur_update_count = 0;
304 lur->lur_update_rec.ur_param_count = 0;
305 lur->lur_hdr.lrh_len = llog_update_record_size(lur);
309 lur->lur_hdr.lrh_len += cfs_size_round(rec_len);
310 if (update_count < record->lur_update_rec.ur_update_count)
312 else if (param_count < record->lur_update_rec.ur_param_count)
320 OBD_FREE_LARGE(lur, ctxt->loc_chunk_size);
327 * Prepare the update records.
329 * Merge params and ops into one update record buffer and initialize its header.
332 * During the transaction execution phase, parameters and update ops
333 * are collected in two different buffers (see lod_updates_pack());
334 * at transaction stop they need to be merged into one buffer
335 * so they can be written to the update log.
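 *
 * The merged buffer is, roughly, the llog_update_record header followed by
 * the packed update ops and then the packed parameters, i.e. the params are
 * appended at the offset returned by update_records_get_params().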
337 * \param[in] env execution environment
338 * \param[in] tmt top_multiple_thandle for distribute txn
340 * \retval 0 if merging succeeds.
341 * \retval negative errno if merging fails.
343 static int prepare_writing_updates(const struct lu_env *env,
344 struct top_multiple_thandle *tmt)
346 struct thandle_update_records *tur = tmt->tmt_update_records;
347 struct llog_update_record *lur;
348 struct update_params *params;
352 if (tur == NULL || tur->tur_update_records == NULL ||
353 tur->tur_update_params == NULL)
356 lur = tur->tur_update_records;
357 /* Extend the update records buffer if needed */
358 params_size = update_params_size(tur->tur_update_params,
359 tur->tur_update_param_count);
360 LASSERT(lur->lur_update_rec.ur_param_count == 0);
361 update_size = llog_update_record_size(lur);
362 if (cfs_size_round(update_size + params_size) >
363 tur->tur_update_records_buf_size) {
366 rc = tur_update_records_extend(tur,
367 cfs_size_round(update_size + params_size));
371 lur = tur->tur_update_records;
374 params = update_records_get_params(&lur->lur_update_rec);
375 memcpy(params, tur->tur_update_params, params_size);
377 lur->lur_update_rec.ur_param_count = tur->tur_update_param_count;
378 lur->lur_update_rec.ur_batchid = tmt->tmt_batchid;
379 /* Init update record header */
380 lur->lur_hdr.lrh_len = llog_update_record_size(lur);
381 lur->lur_hdr.lrh_type = UPDATE_REC;
383 /* Dump updates for debugging purposes */
384 update_records_dump(&lur->lur_update_rec, D_INFO, true);
390 distribute_txn_commit_thread_running(struct lu_target *lut)
392 return lut->lut_tdtd_commit_thread.t_flags & SVC_RUNNING;
396 distribute_txn_commit_thread_stopped(struct lu_target *lut)
398 return lut->lut_tdtd_commit_thread.t_flags & SVC_STOPPED;
402 * Top thandle commit callback
404 * This callback will be called when all of the sub transactions are committed.
406 * \param[in] tmt	top multiple thandle which has been committed.
408 static void top_trans_committed_cb(struct top_multiple_thandle *tmt)
410 struct lu_target *lut;
413 LASSERT(atomic_read(&tmt->tmt_refcount) > 0);
415 top_multiple_thandle_dump(tmt, D_HA);
416 tmt->tmt_committed = 1;
417 lut = dt2lu_dev(tmt->tmt_master_sub_dt)->ld_site->ls_tgt;
418 if (distribute_txn_commit_thread_running(lut))
419 wake_up(&lut->lut_tdtd->tdtd_commit_thread_waitq);
423 struct sub_thandle *lookup_sub_thandle(struct top_multiple_thandle *tmt,
424 struct dt_device *dt_dev)
426 struct sub_thandle *st;
428 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
429 if (st->st_dt == dt_dev)
434 EXPORT_SYMBOL(lookup_sub_thandle);
436 struct sub_thandle *create_sub_thandle(struct top_multiple_thandle *tmt,
437 struct dt_device *dt_dev)
439 struct sub_thandle *st;
443 RETURN(ERR_PTR(-ENOMEM));
445 INIT_LIST_HEAD(&st->st_sub_list);
446 INIT_LIST_HEAD(&st->st_cookie_list);
449 list_add(&st->st_sub_list, &tmt->tmt_sub_thandle_list);
454 * Sub thandle commit callback
456 * Mark the sub thandle as committed and, if all sub thandles are committed,
457 * notify the top thandle.
459 * \param[in] env execution environment
460 * \param[in] sub_th sub thandle being committed
461 * \param[in] cb commit callback
462 * \param[in] err trans result
464 static void sub_trans_commit_cb(struct lu_env *env,
465 struct thandle *sub_th,
466 struct dt_txn_commit_cb *cb, int err)
468 struct sub_thandle *st;
469 struct top_multiple_thandle *tmt = cb->dcb_data;
470 bool all_committed = true;
473 /* Check if all sub thandles are committed */
474 spin_lock(&tmt->tmt_sub_lock);
475 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
476 if (st->st_sub_th == sub_th) {
477 st->st_committed = 1;
480 if (!st->st_committed)
481 all_committed = false;
483 spin_unlock(&tmt->tmt_sub_lock);
485 if (tmt->tmt_result == 0)
486 tmt->tmt_result = err;
489 top_trans_committed_cb(tmt);
491 top_multiple_thandle_dump(tmt, D_INFO);
492 top_multiple_thandle_put(tmt);
496 static void sub_thandle_register_commit_cb(struct sub_thandle *st,
497 struct top_multiple_thandle *tmt)
499 LASSERT(st->st_sub_th != NULL);
500 top_multiple_thandle_get(tmt);
501 st->st_commit_dcb.dcb_func = sub_trans_commit_cb;
502 st->st_commit_dcb.dcb_data = tmt;
503 INIT_LIST_HEAD(&st->st_commit_dcb.dcb_linkage);
504 dt_trans_cb_add(st->st_sub_th, &st->st_commit_dcb);
508 * Sub thandle stop callback
510 * After the sub thandle is stopped, this callback is called to notify the top thandle.
513 * \param[in] th sub thandle to be stopped
514 * \param[in] rc result of sub trans
516 static void sub_trans_stop_cb(struct lu_env *env,
517 struct thandle *sub_th,
518 struct dt_txn_commit_cb *cb, int err)
520 struct sub_thandle *st;
521 struct top_multiple_thandle *tmt = cb->dcb_data;
524 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
528 if (st->st_dt == sub_th->th_dev) {
535 wake_up(&tmt->tmt_stop_waitq);
539 static void sub_thandle_register_stop_cb(struct sub_thandle *st,
540 struct top_multiple_thandle *tmt)
542 st->st_stop_dcb.dcb_func = sub_trans_stop_cb;
543 st->st_stop_dcb.dcb_data = tmt;
544 st->st_stop_dcb.dcb_flags = DCB_TRANS_STOP;
545 INIT_LIST_HEAD(&st->st_stop_dcb.dcb_linkage);
546 dt_trans_cb_add(st->st_sub_th, &st->st_stop_dcb);
552 * Create transaction handle for sub_thandle
554 * \param[in] env execution environment
555 * \param[in] top_th	top thandle
556 * \param[in] st sub_thandle
558 * \retval 0 if creation succeeds.
559 * \retval negative errno if creation fails.
561 int sub_thandle_trans_create(const struct lu_env *env,
562 struct top_thandle *top_th,
563 struct sub_thandle *st)
565 struct thandle *sub_th;
567 sub_th = dt_trans_create(env, st->st_dt);
569 return PTR_ERR(sub_th);
571 sub_th->th_top = &top_th->tt_super;
572 st->st_sub_th = sub_th;
574 sub_th->th_wait_submit = 1;
579 * Create the top transaction.
581 * Create the top transaction on the master device. It will create a top
582 * thandle and a sub thandle on the master device.
584 * \param[in] env execution environment
585 * \param[in] master_dev	master device on which the top thandle will be created
587 * \retval pointer to the created thandle.
588 * \retval ERR_PTR(errno) if creation failed.
591 top_trans_create(const struct lu_env *env, struct dt_device *master_dev)
593 struct top_thandle *top_th;
594 struct thandle *child_th;
596 OBD_ALLOC_GFP(top_th, sizeof(*top_th), __GFP_IO);
598 return ERR_PTR(-ENOMEM);
600 top_th->tt_super.th_top = &top_th->tt_super;
602 if (master_dev != NULL) {
603 child_th = dt_trans_create(env, master_dev);
604 if (IS_ERR(child_th)) {
605 OBD_FREE_PTR(top_th);
609 child_th->th_top = &top_th->tt_super;
610 child_th->th_wait_submit = 1;
611 top_th->tt_master_sub_thandle = child_th;
613 top_th->tt_super.th_tags |= child_th->th_tags;
615 return &top_th->tt_super;
617 EXPORT_SYMBOL(top_trans_create);
620 * Declare the update records write
622 * If there are updates recorded in this transaction, declare the write
623 * of the update records to disk on each sub device.
625 * \param[in] env execution environment
626 * \param[in] tmt top multiple transaction handle
628 * \retval 0 if writing succeeds
629 * \retval negative errno if writing fails
631 static int declare_updates_write(const struct lu_env *env,
632 struct top_multiple_thandle *tmt)
634 struct llog_update_record *record;
635 struct sub_thandle *st;
638 record = tmt->tmt_update_records->tur_update_records;
639 /* Declare update write for all other targets */
640 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
641 if (st->st_sub_th == NULL)
644 rc = sub_declare_updates_write(env, record, st->st_sub_th,
645 tmt->tmt_record_size);
654 * Assign batchid to the distribute transaction.
656 * Assign a batchid to the distribute transaction and add it to tdtd_list.
658 * \param[in] tmt distribute transaction
660 static void distribute_txn_assign_batchid(struct top_multiple_thandle *new)
662 struct target_distribute_txn_data *tdtd;
663 struct dt_device *dt = new->tmt_master_sub_dt;
666 tdtd = dt2lu_dev(dt)->ld_site->ls_tgt->lut_tdtd;
667 spin_lock(&tdtd->tdtd_batchid_lock);
668 new->tmt_batchid = tdtd->tdtd_batchid++;
669 list_add_tail(&new->tmt_commit_list, &tdtd->tdtd_list);
670 spin_unlock(&tdtd->tdtd_batchid_lock);
671 top_multiple_thandle_get(new);
672 top_multiple_thandle_dump(new, D_INFO);
676 * Insert the distribute transaction into the distribute txn list.
678 * Insert the distribute transaction into the distribute txn list in batchid order.
680 * \param[in] new the distribute txn to be inserted.
682 void distribute_txn_insert_by_batchid(struct top_multiple_thandle *new)
684 struct dt_device *dt = new->tmt_master_sub_dt;
685 struct top_multiple_thandle *tmt;
686 struct target_distribute_txn_data *tdtd;
687 bool at_head = false;
690 tdtd = dt2lu_dev(dt)->ld_site->ls_tgt->lut_tdtd;
692 spin_lock(&tdtd->tdtd_batchid_lock);
693 list_for_each_entry_reverse(tmt, &tdtd->tdtd_list, tmt_commit_list) {
694 if (new->tmt_batchid > tmt->tmt_batchid) {
695 list_add(&new->tmt_commit_list, &tmt->tmt_commit_list);
699 if (list_empty(&new->tmt_commit_list)) {
701 list_add(&new->tmt_commit_list, &tdtd->tdtd_list);
703 spin_unlock(&tdtd->tdtd_batchid_lock);
704 top_multiple_thandle_get(new);
705 top_multiple_thandle_dump(new, D_INFO);
706 if (new->tmt_committed && at_head)
707 wake_up(&tdtd->tdtd_commit_thread_waitq);
711 * Prepare cross-MDT operation.
713 * Create the update record buffer to record updates for the cross-MDT operation,
714 * add the master sub transaction to tt_sub_trans_list, and declare the update records write.
717 * During updates packing, all of the parameters will be packed into
718 * tur_update_params, and the updates will be packed into tur_update_records.
719 * Then at transaction stop, the parameters and updates will be merged
720 * into one updates buffer.
722 * The master thandle will also be added to the sub_th list, so it will be
723 * easy to track the commit status.
725 * \param[in] env execution environment
726 * \param[in] tmt	top multiple transaction handle
728 * \retval 0 if preparation succeeds.
729 * \retval negative errno if preparation fails.
731 static int prepare_multiple_node_trans(const struct lu_env *env,
732 struct top_multiple_thandle *tmt)
734 struct thandle_update_records *tur;
738 if (tmt->tmt_update_records == NULL) {
739 tur = &update_env_info(env)->uti_tur;
740 rc = check_and_prepare_update_record(env, tur);
744 tmt->tmt_update_records = tur;
745 distribute_txn_assign_batchid(tmt);
748 rc = declare_updates_write(env, tmt);
754 * Start the top transaction.
756 * Start all of its sub transactions, then start the master sub transaction.
758 * \param[in] env execution environment
759 * \param[in] master_dev	master device on which the top thandle will be started
760 * \param[in] th top thandle
762 * \retval 0 if transaction start succeeds.
763 * \retval negative errno if start fails.
765 int top_trans_start(const struct lu_env *env, struct dt_device *master_dev,
768 struct top_thandle *top_th = container_of(th, struct top_thandle,
770 struct sub_thandle *st;
771 struct top_multiple_thandle *tmt = top_th->tt_multiple_thandle;
776 rc = dt_trans_start(env, top_th->tt_master_sub_thandle->th_dev,
777 top_th->tt_master_sub_thandle);
781 tmt = top_th->tt_multiple_thandle;
782 rc = prepare_multiple_node_trans(env, tmt);
786 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
787 if (st->st_sub_th == NULL)
790 st->st_sub_th->th_sync = th->th_sync;
791 st->st_sub_th->th_local = th->th_local;
792 st->st_sub_th->th_tags = th->th_tags;
793 rc = dt_trans_start(env, st->st_sub_th->th_dev,
798 sub_thandle_register_stop_cb(st, tmt);
799 sub_thandle_register_commit_cb(st, tmt);
805 EXPORT_SYMBOL(top_trans_start);
808 * Check whether we need to write the update records
810 * Check if the updates for the top_thandle need to be written
811 * to all targets. The updates are written only if the transaction
812 * succeeds and the update count is >= 2.
814 * \param[in] top_th	top thandle.
816 * \retval true if it needs to write updates
817 * \retval false if it does not need to write updates
819 static bool top_check_write_updates(struct top_thandle *top_th)
821 struct top_multiple_thandle *tmt;
822 struct thandle_update_records *tur;
824 /* Do not write update records if the transaction failed */
825 if (top_th->tt_super.th_result != 0)
828 tmt = top_th->tt_multiple_thandle;
832 tur = tmt->tmt_update_records;
836 /* Hmm, bogus update records: a cross-MDT operation
837 * should include both local and remote updates, so the
838 * update count should be >= 2 */
839 if (tur->tur_update_records == NULL ||
840 tur->tur_update_records->lur_update_rec.ur_update_count <= 1)
847 * Check if the top transaction is stopped
849 * The top transaction is considered stopped only after all of its
850 * sub transactions are stopped.
852 * \param [in] top_th top thandle
854 * \retval true if the top transaction is stopped.
855 * \retval false if the top transaction is not stopped.
857 static bool top_trans_is_stopped(struct top_thandle *top_th)
859 struct top_multiple_thandle *tmt;
860 struct sub_thandle *st;
861 bool all_stopped = true;
863 tmt = top_th->tt_multiple_thandle;
864 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
865 if (!st->st_stopped && st->st_sub_th != NULL) {
870 if (st->st_result != 0 &&
871 top_th->tt_super.th_result == 0)
872 top_th->tt_super.th_result = st->st_result;
879 * Wait for the result of the top transaction
881 * Wait until all sub transactions have returned their results.
883 * \param [in] top_th top thandle.
885 * \retval the result of top thandle.
887 static int top_trans_wait_result(struct top_thandle *top_th)
889 struct l_wait_info lwi = {0};
891 l_wait_event(top_th->tt_multiple_thandle->tmt_stop_waitq,
892 top_trans_is_stopped(top_th), &lwi);
894 RETURN(top_th->tt_super.th_result);
898 * Stop the top transaction.
900 * Stop the transaction on the master device first, then stop transactions
901 * on other sub devices.
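 *
 * Roughly, this proceeds in four steps: write the update log on the master
 * MDT, stop the master transaction (its stop callback fills in the master
 * transno), write the update logs on the other MDTs, and finally stop the
 * transactions on the other MDTs.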
903 * \param[in] env execution environment
904 * \param[in] master_dev	master device on which the top thandle was created
905 * \param[in] th top thandle
907 * \retval 0 if stop transaction succeeds.
908 * \retval negative errno if stop transaction fails.
910 int top_trans_stop(const struct lu_env *env, struct dt_device *master_dev,
913 struct top_thandle *top_th = container_of(th, struct top_thandle,
915 struct sub_thandle *st;
916 struct sub_thandle *master_st;
917 struct top_multiple_thandle *tmt;
918 struct thandle_update_records *tur;
919 bool write_updates = false;
923 if (likely(top_th->tt_multiple_thandle == NULL)) {
924 LASSERT(master_dev != NULL);
925 rc = dt_trans_stop(env, master_dev,
926 top_th->tt_master_sub_thandle);
927 OBD_FREE_PTR(top_th);
931 tmt = top_th->tt_multiple_thandle;
932 tur = tmt->tmt_update_records;
934 /* Note: we need to stop the master thandle first, then its stop
935 * callback will fill in the master transno in the update logs,
936 * and these update logs will be sent to the other MDTs */
937 /* Get the master sub thandle */
938 master_st = lookup_sub_thandle(tmt, tmt->tmt_master_sub_dt);
939 write_updates = top_check_write_updates(top_th);
941 /* Step 1: write the update log on the master MDT */
942 if (master_st != NULL && master_st->st_sub_th != NULL &&
944 struct llog_update_record *lur;
946 /* Merge the parameters and updates into one buffer */
947 rc = prepare_writing_updates(env, tmt);
949 CERROR("%s: cannot prepare updates: rc = %d\n",
950 master_dev->dd_lu_dev.ld_obd->obd_name, rc);
952 GOTO(stop_master_trans, rc);
955 lur = tur->tur_update_records;
956 /* Write updates to the master MDT */
957 rc = sub_updates_write(env, lur, master_st);
959 /* Clean up the common parameters in the update records;
960 * the master transno callback might add more parameters,
961 * and we need to merge the update records again in the next step. */
963 if (tur->tur_update_params != NULL)
964 lur->lur_update_rec.ur_param_count = 0;
967 CERROR("%s: write updates failed: rc = %d\n",
968 master_dev->dd_lu_dev.ld_obd->obd_name, rc);
970 GOTO(stop_master_trans, rc);
975 /* Step 2: Stop the transaction on the master MDT, and fill the
976 * master transno into the update logs sent to the other MDTs. */
977 if (master_st != NULL && master_st->st_sub_th != NULL) {
978 master_st->st_sub_th->th_local = th->th_local;
980 master_st->st_sub_th->th_sync = th->th_sync;
981 master_st->st_sub_th->th_tags = th->th_tags;
982 master_st->st_sub_th->th_result = th->th_result;
983 rc = dt_trans_stop(env, master_st->st_dt, master_st->st_sub_th);
986 GOTO(stop_other_trans, rc);
987 } else if (tur != NULL && tur->tur_update_records != NULL) {
988 struct llog_update_record *lur;
990 lur = tur->tur_update_records;
991 if (lur->lur_update_rec.ur_master_transno == 0)
992 /* Update master transno after master stop
994 lur->lur_update_rec.ur_master_transno =
995 tgt_th_info(env)->tti_transno;
999 /* Step 3: write updates to other MDTs */
1000 if (write_updates) {
1001 struct llog_update_record *lur;
1003 /* The stop callback of the master will add more updates and also update
1004 * the master transno, so merge the parameters and updates into one buffer. */
1006 rc = prepare_writing_updates(env, tmt);
1008 CERROR("%s: prepare updates failed: rc = %d\n",
1009 master_dev->dd_lu_dev.ld_obd->obd_name, rc);
1011 GOTO(stop_other_trans, rc);
1013 lur = tur->tur_update_records;
1014 list_for_each_entry(st, &tmt->tmt_sub_thandle_list,
1016 if (st->st_sub_th == NULL || st == master_st ||
1017 st->st_sub_th->th_result < 0)
1020 rc = sub_updates_write(env, lur, st);
1029 /* Step 4: Stop the transaction on other MDTs */
1030 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
1031 if (st == master_st || st->st_sub_th == NULL)
1035 st->st_sub_th->th_sync = th->th_sync;
1036 st->st_sub_th->th_local = th->th_local;
1037 st->st_sub_th->th_tags = th->th_tags;
1038 st->st_sub_th->th_result = th->th_result;
1039 rc = dt_trans_stop(env, st->st_sub_th->th_dev,
1041 if (unlikely(rc < 0 && th->th_result == 0))
1045 rc = top_trans_wait_result(top_th);
1047 tmt->tmt_result = rc;
1049 /* Balance the refcount taken in top_trans_create. Note: if it is NOT
1050 * a multiple-node transaction, the top transaction will be destroyed. */
1051 top_multiple_thandle_put(tmt);
1052 OBD_FREE_PTR(top_th);
1055 EXPORT_SYMBOL(top_trans_stop);
1058 * Create top_multiple_thandle for top_thandle
1060 * Create a top_multiple_thandle to manage the multiple-node transaction
1061 * for the top_thandle; it also needs to add the master sub thandle to the
1062 * sub trans list now.
1064 * \param[in] env execution environment
1065 * \param[in] top_th the top thandle
1067 * \retval 0 if creation succeeds
1068 * \retval negative errno if creation fails
1070 int top_trans_create_tmt(const struct lu_env *env,
1071 struct top_thandle *top_th)
1073 struct top_multiple_thandle *tmt;
1079 tmt->tmt_magic = TOP_THANDLE_MAGIC;
1080 INIT_LIST_HEAD(&tmt->tmt_sub_thandle_list);
1081 INIT_LIST_HEAD(&tmt->tmt_commit_list);
1082 atomic_set(&tmt->tmt_refcount, 1);
1083 spin_lock_init(&tmt->tmt_sub_lock);
1084 init_waitqueue_head(&tmt->tmt_stop_waitq);
1086 top_th->tt_multiple_thandle = tmt;
1091 static struct sub_thandle *
1092 create_sub_thandle_with_thandle(struct top_thandle *top_th,
1093 struct thandle *sub_th)
1095 struct sub_thandle *st;
1097 /* Create and init a sub thandle and add it to the top trans list */
1098 st = create_sub_thandle(top_th->tt_multiple_thandle,
1103 st->st_sub_th = sub_th;
1105 sub_th->th_top = &top_th->tt_super;
1112 * Get sub thandle from the top thandle according to the sub dt_device.
1114 * \param[in] env execution environment
1115 * \param[in] th thandle on the top layer.
1116 * \param[in] sub_dt sub dt_device used to get sub transaction
1118 * \retval thandle of the sub transaction if it succeeds
1119 * \retval PTR_ERR(errno) if it fails
1121 struct thandle *thandle_get_sub_by_dt(const struct lu_env *env,
1123 struct dt_device *sub_dt)
1125 struct sub_thandle *st = NULL;
1126 struct top_thandle *top_th;
1127 struct thandle *sub_th = NULL;
1131 top_th = container_of(th, struct top_thandle, tt_super);
1133 if (likely(sub_dt == top_th->tt_master_sub_thandle->th_dev))
1134 RETURN(top_th->tt_master_sub_thandle);
1136 if (top_th->tt_multiple_thandle != NULL) {
1137 st = lookup_sub_thandle(top_th->tt_multiple_thandle, sub_dt);
1139 RETURN(st->st_sub_th);
1142 sub_th = dt_trans_create(env, sub_dt);
1146 /* Create top_multiple_thandle if necessary */
1147 if (top_th->tt_multiple_thandle == NULL) {
1148 struct top_multiple_thandle *tmt;
1150 rc = top_trans_create_tmt(env, top_th);
1152 GOTO(stop_trans, rc);
1154 tmt = top_th->tt_multiple_thandle;
1156 /* Add master sub th to the top trans list */
1157 tmt->tmt_master_sub_dt =
1158 top_th->tt_master_sub_thandle->th_dev;
1159 st = create_sub_thandle_with_thandle(top_th,
1160 top_th->tt_master_sub_thandle);
1162 GOTO(stop_trans, rc = PTR_ERR(st));
1165 /* Create and init a sub thandle and add it to the top trans list */
1166 st = create_sub_thandle_with_thandle(top_th, sub_th);
1167 st->st_sub_th->th_wait_submit = 1;
1172 sub_th->th_result = rc;
1173 dt_trans_stop(env, sub_dt, sub_th);
1174 sub_th = ERR_PTR(rc);
1179 EXPORT_SYMBOL(thandle_get_sub_by_dt);
1182 * Top multiple thandle destroy
1184 * Destroy the top multiple thandle and all of its sub thandles.
1186 * \param[in] tmt top_multiple_thandle to be destroyed.
1188 void top_multiple_thandle_destroy(struct top_multiple_thandle *tmt)
1190 struct sub_thandle *st;
1191 struct sub_thandle *tmp;
1193 LASSERT(tmt->tmt_magic == TOP_THANDLE_MAGIC);
1194 list_for_each_entry_safe(st, tmp, &tmt->tmt_sub_thandle_list,
1196 struct sub_thandle_cookie *stc;
1197 struct sub_thandle_cookie *tmp;
1199 list_del(&st->st_sub_list);
1200 list_for_each_entry_safe(stc, tmp, &st->st_cookie_list,
1202 list_del(&stc->stc_list);
1209 EXPORT_SYMBOL(top_multiple_thandle_destroy);
1212 * Cancel the update logs on the MDTs
1214 * Cancel the update logs on the MDTs, then destroy the thandle.
1216 * \param[in] env execution environment
1217 * \param[in] tmt	the top multiple thandle whose update records
1218 * will be cancelled.
1220 * \retval 0 if cancellation succeeds.
1221 * \retval negative errno if cancellation fails.
1223 static int distribute_txn_cancel_records(const struct lu_env *env,
1224 struct top_multiple_thandle *tmt)
1226 struct sub_thandle *st;
1229 top_multiple_thandle_dump(tmt, D_INFO);
1230 /* Cancel update logs on other MDTs */
1231 list_for_each_entry(st, &tmt->tmt_sub_thandle_list, st_sub_list) {
1232 struct llog_ctxt *ctxt;
1233 struct obd_device *obd;
1234 struct llog_cookie *cookie;
1235 struct sub_thandle_cookie *stc;
1238 obd = st->st_dt->dd_lu_dev.ld_obd;
1239 ctxt = llog_get_context(obd, LLOG_UPDATELOG_ORIG_CTXT);
1242 list_for_each_entry(stc, &st->st_cookie_list, stc_list) {
1243 cookie = &stc->stc_cookie;
1244 if (fid_is_zero(&cookie->lgc_lgl.lgl_oi.oi_fid))
1247 rc = llog_cat_cancel_records(env, ctxt->loc_handle, 1,
1249 CDEBUG(D_HA, "%s: batchid %llu cancel update log "
1250 DOSTID ".%u : rc = %d\n", obd->obd_name,
1252 POSTID(&cookie->lgc_lgl.lgl_oi),
1253 cookie->lgc_index, rc);
1256 llog_ctxt_put(ctxt);
1263 * Check if there are committed transactions
1265 * Check if there are committed transactions in the distribute transaction
1266 * list, then cancel the update records for those committed transactions.
1267 * Because the distribute transactions in the list are sorted by batchid,
1268 * and cancellation is done in batchid order, we only check the first
1269 * transaction (the one with the lowest batchid) in the list.
1271 * \param[in] tdtd	distribute transaction data where the cancel thread runs
1273 * \retval true if it is ready
1274 * \retval false if it is not ready
1276 static bool tdtd_ready_for_cancel_log(struct target_distribute_txn_data *tdtd)
1278 struct top_multiple_thandle *tmt = NULL;
1279 struct obd_device *obd = tdtd->tdtd_lut->lut_obd;
1282 spin_lock(&tdtd->tdtd_batchid_lock);
1283 if (!list_empty(&tdtd->tdtd_list)) {
1284 tmt = list_entry(tdtd->tdtd_list.next,
1285 struct top_multiple_thandle, tmt_commit_list);
1286 if (tmt->tmt_committed &&
1287 (!obd->obd_recovering || (obd->obd_recovering &&
1288 tmt->tmt_batchid <= tdtd->tdtd_committed_batchid)))
1291 spin_unlock(&tdtd->tdtd_batchid_lock);
1296 struct distribute_txn_bid_data {
1297 struct dt_txn_commit_cb dtbd_cb;
1298 struct target_distribute_txn_data *dtbd_tdtd;
1303 * Callback for updating the commit batchid
1305 * Update the commit batchid, then wake up the commit thread to cancel the update records.
1308 * \param[in] env	execution environment
1309 * \param[in] th	thandle updating the commit batchid
1310 * \param[in] cb	commit callback
1311 * \param[in] err	result of the thandle
1313 static void distribute_txn_batchid_cb(struct lu_env *env,
1315 struct dt_txn_commit_cb *cb,
1318 struct distribute_txn_bid_data *dtbd = NULL;
1319 struct target_distribute_txn_data *tdtd;
1321 dtbd = container_of0(cb, struct distribute_txn_bid_data, dtbd_cb);
1322 tdtd = dtbd->dtbd_tdtd;
1324 CDEBUG(D_HA, "%s: %llu batchid updated\n",
1325 tdtd->tdtd_lut->lut_obd->obd_name, dtbd->dtbd_batchid);
1326 spin_lock(&tdtd->tdtd_batchid_lock);
1327 if (dtbd->dtbd_batchid > tdtd->tdtd_committed_batchid &&
1328 !tdtd->tdtd_lut->lut_obd->obd_no_transno)
1329 tdtd->tdtd_committed_batchid = dtbd->dtbd_batchid;
1330 spin_unlock(&tdtd->tdtd_batchid_lock);
1331 atomic_dec(&tdtd->tdtd_refcount);
1332 wake_up(&tdtd->tdtd_commit_thread_waitq);
1338 * Update the commit batchid on disk
1340 * Update the commit batchid on disk; after this update is committed,
1341 * cancellation of the update records can start.
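 *
 * Note that the new value only takes effect through the transaction commit
 * callback (distribute_txn_batchid_cb() above), which raises
 * tdtd_committed_batchid and wakes up the commit thread.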
1343 * \param[in] env execution environment
1344 * \param[in] tdtd distribute transaction structure
1345 * \param[in] batchid commit batchid to be updated
1347 * \retval 0 if update succeeds.
1348 * \retval negative errno if update fails.
1351 distribute_txn_commit_batchid_update(const struct lu_env *env,
1352 struct target_distribute_txn_data *tdtd,
1355 struct distribute_txn_bid_data *dtbd = NULL;
1363 OBD_ALLOC_PTR(dtbd);
1366 dtbd->dtbd_batchid = batchid;
1367 dtbd->dtbd_tdtd = tdtd;
1368 dtbd->dtbd_cb.dcb_func = distribute_txn_batchid_cb;
1369 atomic_inc(&tdtd->tdtd_refcount);
1371 th = dt_trans_create(env, tdtd->tdtd_lut->lut_bottom);
1374 RETURN(PTR_ERR(th));
1377 tmp = cpu_to_le64(batchid);
1379 buf.lb_len = sizeof(tmp);
1382 rc = dt_declare_record_write(env, tdtd->tdtd_batchid_obj, &buf, off,
1387 rc = dt_trans_start_local(env, tdtd->tdtd_lut->lut_bottom, th);
1391 rc = dt_trans_cb_add(th, &dtbd->dtbd_cb);
1395 rc = dt_record_write(env, tdtd->tdtd_batchid_obj, &buf,
1398 CDEBUG(D_INFO, "%s: update batchid "LPU64": rc = %d\n",
1399 tdtd->tdtd_lut->lut_obd->obd_name, batchid, rc);
1402 dt_trans_stop(env, tdtd->tdtd_lut->lut_bottom, th);
1409 * Init commit batchid for distribute transaction.
1411 * Initialize the batchid object and get commit batchid from the object.
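 *
 * Roughly, the batchid is kept as a single little-endian __u64 in a local
 * file (BATCHID_COMMITTED_OID): this function reads it back at setup, and
 * distribute_txn_commit_batchid_update() rewrites it as transactions commit.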
1413 * \param[in] env execution environment
1414 * \param[in] tdtd distribute transaction whose batchid is initialized.
1416 * \retval 0 if initialization succeeds.
1417 * \retval negative errno if initialization fails.
1420 distribute_txn_commit_batchid_init(const struct lu_env *env,
1421 struct target_distribute_txn_data *tdtd)
1423 struct tgt_thread_info *tti = tgt_th_info(env);
1424 struct lu_target *lut = tdtd->tdtd_lut;
1425 struct lu_attr *attr = &tti->tti_attr;
1426 struct lu_fid *fid = &tti->tti_fid1;
1427 struct dt_object_format *dof = &tti->tti_u.update.tti_update_dof;
1428 struct dt_object *dt_obj = NULL;
1435 memset(attr, 0, sizeof(*attr));
1436 attr->la_valid = LA_MODE;
1437 attr->la_mode = S_IFREG | S_IRUGO | S_IWUSR;
1438 dof->dof_type = dt_mode_to_dft(S_IFREG);
1440 lu_local_obj_fid(fid, BATCHID_COMMITTED_OID);
1442 dt_obj = dt_find_or_create(env, lut->lut_bottom, fid, dof,
1444 if (IS_ERR(dt_obj)) {
1445 rc = PTR_ERR(dt_obj);
1450 tdtd->tdtd_batchid_obj = dt_obj;
1453 buf.lb_len = sizeof(tmp);
1455 rc = dt_read(env, dt_obj, &buf, &off);
1456 if (rc < 0 || (rc < buf.lb_len && rc > 0)) {
1457 CERROR("%s can't read last committed batchid: rc = %d\n",
1458 tdtd->tdtd_lut->lut_obd->obd_name, rc);
1462 } else if (rc == buf.lb_len) {
1463 tdtd->tdtd_committed_batchid = le64_to_cpu(tmp);
1464 CDEBUG(D_HA, "%s: committed batchid %llu\n",
1465 tdtd->tdtd_lut->lut_obd->obd_name,
1466 tdtd->tdtd_committed_batchid);
1471 if (rc < 0 && dt_obj != NULL) {
1472 lu_object_put(env, &dt_obj->do_lu);
1473 tdtd->tdtd_batchid_obj = NULL;
1479 * Manage the distribute transaction commit thread
1481 * Distribute transactions are linked into the list; once a distribute
1482 * transaction is committed, the thread first updates the last committed
1483 * batchid, and after that update is committed, it cancels the records.
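 *
 * Roughly, each iteration of the thread scans tdtd_list for committed
 * top_multiple_thandles, picks the highest batchid that can be advanced,
 * writes it to disk via distribute_txn_commit_batchid_update(), cancels the
 * update records of entries whose batchid is at or below the committed
 * batchid, and then sleeps until woken up again.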
1485 * \param[in] _arg argument for commit thread
1487 * \retval 0 if the thread runs successfully
1488 * \retval negative errno if the thread cannot be run.
1490 static int distribute_txn_commit_thread(void *_arg)
1492 struct target_distribute_txn_data *tdtd = _arg;
1493 struct lu_target *lut = tdtd->tdtd_lut;
1494 struct ptlrpc_thread *thread = &lut->lut_tdtd_commit_thread;
1495 struct l_wait_info lwi = { 0 };
1497 struct list_head list;
1499 struct top_multiple_thandle *tmt;
1500 struct top_multiple_thandle *tmp;
1501 __u64 batchid = 0, committed;
1505 rc = lu_env_init(&env, LCT_LOCAL | LCT_MD_THREAD);
1509 spin_lock(&tdtd->tdtd_batchid_lock);
1510 thread->t_flags = SVC_RUNNING;
1511 spin_unlock(&tdtd->tdtd_batchid_lock);
1512 wake_up(&thread->t_ctl_waitq);
1513 INIT_LIST_HEAD(&list);
1515 CDEBUG(D_HA, "%s: start commit thread committed batchid "LPU64"\n",
1516 tdtd->tdtd_lut->lut_obd->obd_name,
1517 tdtd->tdtd_committed_batchid);
1519 while (distribute_txn_commit_thread_running(lut)) {
1520 spin_lock(&tdtd->tdtd_batchid_lock);
1521 list_for_each_entry_safe(tmt, tmp, &tdtd->tdtd_list,
1523 if (tmt->tmt_committed == 0)
1526 /* Note: right now, replay is based on the master MDT
1527 * transno, but cancellation is based on batchid,
1528 * so we do not try to cancel the update logs until
1529 * recovery is done, unless the update record's
1530 * batchid < committed_batchid. */
1531 if (tmt->tmt_batchid <= tdtd->tdtd_committed_batchid) {
1532 list_move_tail(&tmt->tmt_commit_list, &list);
1533 } else if (!tdtd->tdtd_lut->lut_obd->obd_recovering) {
1534 LASSERTF(tmt->tmt_batchid >= batchid,
1535 "tmt %p tmt_batchid: "LPU64", batchid "
1536 LPU64"\n", tmt, tmt->tmt_batchid,
1538 /* There are three types of distribute
1539 * transaction results:
1541 * 1. If tmt_result < 0, it means the
1542 * distribute transaction failed, which should
1543 * be rare, because once the declare phase succeeds,
1544 * the operation should succeed anyway. Note that in
1545 * this case, we will still update the batchid so
1546 * cancellation would be stopped.
1548 * 2. If tmt_result == 0, it means the
1549 * distribute transaction succeeded, and we
1550 * will update the batchid.
1552 * 3. If tmt_result > 0, it means the distribute
1553 * transaction is not yet committed on every
1554 * node, but we need to release this tmt before
1555 * that, which usually happens during umount.
1557 if (tmt->tmt_result <= 0)
1558 batchid = tmt->tmt_batchid;
1559 list_move_tail(&tmt->tmt_commit_list, &list);
1562 spin_unlock(&tdtd->tdtd_batchid_lock);
1564 CDEBUG(D_HA, "%s: batchid: "LPU64" committed batchid "
1565 LPU64"\n", tdtd->tdtd_lut->lut_obd->obd_name, batchid,
1566 tdtd->tdtd_committed_batchid);
1567 /* Update the globally committed batchid on storage */
1568 if (batchid > tdtd->tdtd_committed_batchid) {
1569 distribute_txn_commit_batchid_update(&env, tdtd,
1571 spin_lock(&tdtd->tdtd_batchid_lock);
1572 if (batchid > tdtd->tdtd_batchid) {
1573 /* This might happen during recovery:
1574 * the batchid is initialized to the last transno,
1575 * and the batchid in the update records
1576 * on other MDTs might be bigger than
1577 * this batchid, so we need to update it to
1578 * avoid duplicate batchids. */
1579 CDEBUG(D_HA, "%s update batchid from "LPU64
1581 tdtd->tdtd_lut->lut_obd->obd_name,
1582 tdtd->tdtd_batchid, batchid);
1583 tdtd->tdtd_batchid = batchid;
1585 spin_unlock(&tdtd->tdtd_batchid_lock);
1587 /* Cancel the records for committed batchids */
1588 /* XXX: should we postpone cancels till the end of recovery? */
1589 committed = tdtd->tdtd_committed_batchid;
1590 list_for_each_entry_safe(tmt, tmp, &list, tmt_commit_list) {
1591 if (tmt->tmt_batchid > committed)
1593 list_del_init(&tmt->tmt_commit_list);
1594 if (tmt->tmt_result <= 0)
1595 distribute_txn_cancel_records(&env, tmt);
1596 top_multiple_thandle_put(tmt);
1599 l_wait_event(tdtd->tdtd_commit_thread_waitq,
1600 !distribute_txn_commit_thread_running(lut) ||
1601 committed < tdtd->tdtd_committed_batchid ||
1602 tdtd_ready_for_cancel_log(tdtd), &lwi);
1605 l_wait_event(tdtd->tdtd_commit_thread_waitq,
1606 atomic_read(&tdtd->tdtd_refcount) == 0, &lwi);
1608 spin_lock(&tdtd->tdtd_batchid_lock);
1609 list_for_each_entry_safe(tmt, tmp, &tdtd->tdtd_list,
1611 list_move_tail(&tmt->tmt_commit_list, &list);
1612 spin_unlock(&tdtd->tdtd_batchid_lock);
1614 CDEBUG(D_INFO, "%s stopping distribute txn commit thread.\n",
1615 tdtd->tdtd_lut->lut_obd->obd_name);
1616 list_for_each_entry_safe(tmt, tmp, &list, tmt_commit_list) {
1617 list_del_init(&tmt->tmt_commit_list);
1618 top_multiple_thandle_dump(tmt, D_HA);
1619 top_multiple_thandle_put(tmt);
1622 thread->t_flags = SVC_STOPPED;
1624 wake_up(&thread->t_ctl_waitq);
1630 * Start the distribute transaction commit thread
1632 * Start the distribute txn commit thread on the target.
1634 * \param[in] tdtd	distribute txn data where the commit thread is started.
1636 * \retval 0 if the thread is started successfully.
1637 * \retval negative errno if the thread is not being
1640 int distribute_txn_init(const struct lu_env *env,
1641 struct lu_target *lut,
1642 struct target_distribute_txn_data *tdtd,
1645 struct task_struct *task;
1646 struct l_wait_info lwi = { 0 };
1650 spin_lock_init(&tdtd->tdtd_batchid_lock);
1651 INIT_LIST_HEAD(&tdtd->tdtd_list);
1653 tdtd->tdtd_batchid = lut->lut_last_transno + 1;
1655 init_waitqueue_head(&lut->lut_tdtd_commit_thread.t_ctl_waitq);
1656 init_waitqueue_head(&tdtd->tdtd_commit_thread_waitq);
1657 atomic_set(&tdtd->tdtd_refcount, 0);
1659 tdtd->tdtd_lut = lut;
1660 rc = distribute_txn_commit_batchid_init(env, tdtd);
1664 task = kthread_run(distribute_txn_commit_thread, tdtd, "tdtd-%u",
1667 RETURN(PTR_ERR(task));
1669 l_wait_event(lut->lut_tdtd_commit_thread.t_ctl_waitq,
1670 distribute_txn_commit_thread_running(lut) ||
1671 distribute_txn_commit_thread_stopped(lut), &lwi);
1674 EXPORT_SYMBOL(distribute_txn_init);
1677 * Stop the distribute transaction commit thread
1679 * Stop the distribute txn commit thread on the target and also destroy
1680 * all of the transactions in the list.
1682 * \param[in] tdtd	distribute txn data whose commit thread is stopped.
1684 void distribute_txn_fini(const struct lu_env *env,
1685 struct target_distribute_txn_data *tdtd)
1687 struct lu_target *lut = tdtd->tdtd_lut;
1689 /* Stop the commit thread */
1690 if (lut == NULL || !distribute_txn_commit_thread_running(lut))
1693 spin_lock(&tdtd->tdtd_batchid_lock);
1694 lut->lut_tdtd_commit_thread.t_flags = SVC_STOPPING;
1695 spin_unlock(&tdtd->tdtd_batchid_lock);
1696 wake_up(&tdtd->tdtd_commit_thread_waitq);
1697 wait_event(lut->lut_tdtd_commit_thread.t_ctl_waitq,
1698 lut->lut_tdtd_commit_thread.t_flags & SVC_STOPPED);
1700 dtrq_list_destroy(tdtd);
1701 if (tdtd->tdtd_batchid_obj != NULL) {
1702 lu_object_put(env, &tdtd->tdtd_batchid_obj->do_lu);
1703 tdtd->tdtd_batchid_obj = NULL;
1706 EXPORT_SYMBOL(distribute_txn_fini);