list_del_init(&dtrq->dtrq_list);
dtrq_destroy(dtrq);
}
+ list_for_each_entry_safe(dtrq, tmp, &tdtd->tdtd_replay_finish_list,
+ dtrq_list) {
+ list_del_init(&dtrq->dtrq_list);
+ dtrq_destroy(dtrq);
+ }
spin_unlock(&tdtd->tdtd_replay_list_lock);
}
EXPORT_SYMBOL(dtrq_list_destroy);
}
EXPORT_SYMBOL(distribute_txn_get_next_transno);
+/**
+ * Look up a replay request on the replay-finish list by client XID.
+ *
+ * Walk \a tdtd->tdtd_replay_finish_list under tdtd_replay_list_lock and
+ * return the first entry whose dtrq_xid equals \a xid, or NULL if no
+ * entry matches.
+ *
+ * NOTE(review): the pointer is returned after the lock is dropped and no
+ * reference is taken, so the entry may be freed concurrently (e.g. by
+ * dtrq_list_destroy()); callers should only test the result for NULL
+ * unless lifetime is otherwise guaranteed — confirm against all callers.
+ *
+ * \param[in] tdtd	distribute-transaction data holding the finish list
+ * \param[in] xid	client request XID to search for
+ *
+ * \retval	matching replay request, or NULL if none was found
+ */
+struct distribute_txn_replay_req *
+distribute_txn_lookup_finish_list(struct target_distribute_txn_data *tdtd,
+				  __u64 xid)
+{
+	struct distribute_txn_replay_req *dtrq = NULL;
+	struct distribute_txn_replay_req *iter;
+
+	spin_lock(&tdtd->tdtd_replay_list_lock);
+	list_for_each_entry(iter, &tdtd->tdtd_replay_finish_list, dtrq_list) {
+		if (iter->dtrq_xid == xid) {
+			dtrq = iter;
+			break;
+		}
+	}
+	spin_unlock(&tdtd->tdtd_replay_list_lock);
+	return dtrq;
+}
+
+/**
+ * Check whether a request was already replayed via update-log replay.
+ *
+ * If the export's target has distribute-transaction data, search its
+ * replay-finish list for an entry carrying the request's XID; a match
+ * means this request was already handled by update replay.
+ *
+ * Only the presence of a matching entry is used here; the looked-up
+ * pointer is never dereferenced, so no reference on the entry is needed.
+ *
+ * \param[in] req	incoming request to check (uses req->rq_xid)
+ *
+ * \retval true		the request's XID is on the replay-finish list
+ * \retval false	no tdtd on this target, or no matching entry
+ */
+bool is_req_replayed_by_update(struct ptlrpc_request *req)
+{
+	struct lu_target *tgt = class_exp2tgt(req->rq_export);
+	struct distribute_txn_replay_req *dtrq;
+
+	/* Target without distributed-transaction data cannot have
+	 * replayed anything via update logs. */
+	if (tgt->lut_tdtd == NULL)
+		return false;
+
+	dtrq = distribute_txn_lookup_finish_list(tgt->lut_tdtd, req->rq_xid);
+	if (dtrq == NULL)
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL(is_req_replayed_by_update);
+
/**
* Check if the update of one object is committed
*
struct target_distribute_txn_data *tdtd,
struct thandle *th,
struct thandle *master_th,
+ struct distribute_txn_replay_req *dtrq,
struct tx_arg *ta_arg)
{
struct tgt_session_info *tsi;
tsi->tsi_opdata = lrd->lrd_data;
tsi->tsi_result = lrd->lrd_result;
tsi->tsi_client_gen = lrd->lrd_client_gen;
+ dtrq->dtrq_xid = lrd->lrd_xid;
top_th = container_of(th, struct top_thandle, tt_super);
top_th->tt_master_sub_thandle = master_th;
cfs_hash_putref(hash);
* tgt_last_rcvd_update() can be called correctly */
if (rc == 0 && dt_obj == tdtd->tdtd_lut->lut_reply_data)
update_recovery_update_ses(env, tdtd, th,
- st->st_sub_th, ta_arg);
+ st->st_sub_th, dtrq, ta_arg);
if (unlikely(rc < 0)) {
CDEBUG(D_HA, "error during execution of #%u from"