4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
33 * Author: Wang Di <wangdi@clusterfs.com>
36 #define DEBUG_SUBSYSTEM S_LOV
38 #include <linux/math64.h>
39 #include <linux/sort.h>
40 #include <libcfs/libcfs.h>
42 #include <obd_class.h>
43 #include "lov_internal.h"
/* Convert a wire-format (little-endian) extent to CPU byte order. */
46 lu_extent_le_to_cpu(struct lu_extent *dst, const struct lu_extent *src)
48 dst->e_start = le64_to_cpu(src->e_start);
49 dst->e_end = le64_to_cpu(src->e_end);
53 * Find minimum stripe maxbytes value. For inactive or
54 * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
56 static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt)
58 struct obd_import *imp;
/* default limit used when the target cannot report one */
59 loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
64 imp = tgt->ltd_obd->u.cli.cl_import;
/*
 * Only trust ocd_maxbytes when the import is connected (FULL or IDLE)
 * and the server advertised OBD_CONNECT_MAXBYTES with a positive value;
 * the connect data is read under imp_lock.
 */
68 spin_lock(&imp->imp_lock);
69 if ((imp->imp_state == LUSTRE_IMP_FULL ||
70 imp->imp_state == LUSTRE_IMP_IDLE) &&
71 (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
72 imp->imp_connect_data.ocd_maxbytes > 0)
73 maxbytes = imp->imp_connect_data.ocd_maxbytes;
75 spin_unlock(&imp->imp_lock);
/*
 * Sanity-check an on-disk LOV_MAGIC_V1/V3 layout: stripe count bound,
 * non-zero object id, supported striping pattern, and stripe size
 * alignment. Each failure dumps the layout at D_WARNING.
 */
80 static int lsm_lmm_verify_v1v3(struct lov_mds_md *lmm, size_t lmm_size,
83 u32 pattern = le32_to_cpu(lmm->lmm_pattern);
86 if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
88 CERROR("lov: bad stripe count %d: rc = %d\n",
90 lov_dump_lmm_common(D_WARNING, lmm);
94 if (lmm_oi_id(&lmm->lmm_oi) == 0) {
96 CERROR("lov: zero object id: rc = %d\n", rc);
97 lov_dump_lmm_common(D_WARNING, lmm);
101 if (!lov_pattern_supported(lov_pattern(pattern))) {
103 CERROR("lov: unrecognized striping pattern: rc = %d\n", rc);
104 lov_dump_lmm_common(D_WARNING, lmm);
/* stripe size must be a non-zero multiple of LOV_MIN_STRIPE_SIZE */
108 if (lmm->lmm_stripe_size == 0 ||
109 (le32_to_cpu(lmm->lmm_stripe_size)&(LOV_MIN_STRIPE_SIZE-1)) != 0) {
111 CERROR("lov: bad stripe size %u: rc = %d\n",
112 le32_to_cpu(lmm->lmm_stripe_size), rc);
113 lov_dump_lmm_common(D_WARNING, lmm);
/*
 * Free a layout component entry. Foreign entries are a fixed-size
 * allocation; regular entries also free each per-stripe lov_oinfo slab
 * unless the entry was never instantiated or is RELEASED.
 */
121 static void lsme_free(struct lov_stripe_md_entry *lsme)
123 unsigned int stripe_count;
127 if (lsme->lsme_magic == LOV_MAGIC_FOREIGN) {
129 * TODO: In addition to HSM foreign layout, It needs to add
130 * support for other kinds of foreign layout types such as
131 * DAOS, S3. When add these supports, it will use non-inline
132 * @lov_hsm_base to store layout information, and need to
133 * free extra allocated buffer.
135 OBD_FREE_LARGE(lsme, sizeof(*lsme));
139 stripe_count = lsme->lsme_stripe_count;
/* uninited/RELEASED entries have no lov_oinfo pointers to release */
140 if (!lsme_inited(lsme) ||
141 lsme->lsme_pattern & LOV_PATTERN_F_RELEASED)
143 for (i = 0; i < stripe_count; i++)
144 OBD_SLAB_FREE_PTR(lsme->lsme_oinfo[i], lov_oinfo_slab);
/* the entry was sized by its trailing flexible oinfo array */
146 lsme_size = offsetof(typeof(*lsme), lsme_oinfo[stripe_count]);
147 OBD_FREE_LARGE(lsme, lsme_size);
/*
 * Free a stripe MD: the foreign blob for LOV_MAGIC_FOREIGN, otherwise
 * every component entry, then the lsm container itself.
 */
150 void lsm_free(struct lov_stripe_md *lsm)
152 unsigned int entry_count = lsm->lsm_entry_count;
156 if (lsm->lsm_magic == LOV_MAGIC_FOREIGN) {
157 OBD_FREE_LARGE(lsm_foreign(lsm), lsm->lsm_foreign_size);
159 for (i = 0; i < entry_count; i++)
160 lsme_free(lsm->lsm_entries[i]);
/* a foreign lsm was allocated with a single entry slot */
163 lsm_size = lsm->lsm_magic == LOV_MAGIC_FOREIGN ?
164 offsetof(typeof(*lsm), lsm_entries[1]) :
165 offsetof(typeof(*lsm), lsm_entries[entry_count]);
166 OBD_FREE(lsm, lsm_size);
170 * Unpack a struct lov_mds_md into a struct lov_stripe_md_entry.
172 * The caller should set id and extent.
174 static struct lov_stripe_md_entry *
175 lsme_unpack(struct lov_obd *lov, struct lov_mds_md *lmm, size_t buf_size,
176 const char *pool_name, bool inited, struct lov_ost_data_v1 *objects,
179 struct lov_stripe_md_entry *lsme;
181 loff_t min_stripe_maxbytes = 0;
185 unsigned int stripe_count;
/* only plain V1/V3 layouts are handled here */
189 magic = le32_to_cpu(lmm->lmm_magic);
190 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
191 RETURN(ERR_PTR(-EINVAL));
193 pattern = le32_to_cpu(lmm->lmm_pattern);
194 if (pattern & LOV_PATTERN_F_RELEASED || !inited)
197 stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
/* buffer must be big enough for the header plus the objects array */
199 if (buf_size < lov_mds_md_size(stripe_count, magic)) {
200 CERROR("LOV EA %s too small: %zu, need %u\n",
201 magic == LOV_MAGIC_V1 ? "V1" : "V3", buf_size,
202 lov_mds_md_size(stripe_count, magic == LOV_MAGIC_V1 ?
203 LOV_MAGIC_V1 : LOV_MAGIC_V3));
204 lov_dump_lmm_common(D_WARNING, lmm);
205 return ERR_PTR(-EINVAL);
208 rc = lsm_lmm_verify_v1v3(lmm, buf_size, stripe_count);
/* entry is sized by its trailing flexible lsme_oinfo[] array */
212 lsme_size = offsetof(typeof(*lsme), lsme_oinfo[stripe_count]);
213 OBD_ALLOC_LARGE(lsme, lsme_size);
215 RETURN(ERR_PTR(-ENOMEM));
217 lsme->lsme_magic = magic;
218 lsme->lsme_pattern = pattern;
219 lsme->lsme_flags = 0;
220 lsme->lsme_stripe_size = le32_to_cpu(lmm->lmm_stripe_size);
221 /* preserve the possible -1 stripe count for uninstantiated component */
222 lsme->lsme_stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
223 lsme->lsme_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
226 size_t pool_name_len;
/* pool name must fit with its NUL; strlcpy returns the source length */
228 pool_name_len = strlcpy(lsme->lsme_pool_name, pool_name,
229 sizeof(lsme->lsme_pool_name));
230 if (pool_name_len >= sizeof(lsme->lsme_pool_name))
231 GOTO(out_lsme, rc = -E2BIG);
234 /* with Data-on-MDT set maxbytes to stripe size */
235 if (lsme_is_dom(lsme)) {
237 lov_bytes = lsme->lsme_stripe_size;
/* unpack each stripe's object info and track the smallest OST limit */
244 for (i = 0; i < stripe_count; i++) {
245 struct lov_oinfo *loi;
246 struct lov_tgt_desc *ltd;
248 OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
250 GOTO(out_lsme, rc = -ENOMEM);
252 lsme->lsme_oinfo[i] = loi;
254 ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi);
255 loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx);
256 loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen);
/* dummy (hole) objects carry no real target to validate */
257 if (lov_oinfo_is_dummy(loi))
/* OST index may legitimately exceed ld_tgt_count during config replay */
260 if (loi->loi_ost_idx >= lov->desc.ld_tgt_count &&
261 !lov2obd(lov)->obd_process_conf) {
262 CERROR("%s: OST index %d more than OST count %d\n",
263 (char*)lov->desc.ld_uuid.uuid,
264 loi->loi_ost_idx, lov->desc.ld_tgt_count);
265 lov_dump_lmm_v1(D_WARNING, lmm);
266 GOTO(out_lsme, rc = -EINVAL);
269 ltd = lov->lov_tgts[loi->loi_ost_idx];
271 CERROR("%s: OST index %d missing\n",
272 (char*)lov->desc.ld_uuid.uuid, loi->loi_ost_idx);
273 lov_dump_lmm_v1(D_WARNING, lmm);
277 lov_bytes = lov_tgt_maxbytes(ltd);
278 if (min_stripe_maxbytes == 0 || lov_bytes < min_stripe_maxbytes)
279 min_stripe_maxbytes = lov_bytes;
283 if (min_stripe_maxbytes == 0)
284 min_stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
/* stripe_count == 0 means "stripe over all targets" for sizing */
286 if (stripe_count == 0)
287 stripe_count = lov->desc.ld_tgt_count;
/* guard the multiply against signed overflow; clamp to MAX_LFS_FILESIZE */
289 if (min_stripe_maxbytes <= LLONG_MAX / stripe_count)
290 lov_bytes = min_stripe_maxbytes * stripe_count;
292 lov_bytes = MAX_LFS_FILESIZE;
294 *maxbytes = min_t(loff_t, lov_bytes, MAX_LFS_FILESIZE);
/* error path: release any lov_oinfo slabs already allocated */
301 for (i = 0; i < stripe_count; i++) {
302 struct lov_oinfo *loi = lsme->lsme_oinfo[i];
305 OBD_SLAB_FREE_PTR(lsme->lsme_oinfo[i], lov_oinfo_slab);
307 OBD_FREE_LARGE(lsme, lsme_size);
/*
 * Build a single-entry lov_stripe_md from a plain V1/V3 layout: unpack
 * the one component, mark it initialized and spanning [0, EOF), then
 * fill the lsm header fields from the on-disk lmm.
 */
313 lov_stripe_md *lsm_unpackmd_v1v3(struct lov_obd *lov, struct lov_mds_md *lmm,
314 size_t buf_size, const char *pool_name,
315 struct lov_ost_data_v1 *objects)
317 struct lov_stripe_md *lsm;
318 struct lov_stripe_md_entry *lsme;
324 pattern = le32_to_cpu(lmm->lmm_pattern);
326 lsme = lsme_unpack(lov, lmm, buf_size, pool_name, true, objects,
329 RETURN(ERR_CAST(lsme));
/* a plain layout has exactly one component covering the whole file */
331 lsme->lsme_flags = LCME_FL_INIT;
332 lsme->lsme_extent.e_start = 0;
333 lsme->lsme_extent.e_end = LUSTRE_EOF;
335 lsm_size = offsetof(typeof(*lsm), lsm_entries[1]);
336 OBD_ALLOC(lsm, lsm_size);
338 GOTO(out_lsme, rc = -ENOMEM);
340 atomic_set(&lsm->lsm_refc, 1);
341 spin_lock_init(&lsm->lsm_lock);
342 lsm->lsm_maxbytes = maxbytes;
343 lmm_oi_le_to_cpu(&lsm->lsm_oi, &lmm->lmm_oi);
344 lsm->lsm_magic = le32_to_cpu(lmm->lmm_magic);
345 lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
346 lsm->lsm_entry_count = 1;
347 lsm->lsm_is_released = pattern & LOV_PATTERN_F_RELEASED;
348 lsm->lsm_entries[0] = lsme;
/* V1 unpack: no pool name; objects array follows the lmm header. */
358 static struct lov_stripe_md *
359 lsm_unpackmd_v1(struct lov_obd *lov, void *buf, size_t buf_size)
361 struct lov_mds_md_v1 *lmm = buf;
363 return lsm_unpackmd_v1v3(lov, buf, buf_size, NULL, lmm->lmm_objects);
366 static const struct lsm_operations lsm_v1_ops = {
367 .lsm_unpackmd = lsm_unpackmd_v1,
/* V3 unpack: like V1 but the layout carries an OST pool name. */
370 static struct lov_stripe_md *
371 lsm_unpackmd_v3(struct lov_obd *lov, void *buf, size_t buf_size)
373 struct lov_mds_md_v3 *lmm = buf;
375 return lsm_unpackmd_v1v3(lov, buf, buf_size, lmm->lmm_pool_name,
379 static const struct lsm_operations lsm_v3_ops = {
380 .lsm_unpackmd = lsm_unpackmd_v3,
/*
 * Validate a composite (LCM) layout buffer: the buffer must hold the
 * advertised lcm_size, and every entry's blob [offset, offset+size)
 * must lie within the LCM (the sum check also catches overflow).
 */
383 static int lsm_verify_comp_md_v1(struct lov_comp_md_v1 *lcm,
386 unsigned int entry_count;
390 lcm_size = le32_to_cpu(lcm->lcm_size);
391 if (lcm_buf_size < lcm_size) {
392 CERROR("bad LCM buffer size %zu, expected %zu\n",
393 lcm_buf_size, lcm_size);
397 entry_count = le16_to_cpu(lcm->lcm_entry_count);
398 for (i = 0; i < entry_count; i++) {
399 struct lov_comp_md_entry_v1 *lcme = &lcm->lcm_entries[i];
403 blob_offset = le32_to_cpu(lcme->lcme_offset);
404 blob_size = le32_to_cpu(lcme->lcme_size);
406 if (lcm_size < blob_offset || lcm_size < blob_size ||
407 lcm_size < blob_offset + blob_size) {
408 CERROR("LCM entry %u has invalid blob: "
409 "LCM size = %zu, offset = %zu, size = %zu\n",
410 le32_to_cpu(lcme->lcme_id),
411 lcm_size, blob_offset, blob_size);
/*
 * Unpack a LOV_MAGIC_FOREIGN component into a fixed-size entry with the
 * FOREIGN pattern; no stripe objects exist for foreign layouts, so
 * maxbytes (if requested) is simply MAX_LFS_FILESIZE.
 */
419 static struct lov_stripe_md_entry *
420 lsme_unpack_foreign(struct lov_obd *lov, void *buf, size_t buf_size,
421 bool inited, loff_t *maxbytes)
423 struct lov_stripe_md_entry *lsme;
424 struct lov_foreign_md *lfm = buf;
429 magic = le32_to_cpu(lfm->lfm_magic);
430 if (magic != LOV_MAGIC_FOREIGN)
431 RETURN(ERR_PTR(-EINVAL));
/* fixed-size entry: no trailing lsme_oinfo[] for foreign layouts */
433 OBD_ALLOC_LARGE(lsme, sizeof(*lsme));
435 RETURN(ERR_PTR(-ENOMEM));
437 lsme->lsme_magic = magic;
438 lsme->lsme_pattern = LOV_PATTERN_FOREIGN;
439 lsme->lsme_flags = 0;
442 *maxbytes = MAX_LFS_FILESIZE;
/*
 * Unpack one component blob of a composite layout, dispatching on the
 * blob's own magic (V1, V3 or FOREIGN). A zero stripe count is only
 * legal for DoM (LOV_PATTERN_MDT) or foreign components.
 */
447 static struct lov_stripe_md_entry *
448 lsme_unpack_comp(struct lov_obd *lov, struct lov_mds_md *lmm,
449 size_t lmm_buf_size, bool inited, loff_t *maxbytes)
453 magic = le32_to_cpu(lmm->lmm_magic);
454 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3 &&
455 magic != LOV_MAGIC_FOREIGN)
456 RETURN(ERR_PTR(-EINVAL));
458 if (magic != LOV_MAGIC_FOREIGN &&
459 le16_to_cpu(lmm->lmm_stripe_count) == 0 &&
460 lov_pattern(le32_to_cpu(lmm->lmm_pattern)) != LOV_PATTERN_MDT)
461 RETURN(ERR_PTR(-EINVAL));
463 if (magic == LOV_MAGIC_V1) {
464 return lsme_unpack(lov, lmm, lmm_buf_size, NULL,
465 inited, lmm->lmm_objects, maxbytes);
466 } else if (magic == LOV_MAGIC_V3) {
467 struct lov_mds_md_v3 *lmm3 = (struct lov_mds_md_v3 *)lmm;
469 return lsme_unpack(lov, lmm, lmm_buf_size, lmm3->lmm_pool_name,
470 inited, lmm3->lmm_objects, maxbytes);
471 } else { /* LOV_MAGIC_FOREIGN */
472 return lsme_unpack_foreign(lov, lmm, lmm_buf_size,
/*
 * Unpack a composite (LOV_MAGIC_COMP_V1) layout: verify the LCM, then
 * unpack every component entry into the lsm. lsm_maxbytes is derived
 * from the last component's extent; it falls back to MAX_LFS_FILESIZE
 * when the last component is open-ended/undefined or would overflow.
 */
477 static struct lov_stripe_md *
478 lsm_unpackmd_comp_md_v1(struct lov_obd *lov, void *buf, size_t buf_size)
480 struct lov_comp_md_v1 *lcm = buf;
481 struct lov_stripe_md *lsm;
483 unsigned int entry_count = 0;
488 rc = lsm_verify_comp_md_v1(buf, buf_size);
492 entry_count = le16_to_cpu(lcm->lcm_entry_count);
494 lsm_size = offsetof(typeof(*lsm), lsm_entries[entry_count]);
495 OBD_ALLOC(lsm, lsm_size);
497 return ERR_PTR(-ENOMEM);
499 atomic_set(&lsm->lsm_refc, 1);
500 spin_lock_init(&lsm->lsm_lock);
501 lsm->lsm_magic = le32_to_cpu(lcm->lcm_magic);
502 lsm->lsm_layout_gen = le32_to_cpu(lcm->lcm_layout_gen);
503 lsm->lsm_entry_count = entry_count;
504 lsm->lsm_mirror_count = le16_to_cpu(lcm->lcm_mirror_count);
505 lsm->lsm_flags = le16_to_cpu(lcm->lcm_flags);
/* assume fully released until a non-released component is found */
506 lsm->lsm_is_released = true;
507 lsm->lsm_maxbytes = LLONG_MIN;
509 for (i = 0; i < entry_count; i++) {
510 struct lov_comp_md_entry_v1 *lcme = &lcm->lcm_entries[i];
511 struct lov_stripe_md_entry *lsme;
516 blob_offset = le32_to_cpu(lcme->lcme_offset);
517 blob_size = le32_to_cpu(lcme->lcme_size);
518 blob = (char *)lcm + blob_offset;
/* only the last component contributes to file-size limit (maxbytes) */
520 lsme = lsme_unpack_comp(lov, blob, blob_size,
521 le32_to_cpu(lcme->lcme_flags) &
523 (i == entry_count - 1) ? &maxbytes :
526 GOTO(out_lsm, rc = PTR_ERR(lsme));
528 if (!(lsme->lsme_pattern & LOV_PATTERN_F_RELEASED))
529 lsm->lsm_is_released = false;
531 lsm->lsm_entries[i] = lsme;
532 lsme->lsme_id = le32_to_cpu(lcme->lcme_id);
533 lsme->lsme_flags = le32_to_cpu(lcme->lcme_flags);
534 if (lsme->lsme_flags & LCME_FL_NOSYNC)
535 lsme->lsme_timestamp =
536 le64_to_cpu(lcme->lcme_timestamp);
537 lu_extent_le_to_cpu(&lsme->lsme_extent, &lcme->lcme_extent);
539 if (i == entry_count - 1) {
540 lsm->lsm_maxbytes = (loff_t)lsme->lsme_extent.e_start +
543 * the last component hasn't been defined, or
544 * lsm_maxbytes overflowed.
546 if (!lsme_is_dom(lsme) &&
547 (lsme->lsme_extent.e_end != LUSTRE_EOF ||
549 (loff_t)lsme->lsme_extent.e_start))
550 lsm->lsm_maxbytes = MAX_LFS_FILESIZE;
/* error path: free every component unpacked so far, then the lsm */
557 for (i = 0; i < entry_count; i++)
558 if (lsm->lsm_entries[i])
559 lsme_free(lsm->lsm_entries[i]);
561 OBD_FREE(lsm, lsm_size);
/* Operations for composite (LOV_MAGIC_COMP_V1) layouts. */
566 static const struct lsm_operations lsm_comp_md_v1_ops = {
567 .lsm_unpackmd = lsm_unpackmd_comp_md_v1,
/*
 * Unpack a whole-file foreign layout: allocate a one-slot lsm and keep
 * a full copy of the foreign EA (including format fields) reachable via
 * lsm_foreign(); lsm_free() releases it by lsm_foreign_size.
 */
571 lov_stripe_md *lsm_unpackmd_foreign(struct lov_obd *lov, void *buf,
574 struct lov_foreign_md *lfm = buf;
575 struct lov_stripe_md *lsm;
577 struct lov_stripe_md_entry *lsme;
579 lsm_size = offsetof(typeof(*lsm), lsm_entries[1]);
580 OBD_ALLOC(lsm, lsm_size);
582 RETURN(ERR_PTR(-ENOMEM));
584 atomic_set(&lsm->lsm_refc, 1);
585 spin_lock_init(&lsm->lsm_lock);
586 lsm->lsm_magic = le32_to_cpu(lfm->lfm_magic);
587 lsm->lsm_foreign_size = foreign_size_le(lfm);
589 /* alloc for full foreign EA including format fields */
590 OBD_ALLOC_LARGE(lsme, lsm->lsm_foreign_size);
592 OBD_FREE(lsm, lsm_size);
593 RETURN(ERR_PTR(-ENOMEM));
596 /* copy full foreign EA including format fields */
597 memcpy(lsme, buf, lsm->lsm_foreign_size);
599 lsm_foreign(lsm) = lsme;
/* Operations for foreign (LOV_MAGIC_FOREIGN) layouts. */
604 static const struct lsm_operations lsm_foreign_ops = {
605 .lsm_unpackmd = lsm_unpackmd_foreign,
/*
 * Map an on-disk layout magic to its lsm_operations table; logs and
 * rejects unknown magics.
 */
608 const struct lsm_operations *lsm_op_find(int magic)
615 case LOV_MAGIC_COMP_V1:
616 return &lsm_comp_md_v1_ops;
617 case LOV_MAGIC_FOREIGN:
618 return &lsm_foreign_ops;
620 CERROR("unrecognized lsm_magic %08x\n", magic);
/*
 * Debug dump of a stripe MD at the given CDEBUG level: lsm header,
 * then either the foreign EA contents or, per component, its extent,
 * flags and per-stripe object ids (skipping uninited/RELEASED entries).
 */
625 void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm)
630 "lsm %p, objid "DOSTID", maxbytes %#llx, magic 0x%08X, refc: %d, entry: %u, mirror: %u, flags: %u,layout_gen %u\n",
631 lsm, POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic,
632 atomic_read(&lsm->lsm_refc), lsm->lsm_entry_count,
633 lsm->lsm_mirror_count, lsm->lsm_flags, lsm->lsm_layout_gen);
635 if (lsm->lsm_magic == LOV_MAGIC_FOREIGN) {
636 struct lov_foreign_md *lfm = (void *)lsm_foreign(lsm);
639 "foreign LOV EA, magic %x, length %u, type %x, flags %x, value '%.*s'\n",
640 lfm->lfm_magic, lfm->lfm_length, lfm->lfm_type,
641 lfm->lfm_flags, lfm->lfm_length, lfm->lfm_value);
645 for (i = 0; i < lsm->lsm_entry_count; i++) {
646 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
648 CDEBUG(level, DEXT ": id: %u, flags: %x, "
649 "magic 0x%08X, layout_gen %u, "
650 "stripe count %u, sstripe size %u, "
651 "pool: ["LOV_POOLNAMEF"]\n",
652 PEXT(&lse->lsme_extent), lse->lsme_id, lse->lsme_flags,
653 lse->lsme_magic, lse->lsme_layout_gen,
654 lse->lsme_stripe_count, lse->lsme_stripe_size,
655 lse->lsme_pool_name);
/* no per-stripe oinfo exists for uninited or RELEASED components */
656 if (!lsme_inited(lse) ||
657 lse->lsme_pattern & LOV_PATTERN_F_RELEASED)
659 for (j = 0; j < lse->lsme_stripe_count; j++) {
660 CDEBUG(level, " oinfo:%p: ostid: "DOSTID
661 " ost idx: %d gen: %d\n",
663 POSTID(&lse->lsme_oinfo[j]->loi_oi),
664 lse->lsme_oinfo[j]->loi_ost_idx,
665 lse->lsme_oinfo[j]->loi_ost_gen);
/*
 * Look up the component whose extent covers @offset; an @offset of
 * OBD_OBJECT_EOF matches a component that ends at EOF.
 * NOTE(review): return statements are not visible in this chunk —
 * presumably returns the matching index (or a negative/not-found
 * value when no component matches); confirm against the full source.
 */
670 int lov_lsm_entry(const struct lov_stripe_md *lsm, __u64 offset)
674 for (i = 0; i < lsm->lsm_entry_count; i++) {
675 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
677 if ((offset >= lse->lsme_extent.e_start &&
678 offset < lse->lsme_extent.e_end) ||
679 (offset == OBD_OBJECT_EOF &&
680 lse->lsme_extent.e_end == OBD_OBJECT_EOF))
688 * lmm_layout_gen overlaps stripe_offset field, it needs to be reset back when
689 * sending to MDT for passing striping checks
691 void lov_fix_ea_for_replay(void *lovea)
693 struct lov_user_md *lmm = lovea;
694 struct lov_comp_md_v1 *c1;
697 switch (le32_to_cpu(lmm->lmm_magic)) {
698 case LOV_USER_MAGIC_V1:
699 case LOV_USER_MAGIC_V3:
/* plain layouts: reset the single stripe_offset field */
700 lmm->lmm_stripe_offset = LOV_OFFSET_DEFAULT;
703 case LOV_USER_MAGIC_COMP_V1:
/* composite: reset stripe_offset in every initialized component */
705 for (i = 0; i < le16_to_cpu(c1->lcm_entry_count); i++) {
706 struct lov_comp_md_entry_v1 *ent = &c1->lcm_entries[i];
708 if (le32_to_cpu(ent->lcme_flags) & LCME_FL_INIT) {
709 lmm = (void *)((char *)c1 +
710 le32_to_cpu(ent->lcme_offset));
711 lmm->lmm_stripe_offset = LOV_OFFSET_DEFAULT;
716 EXPORT_SYMBOL(lov_fix_ea_for_replay);