4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
34 * Author: Wang Di <wangdi@clusterfs.com>
37 #define DEBUG_SUBSYSTEM S_LOV
39 #include <linux/math64.h>
40 #include <linux/sort.h>
41 #include <libcfs/libcfs.h>
43 #include <obd_class.h>
44 #include "lov_internal.h"
/* Convert a wire-format (little-endian) lu_extent to CPU byte order. */
47 lu_extent_le_to_cpu(struct lu_extent *dst, const struct lu_extent *src)
49 dst->e_start = le64_to_cpu(src->e_start);
50 dst->e_end = le64_to_cpu(src->e_end);
54 * Find minimum stripe maxbytes value. For inactive or
55 * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
57 static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt)
59 struct obd_import *imp;
/* Conservative default; only raised when the import advertises better. */
60 loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
65 imp = tgt->ltd_obd->u.cli.cl_import;
/* imp_lock guards imp_state and imp_connect_data while we sample them. */
69 spin_lock(&imp->imp_lock);
/* Trust ocd_maxbytes only for a fully-connected (or idle) import that
 * negotiated OBD_CONNECT_MAXBYTES and reported a positive value. */
70 if ((imp->imp_state == LUSTRE_IMP_FULL ||
71 imp->imp_state == LUSTRE_IMP_IDLE) &&
72 (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
73 imp->imp_connect_data.ocd_maxbytes > 0)
74 maxbytes = imp->imp_connect_data.ocd_maxbytes;
76 spin_unlock(&imp->imp_lock);
/*
 * Sanity-check an on-disk V1/V3 lov_mds_md before unpacking: stripe count,
 * object id, striping pattern and stripe size must all be plausible.
 * NOTE(review): error-return lines are elided in this view; each failed
 * check presumably sets rc and returns it — confirm against full source.
 */
81 static int lsm_lmm_verify_v1v3(struct lov_mds_md *lmm, size_t lmm_size,
84 u32 pattern = le32_to_cpu(lmm->lmm_pattern);
/* Reject absurd stripe counts (corruption or attack). */
87 if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
89 CERROR("lov: bad stripe count %d: rc = %d\n",
91 lov_dump_lmm_common(D_WARNING, lmm);
/* A zero object id is never valid. */
95 if (lmm_oi_id(&lmm->lmm_oi) == 0) {
97 CERROR("lov: zero object id: rc = %d\n", rc);
98 lov_dump_lmm_common(D_WARNING, lmm);
102 if (!lov_pattern_supported(lov_pattern(pattern))) {
104 CERROR("lov: unrecognized striping pattern: rc = %d\n", rc);
105 lov_dump_lmm_common(D_WARNING, lmm);
/* Stripe size must be non-zero and a multiple of LOV_MIN_STRIPE_SIZE. */
109 if (lmm->lmm_stripe_size == 0 ||
110 (le32_to_cpu(lmm->lmm_stripe_size)&(LOV_MIN_STRIPE_SIZE-1)) != 0) {
112 CERROR("lov: bad stripe size %u: rc = %d\n",
113 le32_to_cpu(lmm->lmm_stripe_size), rc);
114 lov_dump_lmm_common(D_WARNING, lmm);
/*
 * Release one lov_stripe_md_entry, including its per-stripe lov_oinfo
 * slab objects when the entry was instantiated with real stripes.
 */
122 static void lsme_free(struct lov_stripe_md_entry *lsme)
124 unsigned int stripe_count;
/* Foreign entries are a single flat allocation — no per-stripe oinfo. */
128 if (lsme->lsme_magic == LOV_MAGIC_FOREIGN) {
130 * TODO: In addition to HSM foreign layout, It needs to add
131 * support for other kinds of foreign layout types such as
132 * DAOS, S3. When add these supports, it will use non-inline
133 * @lov_hsm_base to store layout information, and need to
134 * free extra allocated buffer.
136 OBD_FREE_LARGE(lsme, sizeof(*lsme));
140 stripe_count = lsme->lsme_stripe_count;
/* Uninitialized or released components never allocated oinfo entries. */
141 if (!lsme_inited(lsme) ||
142 lsme->lsme_pattern & LOV_PATTERN_F_RELEASED)
144 for (i = 0; i < stripe_count; i++)
145 OBD_SLAB_FREE_PTR(lsme->lsme_oinfo[i], lov_oinfo_slab);
/* Entry size depends on stripe_count via the flexible oinfo array. */
147 lsme_size = offsetof(typeof(*lsme), lsme_oinfo[stripe_count]);
148 OBD_FREE_LARGE(lsme, lsme_size);
/*
 * Free an entire lov_stripe_md: foreign layouts drop the single foreign
 * blob, composite/plain layouts free each component entry, then the
 * lsm container itself (sized by entry count, or one slot for foreign).
 */
151 void lsm_free(struct lov_stripe_md *lsm)
153 unsigned int entry_count = lsm->lsm_entry_count;
157 if (lsm->lsm_magic == LOV_MAGIC_FOREIGN) {
158 OBD_FREE_LARGE(lsm_foreign(lsm), lsm->lsm_foreign_size);
160 for (i = 0; i < entry_count; i++)
161 lsme_free(lsm->lsm_entries[i]);
/* Foreign lsm reserves exactly one entry slot for the blob pointer. */
164 lsm_size = lsm->lsm_magic == LOV_MAGIC_FOREIGN ?
165 offsetof(typeof(*lsm), lsm_entries[1]) :
166 offsetof(typeof(*lsm), lsm_entries[entry_count]);
167 OBD_FREE(lsm, lsm_size);
171 * Unpack a struct lov_mds_md into a struct lov_stripe_md_entry.
173 * The caller should set id and extent.
175 static struct lov_stripe_md_entry *
176 lsme_unpack(struct lov_obd *lov, struct lov_mds_md *lmm, size_t buf_size,
177 const char *pool_name, bool inited, struct lov_ost_data_v1 *objects,
180 struct lov_stripe_md_entry *lsme;
182 loff_t min_stripe_maxbytes = 0;
186 unsigned int stripe_count;
/* Only plain V1/V3 layouts are handled here. */
190 magic = le32_to_cpu(lmm->lmm_magic);
191 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
192 RETURN(ERR_PTR(-EINVAL));
194 pattern = le32_to_cpu(lmm->lmm_pattern);
/* NOTE(review): the released/uninited branch body is elided here;
 * presumably it skips object processing — confirm against full source. */
195 if (pattern & LOV_PATTERN_F_RELEASED || !inited)
198 stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
/* The EA buffer must be large enough for the claimed stripe count. */
200 if (buf_size < lov_mds_md_size(stripe_count, magic)) {
201 CERROR("LOV EA %s too small: %zu, need %u\n",
202 magic == LOV_MAGIC_V1 ? "V1" : "V3", buf_size,
203 lov_mds_md_size(stripe_count, magic == LOV_MAGIC_V1 ?
204 LOV_MAGIC_V1 : LOV_MAGIC_V3));
205 lov_dump_lmm_common(D_WARNING, lmm);
206 return ERR_PTR(-EINVAL);
209 rc = lsm_lmm_verify_v1v3(lmm, buf_size, stripe_count);
/* Entry allocation includes the trailing per-stripe oinfo pointer array. */
213 lsme_size = offsetof(typeof(*lsme), lsme_oinfo[stripe_count]);
214 OBD_ALLOC_LARGE(lsme, lsme_size);
216 RETURN(ERR_PTR(-ENOMEM));
218 lsme->lsme_magic = magic;
219 lsme->lsme_pattern = pattern;
220 lsme->lsme_flags = 0;
221 lsme->lsme_stripe_size = le32_to_cpu(lmm->lmm_stripe_size);
222 /* preserve the possible -1 stripe count for uninstantiated component */
223 lsme->lsme_stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
224 lsme->lsme_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
227 size_t pool_name_len;
/* strlcpy returns the source length: >= buffer size means truncation. */
229 pool_name_len = strlcpy(lsme->lsme_pool_name, pool_name,
230 sizeof(lsme->lsme_pool_name));
231 if (pool_name_len >= sizeof(lsme->lsme_pool_name))
232 GOTO(out_lsme, rc = -E2BIG);
235 /* with Data-on-MDT set maxbytes to stripe size */
236 if (lsme_is_dom(lsme)) {
238 lov_bytes = lsme->lsme_stripe_size;
/* Unpack each on-disk object descriptor into an in-memory lov_oinfo. */
245 for (i = 0; i < stripe_count; i++) {
246 struct lov_oinfo *loi;
247 struct lov_tgt_desc *ltd;
249 OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
251 GOTO(out_lsme, rc = -ENOMEM);
253 lsme->lsme_oinfo[i] = loi;
255 ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi);
256 loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx);
257 loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen);
/* Dummy (hole) objects contribute nothing to maxbytes. */
258 if (lov_oinfo_is_dummy(loi))
/* Out-of-range OST index is fatal unless config processing is active. */
261 if (loi->loi_ost_idx >= lov->desc.ld_tgt_count &&
262 !lov2obd(lov)->obd_process_conf) {
263 CERROR("%s: OST index %d more than OST count %d\n",
264 (char*)lov->desc.ld_uuid.uuid,
265 loi->loi_ost_idx, lov->desc.ld_tgt_count);
266 lov_dump_lmm_v1(D_WARNING, lmm);
267 GOTO(out_lsme, rc = -EINVAL);
270 ltd = lov->lov_tgts[loi->loi_ost_idx];
272 CERROR("%s: OST index %d missing\n",
273 (char*)lov->desc.ld_uuid.uuid, loi->loi_ost_idx);
274 lov_dump_lmm_v1(D_WARNING, lmm);
/* Track the smallest per-target limit across all stripes. */
278 lov_bytes = lov_tgt_maxbytes(ltd);
279 if (min_stripe_maxbytes == 0 || lov_bytes < min_stripe_maxbytes)
280 min_stripe_maxbytes = lov_bytes;
284 if (min_stripe_maxbytes == 0)
285 min_stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
/* stripe_count 0 means "stripe over all targets". */
287 if (stripe_count == 0)
288 stripe_count = lov->desc.ld_tgt_count;
/* Avoid signed overflow when scaling by stripe count; clamp to LFS max. */
290 if (min_stripe_maxbytes <= LLONG_MAX / stripe_count)
291 lov_bytes = min_stripe_maxbytes * stripe_count;
293 lov_bytes = MAX_LFS_FILESIZE;
295 *maxbytes = min_t(loff_t, lov_bytes, MAX_LFS_FILESIZE);
/* Error unwind: free any oinfo already allocated, then the entry. */
302 for (i = 0; i < stripe_count; i++) {
303 struct lov_oinfo *loi = lsme->lsme_oinfo[i];
306 OBD_SLAB_FREE_PTR(lsme->lsme_oinfo[i], lov_oinfo_slab);
308 OBD_FREE_LARGE(lsme, lsme_size);
/*
 * Build a single-component lov_stripe_md from a plain V1/V3 layout:
 * unpack the one entry, mark it initialized and spanning [0, EOF),
 * then wrap it in a freshly allocated lsm with refcount 1.
 */
314 lov_stripe_md *lsm_unpackmd_v1v3(struct lov_obd *lov, struct lov_mds_md *lmm,
315 size_t buf_size, const char *pool_name,
316 struct lov_ost_data_v1 *objects)
318 struct lov_stripe_md *lsm;
319 struct lov_stripe_md_entry *lsme;
325 pattern = le32_to_cpu(lmm->lmm_pattern);
327 lsme = lsme_unpack(lov, lmm, buf_size, pool_name, true, objects,
330 RETURN(ERR_CAST(lsme));
/* Plain layouts have exactly one component covering the whole file. */
332 lsme->lsme_flags = LCME_FL_INIT;
333 lsme->lsme_extent.e_start = 0;
334 lsme->lsme_extent.e_end = LUSTRE_EOF;
336 lsm_size = offsetof(typeof(*lsm), lsm_entries[1]);
337 OBD_ALLOC(lsm, lsm_size);
339 GOTO(out_lsme, rc = -ENOMEM);
341 atomic_set(&lsm->lsm_refc, 1);
342 spin_lock_init(&lsm->lsm_lock);
343 lsm->lsm_maxbytes = maxbytes;
344 lmm_oi_le_to_cpu(&lsm->lsm_oi, &lmm->lmm_oi);
345 lsm->lsm_magic = le32_to_cpu(lmm->lmm_magic);
346 lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
347 lsm->lsm_entry_count = 1;
348 lsm->lsm_is_released = pattern & LOV_PATTERN_F_RELEASED;
349 lsm->lsm_entries[0] = lsme;
/* V1 unpack entry point: no pool name, objects array follows the header. */
359 static struct lov_stripe_md *
360 lsm_unpackmd_v1(struct lov_obd *lov, void *buf, size_t buf_size)
362 struct lov_mds_md_v1 *lmm = buf;
364 return lsm_unpackmd_v1v3(lov, buf, buf_size, NULL, lmm->lmm_objects);
/* Operations vector for LOV_MAGIC_V1 layouts. */
367 static const struct lsm_operations lsm_v1_ops = {
368 .lsm_unpackmd = lsm_unpackmd_v1,
/* V3 unpack entry point: V3 additionally carries an OST pool name. */
371 static struct lov_stripe_md *
372 lsm_unpackmd_v3(struct lov_obd *lov, void *buf, size_t buf_size)
374 struct lov_mds_md_v3 *lmm = buf;
376 return lsm_unpackmd_v1v3(lov, buf, buf_size, lmm->lmm_pool_name,
/* Operations vector for LOV_MAGIC_V3 layouts. */
380 static const struct lsm_operations lsm_v3_ops = {
381 .lsm_unpackmd = lsm_unpackmd_v3,
/*
 * Validate a composite (PFL) layout header: the declared lcm_size must fit
 * in the supplied buffer, and every component's blob (offset, size) must
 * lie entirely within lcm_size — including overflow of offset + size.
 */
384 static int lsm_verify_comp_md_v1(struct lov_comp_md_v1 *lcm,
387 unsigned int entry_count;
391 lcm_size = le32_to_cpu(lcm->lcm_size);
392 if (lcm_buf_size < lcm_size) {
393 CERROR("bad LCM buffer size %zu, expected %zu\n",
394 lcm_buf_size, lcm_size);
398 entry_count = le16_to_cpu(lcm->lcm_entry_count);
399 for (i = 0; i < entry_count; i++) {
400 struct lov_comp_md_entry_v1 *lcme = &lcm->lcm_entries[i];
404 blob_offset = le32_to_cpu(lcme->lcme_offset);
405 blob_size = le32_to_cpu(lcme->lcme_size);
/* Three-way check guards against offset+size wrapping past lcm_size. */
407 if (lcm_size < blob_offset || lcm_size < blob_size ||
408 lcm_size < blob_offset + blob_size) {
409 CERROR("LCM entry %u has invalid blob: "
410 "LCM size = %zu, offset = %zu, size = %zu\n",
411 le32_to_cpu(lcme->lcme_id),
412 lcm_size, blob_offset, blob_size);
/*
 * Unpack a foreign-layout component into a minimal lsme: no stripes, no
 * oinfo — just magic, the FOREIGN pattern, and unbounded maxbytes.
 */
420 static struct lov_stripe_md_entry *
421 lsme_unpack_foreign(struct lov_obd *lov, void *buf, size_t buf_size,
422 bool inited, loff_t *maxbytes)
424 struct lov_stripe_md_entry *lsme;
425 struct lov_foreign_md *lfm = buf;
430 magic = le32_to_cpu(lfm->lfm_magic);
431 if (magic != LOV_MAGIC_FOREIGN)
432 RETURN(ERR_PTR(-EINVAL));
/* Fixed-size allocation: foreign entries carry no oinfo array. */
434 OBD_ALLOC_LARGE(lsme, sizeof(*lsme));
436 RETURN(ERR_PTR(-ENOMEM));
438 lsme->lsme_magic = magic;
439 lsme->lsme_pattern = LOV_PATTERN_FOREIGN;
440 lsme->lsme_flags = 0;
/* Foreign data is managed externally, so no client-side size limit. */
443 *maxbytes = MAX_LFS_FILESIZE;
/*
 * Dispatch one composite-layout component blob to the right unpacker
 * based on its magic (V1, V3, or FOREIGN). Rejects a zero stripe count
 * for non-foreign, non-DoM components.
 */
448 static struct lov_stripe_md_entry *
449 lsme_unpack_comp(struct lov_obd *lov, struct lov_mds_md *lmm,
450 size_t lmm_buf_size, bool inited, loff_t *maxbytes)
454 magic = le32_to_cpu(lmm->lmm_magic);
455 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3 &&
456 magic != LOV_MAGIC_FOREIGN)
457 RETURN(ERR_PTR(-EINVAL));
/* Zero stripes is only legal for MDT (Data-on-MDT) components. */
459 if (magic != LOV_MAGIC_FOREIGN &&
460 le16_to_cpu(lmm->lmm_stripe_count) == 0 &&
461 lov_pattern(le32_to_cpu(lmm->lmm_pattern)) != LOV_PATTERN_MDT)
462 RETURN(ERR_PTR(-EINVAL));
464 if (magic == LOV_MAGIC_V1) {
465 return lsme_unpack(lov, lmm, lmm_buf_size, NULL,
466 inited, lmm->lmm_objects, maxbytes);
467 } else if (magic == LOV_MAGIC_V3) {
468 struct lov_mds_md_v3 *lmm3 = (struct lov_mds_md_v3 *)lmm;
470 return lsme_unpack(lov, lmm, lmm_buf_size, lmm3->lmm_pool_name,
471 inited, lmm3->lmm_objects, maxbytes);
472 } else { /* LOV_MAGIC_FOREIGN */
473 return lsme_unpack_foreign(lov, lmm, lmm_buf_size,
/*
 * Unpack a composite (PFL) layout: verify the header, allocate an lsm
 * sized for all components, then unpack each component blob in place.
 * lsm_maxbytes is derived from the last component's extent; an lsm is
 * "released" only if every component carries LOV_PATTERN_F_RELEASED.
 */
478 static struct lov_stripe_md *
479 lsm_unpackmd_comp_md_v1(struct lov_obd *lov, void *buf, size_t buf_size)
481 struct lov_comp_md_v1 *lcm = buf;
482 struct lov_stripe_md *lsm;
484 unsigned int entry_count = 0;
489 rc = lsm_verify_comp_md_v1(buf, buf_size);
493 entry_count = le16_to_cpu(lcm->lcm_entry_count);
495 lsm_size = offsetof(typeof(*lsm), lsm_entries[entry_count]);
496 OBD_ALLOC(lsm, lsm_size);
498 return ERR_PTR(-ENOMEM);
500 atomic_set(&lsm->lsm_refc, 1);
501 spin_lock_init(&lsm->lsm_lock);
502 lsm->lsm_magic = le32_to_cpu(lcm->lcm_magic);
503 lsm->lsm_layout_gen = le32_to_cpu(lcm->lcm_layout_gen);
504 lsm->lsm_entry_count = entry_count;
505 lsm->lsm_mirror_count = le16_to_cpu(lcm->lcm_mirror_count);
506 lsm->lsm_flags = le16_to_cpu(lcm->lcm_flags);
/* Assume released until any component proves otherwise below. */
507 lsm->lsm_is_released = true;
508 lsm->lsm_maxbytes = LLONG_MIN;
510 for (i = 0; i < entry_count; i++) {
511 struct lov_comp_md_entry_v1 *lcme = &lcm->lcm_entries[i];
512 struct lov_stripe_md_entry *lsme;
/* Component blob location was validated by lsm_verify_comp_md_v1(). */
517 blob_offset = le32_to_cpu(lcme->lcme_offset);
518 blob_size = le32_to_cpu(lcme->lcme_size);
519 blob = (char *)lcm + blob_offset;
/* Only the last component's maxbytes matters for the file limit. */
521 lsme = lsme_unpack_comp(lov, blob, blob_size,
522 le32_to_cpu(lcme->lcme_flags) &
524 (i == entry_count - 1) ? &maxbytes :
527 GOTO(out_lsm, rc = PTR_ERR(lsme));
529 if (!(lsme->lsme_pattern & LOV_PATTERN_F_RELEASED))
530 lsm->lsm_is_released = false;
532 lsm->lsm_entries[i] = lsme;
533 lsme->lsme_id = le32_to_cpu(lcme->lcme_id);
534 lsme->lsme_flags = le32_to_cpu(lcme->lcme_flags);
535 if (lsme->lsme_flags & LCME_FL_NOSYNC)
536 lsme->lsme_timestamp =
537 le64_to_cpu(lcme->lcme_timestamp);
538 lu_extent_le_to_cpu(&lsme->lsme_extent, &lcme->lcme_extent);
540 if (i == entry_count - 1) {
541 lsm->lsm_maxbytes = (loff_t)lsme->lsme_extent.e_start +
544 * the last component hasn't been defined, or
545 * lsm_maxbytes overflowed.
547 if (!lsme_is_dom(lsme) &&
548 (lsme->lsme_extent.e_end != LUSTRE_EOF ||
550 (loff_t)lsme->lsme_extent.e_start))
551 lsm->lsm_maxbytes = MAX_LFS_FILESIZE;
/* Error unwind: free every component unpacked so far, then the lsm. */
558 for (i = 0; i < entry_count; i++)
559 if (lsm->lsm_entries[i])
560 lsme_free(lsm->lsm_entries[i]);
562 OBD_FREE(lsm, lsm_size);
/* Operations vector for LOV_MAGIC_COMP_V1 (composite/PFL) layouts. */
567 static const struct lsm_operations lsm_comp_md_v1_ops = {
568 .lsm_unpackmd = lsm_unpackmd_comp_md_v1,
/*
 * Unpack a whole-file foreign layout: allocate a one-slot lsm and store a
 * private copy of the complete foreign EA (format fields included) so the
 * original buffer can be freed by the caller.
 */
572 lov_stripe_md *lsm_unpackmd_foreign(struct lov_obd *lov, void *buf,
575 struct lov_foreign_md *lfm = buf;
576 struct lov_stripe_md *lsm;
578 struct lov_stripe_md_entry *lsme;
580 lsm_size = offsetof(typeof(*lsm), lsm_entries[1]);
581 OBD_ALLOC(lsm, lsm_size);
583 RETURN(ERR_PTR(-ENOMEM));
585 atomic_set(&lsm->lsm_refc, 1);
586 spin_lock_init(&lsm->lsm_lock);
587 lsm->lsm_magic = le32_to_cpu(lfm->lfm_magic);
588 lsm->lsm_foreign_size = foreign_size_le(lfm);
590 /* alloc for full foreign EA including format fields */
591 OBD_ALLOC_LARGE(lsme, lsm->lsm_foreign_size);
/* Unwind the lsm allocation if the EA copy cannot be allocated. */
593 OBD_FREE(lsm, lsm_size);
594 RETURN(ERR_PTR(-ENOMEM));
597 /* copy full foreign EA including format fields */
598 memcpy(lsme, buf, lsm->lsm_foreign_size);
600 lsm_foreign(lsm) = lsme;
/* Operations vector for LOV_MAGIC_FOREIGN layouts. */
605 static const struct lsm_operations lsm_foreign_ops = {
606 .lsm_unpackmd = lsm_unpackmd_foreign,
/*
 * Map an on-disk layout magic to its lsm_operations vector.
 * NOTE(review): V1/V3 cases are elided in this view — presumably they
 * return &lsm_v1_ops / &lsm_v3_ops; confirm against full source.
 */
609 const struct lsm_operations *lsm_op_find(int magic)
616 case LOV_MAGIC_COMP_V1:
617 return &lsm_comp_md_v1_ops;
618 case LOV_MAGIC_FOREIGN:
619 return &lsm_foreign_ops;
621 CERROR("unrecognized lsm_magic %08x\n", magic);
/*
 * Debug-print a stripe metadata object: the lsm header, then either the
 * foreign EA blob or each component's extent/flags plus per-stripe object
 * ids for instantiated, non-released components.
 */
626 void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm)
631 "lsm %p, objid "DOSTID", maxbytes %#llx, magic 0x%08X, refc: %d, entry: %u, mirror: %u, flags: %u,layout_gen %u\n",
632 lsm, POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic,
633 atomic_read(&lsm->lsm_refc), lsm->lsm_entry_count,
634 lsm->lsm_mirror_count, lsm->lsm_flags, lsm->lsm_layout_gen);
/* Foreign layouts have no components — dump the raw EA and stop. */
636 if (lsm->lsm_magic == LOV_MAGIC_FOREIGN) {
637 struct lov_foreign_md *lfm = (void *)lsm_foreign(lsm);
640 "foreign LOV EA, magic %x, length %u, type %x, flags %x, value '%.*s'\n",
641 lfm->lfm_magic, lfm->lfm_length, lfm->lfm_type,
642 lfm->lfm_flags, lfm->lfm_length, lfm->lfm_value);
646 for (i = 0; i < lsm->lsm_entry_count; i++) {
647 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
/* NOTE(review): "sstripe size" below looks like a typo for "stripe
 * size" in the debug format string; left unchanged here. */
649 CDEBUG(level, DEXT ": id: %u, flags: %x, "
650 "magic 0x%08X, layout_gen %u, "
651 "stripe count %u, sstripe size %u, "
652 "pool: ["LOV_POOLNAMEF"]\n",
653 PEXT(&lse->lsme_extent), lse->lsme_id, lse->lsme_flags,
654 lse->lsme_magic, lse->lsme_layout_gen,
655 lse->lsme_stripe_count, lse->lsme_stripe_size,
656 lse->lsme_pool_name);
/* Skip per-stripe detail when there are no oinfo entries to read. */
657 if (!lsme_inited(lse) ||
658 lse->lsme_pattern & LOV_PATTERN_F_RELEASED)
660 for (j = 0; j < lse->lsme_stripe_count; j++) {
661 CDEBUG(level, " oinfo:%p: ostid: "DOSTID
662 " ost idx: %d gen: %d\n",
664 POSTID(&lse->lsme_oinfo[j]->loi_oi),
665 lse->lsme_oinfo[j]->loi_ost_idx,
666 lse->lsme_oinfo[j]->loi_ost_gen);
/*
 * Find the component whose extent contains @offset. OBD_OBJECT_EOF
 * matches a component whose extent ends at EOF. Returns the entry index;
 * the not-found return is elided in this view.
 */
671 int lov_lsm_entry(const struct lov_stripe_md *lsm, __u64 offset)
675 for (i = 0; i < lsm->lsm_entry_count; i++) {
676 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
678 if ((offset >= lse->lsme_extent.e_start &&
679 offset < lse->lsme_extent.e_end) ||
680 (offset == OBD_OBJECT_EOF &&
681 lse->lsme_extent.e_end == OBD_OBJECT_EOF))
689 * lmm_layout_gen overlaps stripe_offset field, it needs to be reset back when
690 * sending to MDT for passing striping checks
692 void lov_fix_ea_for_replay(void *lovea)
694 struct lov_user_md *lmm = lovea;
695 struct lov_comp_md_v1 *c1;
698 switch (le32_to_cpu(lmm->lmm_magic)) {
/* Plain layouts: a single stripe_offset field to reset. */
699 case LOV_USER_MAGIC_V1:
700 case LOV_USER_MAGIC_V3:
701 lmm->lmm_stripe_offset = LOV_OFFSET_DEFAULT;
/* Composite layouts: reset the field in every initialized component. */
704 case LOV_USER_MAGIC_COMP_V1:
706 for (i = 0; i < le16_to_cpu(c1->lcm_entry_count); i++) {
707 struct lov_comp_md_entry_v1 *ent = &c1->lcm_entries[i];
709 if (le32_to_cpu(ent->lcme_flags) & LCME_FL_INIT) {
710 lmm = (void *)((char *)c1 +
711 le32_to_cpu(ent->lcme_offset));
712 lmm->lmm_stripe_offset = LOV_OFFSET_DEFAULT;
717 EXPORT_SYMBOL(lov_fix_ea_for_replay);