4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
33 * Author: Wang Di <wangdi@clusterfs.com>
36 #define DEBUG_SUBSYSTEM S_LOV
38 #include <linux/math64.h>
39 #include <linux/sort.h>
40 #include <libcfs/libcfs.h>
42 #include <obd_class.h>
43 #include "lov_internal.h"
/* Convert an on-disk (little-endian) lu_extent to host byte order.
 * NOTE(review): surrounding lines (return type/braces) are missing from this
 * view of the file — presumably a small static helper returning void. */
46 lu_extent_le_to_cpu(struct lu_extent *dst, const struct lu_extent *src)
48 dst->e_start = le64_to_cpu(src->e_start);
49 dst->e_end = le64_to_cpu(src->e_end);
53 * Find minimum stripe maxbytes value. For inactive or
54 * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
56 static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt)
58 struct obd_import *imp;
/* Conservative default used when the negotiated value is unavailable. */
59 loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
64 imp = tgt->ltd_obd->u.cli.cl_import;
/* imp_state and imp_connect_data are read under imp_lock. */
68 spin_lock(&imp->imp_lock);
/* Use the server-advertised limit only when the import is usable
 * (FULL or IDLE) and the MAXBYTES connect flag was negotiated. */
69 if ((imp->imp_state == LUSTRE_IMP_FULL ||
70 imp->imp_state == LUSTRE_IMP_IDLE) &&
71 (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
72 imp->imp_connect_data.ocd_maxbytes > 0)
73 maxbytes = imp->imp_connect_data.ocd_maxbytes;
75 spin_unlock(&imp->imp_lock);
/* Sanity-check a V1/V3 lov_mds_md before unpacking: stripe count, object id,
 * striping pattern and stripe size.  Dumps the offending LMM on failure.
 * NOTE(review): rc assignments and return statements are not visible in this
 * view of the file — presumably returns 0 on success, negative errno on error. */
80 static int lsm_lmm_verify_v1v3(struct lov_mds_md *lmm, size_t lmm_size,
83 u32 pattern = le32_to_cpu(lmm->lmm_pattern);
/* Reject absurd stripe counts that would imply a corrupt layout. */
86 if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
88 CERROR("lov: bad stripe count %d: rc = %d\n",
90 lov_dump_lmm_common(D_WARNING, lmm);
/* A zero object id in the LMM OI is never valid. */
94 if (lmm_oi_id(&lmm->lmm_oi) == 0) {
96 CERROR("lov: zero object id: rc = %d\n", rc);
97 lov_dump_lmm_common(D_WARNING, lmm);
101 if (!lov_pattern_supported(lov_pattern(pattern))) {
/* Rate-limit window start; static so it persists across calls. */
103 static ktime_t time2_clear_nr;
104 ktime_t now = ktime_get();
106 /* limit this message 20 times within 24h */
107 if (ktime_after(now, time2_clear_nr)) {
109 time2_clear_nr = ktime_add_ms(now,
110 24 * 3600 * MSEC_PER_SEC);
113 CWARN("lov: unrecognized striping pattern: rc = %d\n",
115 lov_dump_lmm_common(D_WARNING, lmm);
/* Stripe size must be non-zero and a multiple of LOV_MIN_STRIPE_SIZE. */
120 if (lmm->lmm_stripe_size == 0 ||
121 (le32_to_cpu(lmm->lmm_stripe_size)&(LOV_MIN_STRIPE_SIZE-1)) != 0) {
123 CERROR("lov: bad stripe size %u: rc = %d\n",
124 le32_to_cpu(lmm->lmm_stripe_size), rc);
125 lov_dump_lmm_common(D_WARNING, lmm);
/* Free one stripe-md entry, including its per-stripe lov_oinfo slab objects
 * when they were actually allocated (initialized, non-released, supported
 * magic/pattern entries only). */
133 static void lsme_free(struct lov_stripe_md_entry *lsme)
135 unsigned int stripe_count;
/* Foreign entries are a fixed-size inline allocation — free and done. */
139 if (lsme->lsme_magic == LOV_MAGIC_FOREIGN) {
141 * TODO: in addition to the HSM foreign layout, support needs to be
142 * added for other kinds of foreign layout types such as
143 * DAOS and S3. When those are added, a non-inline
144 * @lov_hsm_base will be used to store layout information, and the
145 * extra allocated buffer will need to be freed here.
147 OBD_FREE_LARGE(lsme, sizeof(*lsme));
151 stripe_count = lsme->lsme_stripe_count;
/* Only fully-initialized, non-released entries with a supported
 * magic/pattern carry per-stripe oinfo objects to free. */
152 if (!lsme_inited(lsme) ||
153 lsme->lsme_pattern & LOV_PATTERN_F_RELEASED ||
154 !lov_supported_comp_magic(lsme->lsme_magic) ||
155 !lov_pattern_supported(lov_pattern(lsme->lsme_pattern)))
157 for (i = 0; i < stripe_count; i++)
158 OBD_SLAB_FREE_PTR(lsme->lsme_oinfo[i], lov_oinfo_slab);
/* Size matches the flexible-array allocation done at unpack time. */
160 lsme_size = offsetof(typeof(*lsme), lsme_oinfo[stripe_count]);
161 OBD_FREE_LARGE(lsme, lsme_size);
/* Free a whole stripe-md: either the foreign EA blob, or every component
 * entry, then the lsm container itself (sized to match its allocation). */
164 void lsm_free(struct lov_stripe_md *lsm)
166 unsigned int entry_count = lsm->lsm_entry_count;
/* Foreign layouts store one opaque blob instead of component entries. */
170 if (lsm->lsm_magic == LOV_MAGIC_FOREIGN) {
171 OBD_FREE_LARGE(lsm_foreign(lsm), lsm->lsm_foreign_size);
173 for (i = 0; i < entry_count; i++)
174 lsme_free(lsm->lsm_entries[i]);
/* Foreign lsm was allocated with a single entry slot (see
 * lsm_unpackmd_foreign); others with entry_count slots. */
177 lsm_size = lsm->lsm_magic == LOV_MAGIC_FOREIGN ?
178 offsetof(typeof(*lsm), lsm_entries[1]) :
179 offsetof(typeof(*lsm), lsm_entries[entry_count]);
180 OBD_FREE(lsm, lsm_size);
184 * Unpack a struct lov_mds_md into a struct lov_stripe_md_entry.
186 * The caller should set id and extent.
188 static struct lov_stripe_md_entry *
189 lsme_unpack(struct lov_obd *lov, struct lov_mds_md *lmm, size_t buf_size,
190 const char *pool_name, bool inited, struct lov_ost_data_v1 *objects,
193 struct lov_stripe_md_entry *lsme;
/* Minimum per-target maxbytes seen so far; 0 means "not set yet". */
195 loff_t min_stripe_maxbytes = 0;
199 unsigned int stripe_count;
/* Only plain V1/V3 layouts are handled here; composite/foreign go
 * through lsme_unpack_comp()/lsme_unpack_foreign(). */
203 magic = le32_to_cpu(lmm->lmm_magic);
204 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)
205 RETURN(ERR_PTR(-EINVAL));
207 pattern = le32_to_cpu(lmm->lmm_pattern);
/* Released, uninstantiated or unsupported-pattern entries carry no
 * objects to unpack (branch target not visible in this view). */
208 if (pattern & LOV_PATTERN_F_RELEASED || !inited ||
209 !lov_pattern_supported(lov_pattern(pattern)))
212 stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
/* The buffer must be large enough for the declared stripe count. */
214 if (buf_size < lov_mds_md_size(stripe_count, magic)) {
215 CERROR("LOV EA %s too small: %zu, need %u\n",
216 magic == LOV_MAGIC_V1 ? "V1" : "V3", buf_size,
217 lov_mds_md_size(stripe_count, magic == LOV_MAGIC_V1 ?
218 LOV_MAGIC_V1 : LOV_MAGIC_V3));
219 lov_dump_lmm_common(D_WARNING, lmm);
220 return ERR_PTR(-EINVAL);
223 rc = lsm_lmm_verify_v1v3(lmm, buf_size, stripe_count);
/* Flexible-array allocation: one lov_oinfo pointer per stripe. */
227 lsme_size = offsetof(typeof(*lsme), lsme_oinfo[stripe_count]);
228 OBD_ALLOC_LARGE(lsme, lsme_size);
230 RETURN(ERR_PTR(-ENOMEM));
232 lsme->lsme_magic = magic;
233 lsme->lsme_pattern = pattern;
234 lsme->lsme_flags = 0;
235 lsme->lsme_stripe_size = le32_to_cpu(lmm->lmm_stripe_size);
236 /* preserve the possible -1 stripe count for uninstantiated component */
237 lsme->lsme_stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
238 lsme->lsme_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
241 size_t pool_name_len;
/* strlcpy returns the source length; >= buffer size means the pool
 * name would have been truncated — reject rather than truncate. */
243 pool_name_len = strlcpy(lsme->lsme_pool_name, pool_name,
244 sizeof(lsme->lsme_pool_name));
245 if (pool_name_len >= sizeof(lsme->lsme_pool_name))
246 GOTO(out_lsme, rc = -E2BIG);
249 /* with Data-on-MDT set maxbytes to stripe size */
250 if (lsme_is_dom(lsme)) {
252 lov_bytes = lsme->lsme_stripe_size;
/* Unpack each stripe's object id / OST index and track the smallest
 * per-target maxbytes across all real (non-dummy) stripes. */
259 for (i = 0; i < stripe_count; i++) {
260 struct lov_oinfo *loi;
261 struct lov_tgt_desc *ltd;
263 OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, GFP_NOFS);
265 GOTO(out_lsme, rc = -ENOMEM);
267 lsme->lsme_oinfo[i] = loi;
269 ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi);
270 loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx);
271 loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen);
/* Dummy objects (e.g. lost stripes) have no target to query. */
272 if (lov_oinfo_is_dummy(loi))
/* During config processing the target table may still be growing,
 * so an out-of-range index is only fatal outside of that window. */
275 if (loi->loi_ost_idx >= lov->desc.ld_tgt_count &&
276 !lov2obd(lov)->obd_process_conf) {
277 CERROR("%s: OST index %d more than OST count %d\n",
278 (char*)lov->desc.ld_uuid.uuid,
279 loi->loi_ost_idx, lov->desc.ld_tgt_count);
280 lov_dump_lmm_v1(D_WARNING, lmm);
281 GOTO(out_lsme, rc = -EINVAL);
284 ltd = lov->lov_tgts[loi->loi_ost_idx];
286 CERROR("%s: OST index %d missing\n",
287 (char*)lov->desc.ld_uuid.uuid, loi->loi_ost_idx);
288 lov_dump_lmm_v1(D_WARNING, lmm);
292 lov_bytes = lov_tgt_maxbytes(ltd);
293 if (min_stripe_maxbytes == 0 || lov_bytes < min_stripe_maxbytes)
294 min_stripe_maxbytes = lov_bytes;
/* No usable target info at all: fall back to the ext3 limit. */
298 if (min_stripe_maxbytes == 0)
299 min_stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
/* stripe_count 0 means "stripe over all targets". */
301 if (stripe_count == 0)
302 stripe_count = lov->desc.ld_tgt_count;
/* Guard the multiplication against signed overflow; clamp to the
 * VFS maximum file size on overflow. */
304 if (min_stripe_maxbytes <= LLONG_MAX / stripe_count)
305 lov_bytes = min_stripe_maxbytes * stripe_count;
307 lov_bytes = MAX_LFS_FILESIZE;
309 *maxbytes = min_t(loff_t, lov_bytes, MAX_LFS_FILESIZE);
/* Error path: free any oinfo slabs allocated so far, then the entry.
 * NOTE(review): the out_lsme label itself is not visible in this view. */
316 for (i = 0; i < stripe_count; i++) {
317 struct lov_oinfo *loi = lsme->lsme_oinfo[i];
320 OBD_SLAB_FREE_PTR(lsme->lsme_oinfo[i], lov_oinfo_slab);
322 OBD_FREE_LARGE(lsme, lsme_size);
/* Build a single-entry lov_stripe_md from a plain V1/V3 LMM: unpack the one
 * component, mark it initialized and spanning [0, EOF), then wrap it in a
 * one-slot lsm container. */
328 lov_stripe_md *lsm_unpackmd_v1v3(struct lov_obd *lov, struct lov_mds_md *lmm,
329 size_t buf_size, const char *pool_name,
330 struct lov_ost_data_v1 *objects)
332 struct lov_stripe_md *lsm;
333 struct lov_stripe_md_entry *lsme;
339 pattern = le32_to_cpu(lmm->lmm_pattern);
341 lsme = lsme_unpack(lov, lmm, buf_size, pool_name, true, objects,
344 RETURN(ERR_CAST(lsme));
/* Plain layouts have exactly one component covering the whole file. */
346 lsme->lsme_flags = LCME_FL_INIT;
347 lsme->lsme_extent.e_start = 0;
348 lsme->lsme_extent.e_end = LUSTRE_EOF;
/* One entry slot only — lsm_free() relies on this size for non-comp
 * layouts with entry_count == 1. */
350 lsm_size = offsetof(typeof(*lsm), lsm_entries[1]);
351 OBD_ALLOC(lsm, lsm_size);
353 GOTO(out_lsme, rc = -ENOMEM);
355 atomic_set(&lsm->lsm_refc, 1);
356 spin_lock_init(&lsm->lsm_lock);
357 lsm->lsm_maxbytes = maxbytes;
358 lmm_oi_le_to_cpu(&lsm->lsm_oi, &lmm->lmm_oi);
359 lsm->lsm_magic = le32_to_cpu(lmm->lmm_magic);
360 lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
361 lsm->lsm_entry_count = 1;
362 lsm->lsm_is_released = pattern & LOV_PATTERN_F_RELEASED;
363 lsm->lsm_entries[0] = lsme;
/* V1 unpack entry point: no pool name, objects array is inline in the LMM. */
373 static struct lov_stripe_md *
374 lsm_unpackmd_v1(struct lov_obd *lov, void *buf, size_t buf_size)
376 struct lov_mds_md_v1 *lmm = buf;
378 return lsm_unpackmd_v1v3(lov, buf, buf_size, NULL, lmm->lmm_objects);
/* Operations vector for plain V1 layouts (selected by lsm_op_find()). */
381 static const struct lsm_operations lsm_v1_ops = {
382 .lsm_unpackmd = lsm_unpackmd_v1,
/* V3 unpack entry point: like V1 but the LMM carries a pool name. */
385 static struct lov_stripe_md *
386 lsm_unpackmd_v3(struct lov_obd *lov, void *buf, size_t buf_size)
388 struct lov_mds_md_v3 *lmm = buf;
390 return lsm_unpackmd_v1v3(lov, buf, buf_size, lmm->lmm_pool_name,
/* Operations vector for V3 (pool-aware) layouts. */
394 static const struct lsm_operations lsm_v3_ops = {
395 .lsm_unpackmd = lsm_unpackmd_v3,
/* Validate a composite (PFL) layout header: the buffer must hold the whole
 * declared lcm_size, and every component's blob must lie entirely inside it.
 * NOTE(review): return statements are not visible in this view — presumably
 * 0 on success, -EINVAL on failure. */
398 static int lsm_verify_comp_md_v1(struct lov_comp_md_v1 *lcm,
401 unsigned int entry_count;
405 lcm_size = le32_to_cpu(lcm->lcm_size);
406 if (lcm_buf_size < lcm_size) {
407 CERROR("bad LCM buffer size %zu, expected %zu\n",
408 lcm_buf_size, lcm_size);
412 entry_count = le16_to_cpu(lcm->lcm_entry_count);
413 for (i = 0; i < entry_count; i++) {
414 struct lov_comp_md_entry_v1 *lcme = &lcm->lcm_entries[i];
418 blob_offset = le32_to_cpu(lcme->lcme_offset);
419 blob_size = le32_to_cpu(lcme->lcme_size);
/* Each check individually guards against overflow in the sum. */
421 if (lcm_size < blob_offset || lcm_size < blob_size ||
422 lcm_size < blob_offset + blob_size) {
423 CERROR("LCM entry %u has invalid blob: "
424 "LCM size = %zu, offset = %zu, size = %zu\n",
425 le32_to_cpu(lcme->lcme_id),
426 lcm_size, blob_offset, blob_size);
/* Unpack a foreign-layout component into a bare lsme: no stripes, pattern
 * forced to LOV_PATTERN_FOREIGN, maxbytes set to the VFS maximum. */
434 static struct lov_stripe_md_entry *
435 lsme_unpack_foreign(struct lov_obd *lov, void *buf, size_t buf_size,
436 bool inited, loff_t *maxbytes)
438 struct lov_stripe_md_entry *lsme;
439 struct lov_foreign_md *lfm = buf;
444 magic = le32_to_cpu(lfm->lfm_magic);
445 if (magic != LOV_MAGIC_FOREIGN)
446 RETURN(ERR_PTR(-EINVAL));
/* Fixed-size allocation — lsme_free() frees sizeof(*lsme) for
 * LOV_MAGIC_FOREIGN entries to match. */
448 OBD_ALLOC_LARGE(lsme, sizeof(*lsme));
450 RETURN(ERR_PTR(-ENOMEM));
452 lsme->lsme_magic = magic;
453 lsme->lsme_pattern = LOV_PATTERN_FOREIGN;
454 lsme->lsme_flags = 0;
/* Foreign layout imposes no OST-derived size limit. */
457 *maxbytes = MAX_LFS_FILESIZE;
/* Dispatch unpacking of one composite-layout component blob by its magic:
 * V1/V3 go through lsme_unpack(), foreign through lsme_unpack_foreign().
 * An unrecognized magic still gets a minimal placeholder lsme so the layout
 * as a whole remains representable. */
462 static struct lov_stripe_md_entry *
463 lsme_unpack_comp(struct lov_obd *lov, struct lov_mds_md *lmm,
464 size_t lmm_buf_size, bool inited, loff_t *maxbytes)
468 magic = le32_to_cpu(lmm->lmm_magic);
469 if (!lov_supported_comp_magic(magic)) {
470 struct lov_stripe_md_entry *lsme;
472 /* allocate a lsme holder for invalid magic lmm */
473 OBD_ALLOC_LARGE(lsme, offsetof(typeof(*lsme), lsme_oinfo[0]));
474 lsme->lsme_magic = magic;
475 lsme->lsme_pattern = le32_to_cpu(lmm->lmm_pattern);
/* A zero stripe count is only legal for foreign layouts and DoM
 * (MDT-pattern) components. */
480 if (magic != LOV_MAGIC_FOREIGN &&
481 le16_to_cpu(lmm->lmm_stripe_count) == 0 &&
482 !(lov_pattern(le32_to_cpu(lmm->lmm_pattern)) & LOV_PATTERN_MDT))
483 RETURN(ERR_PTR(-EINVAL));
485 if (magic == LOV_MAGIC_V1) {
486 return lsme_unpack(lov, lmm, lmm_buf_size, NULL,
487 inited, lmm->lmm_objects, maxbytes);
488 } else if (magic == LOV_MAGIC_V3) {
489 struct lov_mds_md_v3 *lmm3 = (struct lov_mds_md_v3 *)lmm;
491 return lsme_unpack(lov, lmm, lmm_buf_size, lmm3->lmm_pool_name,
492 inited, lmm3->lmm_objects, maxbytes);
493 } else { /* LOV_MAGIC_FOREIGN */
494 return lsme_unpack_foreign(lov, lmm, lmm_buf_size,
/* Unpack a composite (PFL) layout: verify the LCM header, allocate an lsm
 * with one slot per component, then unpack each component blob in place.
 * lsm_maxbytes is derived from the last component's extent. */
499 static struct lov_stripe_md *
500 lsm_unpackmd_comp_md_v1(struct lov_obd *lov, void *buf, size_t buf_size)
502 struct lov_comp_md_v1 *lcm = buf;
503 struct lov_stripe_md *lsm;
505 unsigned int entry_count = 0;
510 rc = lsm_verify_comp_md_v1(buf, buf_size);
514 entry_count = le16_to_cpu(lcm->lcm_entry_count);
516 lsm_size = offsetof(typeof(*lsm), lsm_entries[entry_count]);
517 OBD_ALLOC(lsm, lsm_size);
519 return ERR_PTR(-ENOMEM);
521 atomic_set(&lsm->lsm_refc, 1);
522 spin_lock_init(&lsm->lsm_lock);
523 lsm->lsm_magic = le32_to_cpu(lcm->lcm_magic);
524 lsm->lsm_layout_gen = le32_to_cpu(lcm->lcm_layout_gen);
525 lsm->lsm_entry_count = entry_count;
526 lsm->lsm_mirror_count = le16_to_cpu(lcm->lcm_mirror_count);
527 lsm->lsm_flags = le16_to_cpu(lcm->lcm_flags);
/* Assume released until any component proves otherwise below. */
528 lsm->lsm_is_released = true;
529 lsm->lsm_maxbytes = LLONG_MIN;
531 for (i = 0; i < entry_count; i++) {
532 struct lov_comp_md_entry_v1 *lcme = &lcm->lcm_entries[i];
533 struct lov_stripe_md_entry *lsme;
538 blob_offset = le32_to_cpu(lcme->lcme_offset);
539 blob_size = le32_to_cpu(lcme->lcme_size);
540 blob = (char *)lcm + blob_offset;
/* Fault-injection hooks: corrupt a chosen component's magic or
 * pattern to exercise the error paths in testing. */
542 if (unlikely(CFS_FAIL_CHECK(OBD_FAIL_LOV_COMP_MAGIC) &&
543 (cfs_fail_val == i + 1)))
544 ((struct lov_mds_md *)blob)->lmm_magic = LOV_MAGIC_BAD;
546 if (unlikely(CFS_FAIL_CHECK(OBD_FAIL_LOV_COMP_PATTERN) &&
547 (cfs_fail_val == i + 1))) {
548 ((struct lov_mds_md *)blob)->lmm_pattern =
/* Only the last component contributes to maxbytes. */
552 lsme = lsme_unpack_comp(lov, blob, blob_size,
553 le32_to_cpu(lcme->lcme_flags) &
555 (i == entry_count - 1) ? &maxbytes :
558 GOTO(out_lsm, rc = PTR_ERR(lsme));
561 * pressume that unrecognized magic component also has valid
562 * lsme_id/lsme_flags/lsme_extent
564 if (!(lsme->lsme_pattern & LOV_PATTERN_F_RELEASED))
565 lsm->lsm_is_released = false;
567 lsm->lsm_entries[i] = lsme;
568 lsme->lsme_id = le32_to_cpu(lcme->lcme_id);
569 lsme->lsme_flags = le32_to_cpu(lcme->lcme_flags);
570 if (lsme->lsme_flags & LCME_FL_NOSYNC)
571 lsme->lsme_timestamp =
572 le64_to_cpu(lcme->lcme_timestamp);
573 lu_extent_le_to_cpu(&lsme->lsme_extent, &lcme->lcme_extent);
575 if (i == entry_count - 1) {
576 lsm->lsm_maxbytes = (loff_t)lsme->lsme_extent.e_start +
579 * the last component hasn't been defined, or
580 * lsm_maxbytes overflowed.
582 if (!lsme_is_dom(lsme) &&
583 (lsme->lsme_extent.e_end != LUSTRE_EOF ||
585 (loff_t)lsme->lsme_extent.e_start))
586 lsm->lsm_maxbytes = MAX_LFS_FILESIZE;
/* Error path: free every component unpacked so far, then the lsm.
 * NOTE(review): the out_lsm label itself is not visible in this view. */
593 for (i = 0; i < entry_count; i++)
594 if (lsm->lsm_entries[i])
595 lsme_free(lsm->lsm_entries[i]);
597 OBD_FREE(lsm, lsm_size);
/* Operations vector for composite (PFL) layouts. */
602 static const struct lsm_operations lsm_comp_md_v1_ops = {
603 .lsm_unpackmd = lsm_unpackmd_comp_md_v1,
/* Unpack a foreign-layout EA: the whole on-disk blob (format fields
 * included) is copied verbatim and attached to a one-slot lsm via
 * lsm_foreign(); lsm_free() releases it using lsm_foreign_size. */
607 lov_stripe_md *lsm_unpackmd_foreign(struct lov_obd *lov, void *buf,
610 struct lov_foreign_md *lfm = buf;
611 struct lov_stripe_md *lsm;
613 struct lov_stripe_md_entry *lsme;
615 lsm_size = offsetof(typeof(*lsm), lsm_entries[1]);
616 OBD_ALLOC(lsm, lsm_size);
618 RETURN(ERR_PTR(-ENOMEM));
620 atomic_set(&lsm->lsm_refc, 1);
621 spin_lock_init(&lsm->lsm_lock);
622 lsm->lsm_magic = le32_to_cpu(lfm->lfm_magic);
623 lsm->lsm_foreign_size = lov_foreign_size_le(lfm);
625 /* alloc for full foreign EA including format fields */
626 OBD_ALLOC_LARGE(lsme, lsm->lsm_foreign_size);
628 OBD_FREE(lsm, lsm_size);
629 RETURN(ERR_PTR(-ENOMEM));
632 /* copy full foreign EA including format fields */
633 memcpy(lsme, buf, lsm->lsm_foreign_size);
635 lsm_foreign(lsm) = lsme;
/* Operations vector for foreign (opaque, externally-managed) layouts. */
640 static const struct lsm_operations lsm_foreign_ops = {
641 .lsm_unpackmd = lsm_unpackmd_foreign,
/* Map a layout magic to its lsm_operations vector; logs and (presumably —
 * the return statement is not visible here) returns NULL for an
 * unrecognized magic.  NOTE(review): the V1/V3 cases are also missing from
 * this view of the switch. */
644 const struct lsm_operations *lsm_op_find(int magic)
651 case LOV_MAGIC_COMP_V1:
652 return &lsm_comp_md_v1_ops;
653 case LOV_MAGIC_FOREIGN:
654 return &lsm_foreign_ops;
656 CERROR("unrecognized lsm_magic %08x\n", magic);
/* Debug dump of a stripe-md at the given CDEBUG level: the lsm header,
 * then either the foreign EA blob or every component entry with its
 * per-stripe object info. */
661 void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm)
666 "lsm %p, objid "DOSTID", maxbytes %#llx, magic 0x%08X, refc: %d, entry: %u, mirror: %u, flags: %u,layout_gen %u\n",
667 lsm, POSTID(&lsm->lsm_oi), lsm->lsm_maxbytes, lsm->lsm_magic,
668 atomic_read(&lsm->lsm_refc), lsm->lsm_entry_count,
669 lsm->lsm_mirror_count, lsm->lsm_flags, lsm->lsm_layout_gen);
671 if (lsm->lsm_magic == LOV_MAGIC_FOREIGN) {
672 struct lov_foreign_md *lfm = (void *)lsm_foreign(lsm);
675 "foreign LOV EA, magic %x, length %u, type %x, flags %x, value '%.*s'\n",
676 lfm->lfm_magic, lfm->lfm_length, lfm->lfm_type,
677 lfm->lfm_flags, lfm->lfm_length, lfm->lfm_value);
681 for (i = 0; i < lsm->lsm_entry_count; i++) {
682 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
684 CDEBUG(level, DEXT ": id: %u, flags: %x, "
685 "magic 0x%08X, layout_gen %u, "
686 "stripe count %u, sstripe size %u, "
687 "pool: ["LOV_POOLNAMEF"]\n",
688 PEXT(&lse->lsme_extent), lse->lsme_id, lse->lsme_flags,
689 lse->lsme_magic, lse->lsme_layout_gen,
690 lse->lsme_stripe_count, lse->lsme_stripe_size,
691 lse->lsme_pool_name);
/* Same guard as lsme_free(): only entries of this shape have
 * valid per-stripe lsme_oinfo pointers to dereference. */
692 if (!lsme_inited(lse) ||
693 lse->lsme_pattern & LOV_PATTERN_F_RELEASED ||
694 !lov_supported_comp_magic(lse->lsme_magic) ||
695 !lov_pattern_supported(lov_pattern(lse->lsme_pattern)))
697 for (j = 0; j < lse->lsme_stripe_count; j++) {
698 CDEBUG(level, " oinfo:%p: ostid: "DOSTID
699 " ost idx: %d gen: %d\n",
701 POSTID(&lse->lsme_oinfo[j]->loi_oi),
702 lse->lsme_oinfo[j]->loi_ost_idx,
703 lse->lsme_oinfo[j]->loi_ost_gen);
/* Find the index of the component whose extent contains @offset; the
 * special offset OBD_OBJECT_EOF matches a component ending at EOF.
 * NOTE(review): return statements are not visible in this view —
 * presumably returns the index i, or a negative value if none matches. */
708 int lov_lsm_entry(const struct lov_stripe_md *lsm, __u64 offset)
712 for (i = 0; i < lsm->lsm_entry_count; i++) {
713 struct lov_stripe_md_entry *lse = lsm->lsm_entries[i];
/* Half-open interval [e_start, e_end) check, with an exact-EOF
 * special case for offset == OBD_OBJECT_EOF. */
715 if ((offset >= lse->lsme_extent.e_start &&
716 offset < lse->lsme_extent.e_end) ||
717 (offset == OBD_OBJECT_EOF &&
718 lse->lsme_extent.e_end == OBD_OBJECT_EOF))
726 * lmm_layout_gen overlaps stripe_offset field, it needs to be reset back when
727 * sending to MDT for passing striping checks
729 void lov_fix_ea_for_replay(void *lovea)
731 struct lov_user_md *lmm = lovea;
732 struct lov_comp_md_v1 *c1;
735 switch (le32_to_cpu(lmm->lmm_magic)) {
736 case LOV_USER_MAGIC_V1:
737 case LOV_USER_MAGIC_V3:
/* Plain layouts: one stripe_offset field to reset. */
738 lmm->lmm_stripe_offset = LOV_OFFSET_DEFAULT;
741 case LOV_USER_MAGIC_COMP_V1:
/* Composite layouts: reset the field in every initialized
 * component's embedded LMM. */
743 for (i = 0; i < le16_to_cpu(c1->lcm_entry_count); i++) {
744 struct lov_comp_md_entry_v1 *ent = &c1->lcm_entries[i];
746 if (le32_to_cpu(ent->lcme_flags) & LCME_FL_INIT) {
747 lmm = (void *)((char *)c1 +
748 le32_to_cpu(ent->lcme_offset));
749 lmm->lmm_stripe_offset = LOV_OFFSET_DEFAULT;
754 EXPORT_SYMBOL(lov_fix_ea_for_replay);