+ RETURN(rc);
+}
+
+/* Fill a lov_mds_md_v1 header for a layout EA being regenerated by the
+ * layout LFSCK: set magic/pattern, derive lmm_oi from the parent FID,
+ * record the stripe size/count (little-endian on disk), reset the layout
+ * generation to 1, and zero all @count stripe slots.
+ *
+ * Returns the address of stripe slot @ea_off inside @lmm so that the
+ * caller can fill in the one OST object it knows about.  The caller must
+ * guarantee that ea_off < count and that @lmm has room for @count slots.
+ */
+static struct lov_ost_data_v1 *
+__lfsck_layout_new_v1_lovea(struct lov_mds_md_v1 *lmm,
+ const struct lu_fid *pfid,
+ __u32 stripe_size, __u32 ea_off,
+ __u32 pattern, __u16 count)
+{
+ lmm->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
+ lmm->lmm_pattern = cpu_to_le32(pattern);
+ fid_to_lmm_oi(pfid, &lmm->lmm_oi);
+ /* Convert lmm_oi to on-disk (little-endian) byte order in place. */
+ lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
+ lmm->lmm_stripe_size = cpu_to_le32(stripe_size);
+ lmm->lmm_stripe_count = cpu_to_le16(count);
+ /* A regenerated layout starts from generation 1. */
+ lmm->lmm_layout_gen = cpu_to_le16(1);
+ /* Zero every stripe slot; slots other than ea_off stay as holes
+ * until filled by later LFSCK processing. */
+ memset(&lmm->lmm_objects[0], 0,
+ sizeof(struct lov_ost_data_v1) * count);
+
+ return &lmm->lmm_objects[ea_off];
+}
+
+/* Build a brand new LOV_MAGIC_V1 layout EA in @buf for the orphan OST
+ * object that claims stripe index @ea_off of @parent.
+ *
+ * If the stripe count in the OST-side ost_layout is unknown (zero), use
+ * ea_off + 1; if the stripe size is unknown, fall back to the filesystem
+ * default stripe size.  When more than one stripe slot exists (or the
+ * count is unknown with a non-zero stripe index), the other slots are
+ * unknown holes, so mark the layout with LOV_PATTERN_F_HOLE.
+ *
+ * Returns the total EA size on success and sets *lmm/*objs to the new
+ * header and the stripe slot @ea_off; returns negative errno if the
+ * default stripe size cannot be obtained. */
+static int lfsck_layout_new_v1_lovea(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
+ struct ost_layout *ol,
+ struct dt_object *parent,
+ struct lu_buf *buf, __u32 ea_off,
+ struct lov_mds_md_v1 **lmm,
+ struct lov_ost_data_v1 **objs)
+{
+ int size;
+ __u32 stripe_size = ol->ol_stripe_size;
+ __u32 pattern = LOV_PATTERN_RAID0;
+ __u16 count;
+
+ if (ol->ol_stripe_count != 0)
+ count = ol->ol_stripe_count;
+ else
+ count = ea_off + 1;
+
+ size = lov_mds_md_size(count, LOV_MAGIC_V1);
+ LASSERTF(buf->lb_len >= size,
+ "buffer len %d is less than real size %d\n",
+ (int)buf->lb_len, size);
+
+ if (stripe_size == 0) {
+ int rc;
+
+ rc = lfsck_layout_get_def_stripesize(env, lfsck, &stripe_size);
+ if (rc)
+ return rc;
+ }
+
+ *lmm = buf->lb_buf;
+ /* Slots other than ea_off are unknown: mark the layout as
+ * containing holes.  No memset is needed here because
+ * __lfsck_layout_new_v1_lovea() zeroes all the stripe slots. */
+ if (ol->ol_stripe_count > 1 ||
+ (ol->ol_stripe_count == 0 && ea_off != 0))
+ pattern |= LOV_PATTERN_F_HOLE;
+
+ *objs = __lfsck_layout_new_v1_lovea(*lmm, lfsck_dto2fid(parent),
+ stripe_size, ea_off, pattern, count);
+
+ return size;
+}
+
+/* Compose a fresh composite (LOV_MAGIC_COMP_V1) layout EA in @buf
+ * containing a single component described by the orphan record @rec.
+ *
+ * The layout generation is lor_layout_version + lor_range; it defaults
+ * to 1 when no version information is available at all.  A non-zero
+ * write range marks the file LCM_FL_WRITE_PENDING.  The mirror count is
+ * left as zero for now; it is updated when more mirrors are found.
+ *
+ * Returns the total EA size; *lmm and *objs point at the component body
+ * header and its stripe slot @ea_off respectively. */
+static int lfsck_layout_new_comp_lovea(const struct lu_env *env,
+ struct lu_orphan_rec_v3 *rec,
+ struct dt_object *parent,
+ struct lu_buf *buf, __u32 ea_off,
+ struct lov_mds_md_v1 **lmm,
+ struct lov_ost_data_v1 **objs)
+{
+ struct ost_layout *ol = &rec->lor_layout;
+ struct lov_comp_md_v1 *lcm = buf->lb_buf;
+ struct lov_comp_md_entry_v1 *lcme;
+ __u32 pattern = LOV_PATTERN_RAID0;
+ __u32 offset = sizeof(*lcm) + sizeof(*lcme);
+ __u32 gen = rec->lor_layout_version + rec->lor_range;
+ int lcme_size = lov_mds_md_size(ol->ol_stripe_count, LOV_MAGIC_V1);
+ int size = offset + lcme_size;
+
+ LASSERTF(buf->lb_len >= size,
+ "buffer len %d is less than real size %d\n",
+ (int)buf->lb_len, size);
+
+ /* Without any version information the new layout starts from
+ * generation 1. */
+ if (rec->lor_layout_version == 0 && rec->lor_range == 0)
+ gen = 1;
+
+ lcm->lcm_magic = cpu_to_le32(LOV_MAGIC_COMP_V1);
+ lcm->lcm_size = cpu_to_le32(size);
+ lcm->lcm_layout_gen = cpu_to_le32(gen);
+ lcm->lcm_flags = cpu_to_le16(rec->lor_range != 0 ?
+ LCM_FL_WRITE_PENDING : LCM_FL_NONE);
+ lcm->lcm_entry_count = cpu_to_le16(1);
+ /* Currently, we do not know how many mirrors will be, set it as zero
+ * at the beginning. It will be updated when more mirrors are found. */
+ lcm->lcm_mirror_count = 0;
+
+ /* More than one stripe slot means the others are unknown holes. */
+ if (ol->ol_stripe_count > 1)
+ pattern |= LOV_PATTERN_F_HOLE;
+
+ lcme = &lcm->lcm_entries[0];
+ lcme->lcme_id = cpu_to_le32(ol->ol_comp_id);
+ lcme->lcme_flags = cpu_to_le32(LCME_FL_INIT);
+ lcme->lcme_extent.e_start = cpu_to_le64(ol->ol_comp_start);
+ lcme->lcme_extent.e_end = cpu_to_le64(ol->ol_comp_end);
+ lcme->lcme_offset = cpu_to_le32(offset);
+ lcme->lcme_size = cpu_to_le32(lcme_size);
+ lcme->lcme_layout_gen = lcm->lcm_layout_gen;
+
+ *lmm = buf->lb_buf + offset;
+ *objs = __lfsck_layout_new_v1_lovea(*lmm, lfsck_dto2fid(parent),
+ ol->ol_stripe_size, ea_off,
+ pattern, ol->ol_stripe_count);
+
+ return size;
+}
+
+/* After inserting or refilling the component entry @lcme in the
+ * composite layout @lcm, update the generations, the file level flags
+ * and the STALE state of the components that overlap with it.
+ *
+ * The new entry's generation is @version + @range (at least 1).  The
+ * file level lcm_layout_gen is raised to that value if it is older.
+ * A non-zero @range sets LCM_FL_WRITE_PENDING; otherwise, if mirrors
+ * exist and no flag was set, the file becomes LCM_FL_RDONLY.
+ *
+ * Then every non-stale component overlapping [e_start, e_end) is
+ * compared by generation: a newer one (or generation 0, which marks
+ * the latest mirror) makes the new entry stale; older ones are marked
+ * stale instead. */
+static void lfsck_layout_update_lcm(struct lov_comp_md_v1 *lcm,
+ struct lov_comp_md_entry_v1 *lcme,
+ __u32 version, __u32 range)
+{
+ struct lov_comp_md_entry_v1 *tmp;
+ __u64 start = le64_to_cpu(lcme->lcme_extent.e_start);
+ __u64 end = le64_to_cpu(lcme->lcme_extent.e_end);
+ __u32 gen = version + range;
+ __u32 tmp_gen;
+ int i;
+ __u16 count = le16_to_cpu(lcm->lcm_entry_count);
+ __u16 flags = le16_to_cpu(lcm->lcm_flags);
+
+ /* Generation 0 is reserved for the latest mirror (see below). */
+ if (!gen)
+ gen = 1;
+ lcme->lcme_layout_gen = cpu_to_le32(gen);
+ /* The file level layout generation never goes backwards. */
+ if (le32_to_cpu(lcm->lcm_layout_gen) < gen)
+ lcm->lcm_layout_gen = cpu_to_le32(gen);
+
+ if (range)
+ lcm->lcm_flags = cpu_to_le16(LCM_FL_WRITE_PENDING);
+ else if (flags == LCM_FL_NONE && le16_to_cpu(lcm->lcm_mirror_count) > 0)
+ lcm->lcm_flags = cpu_to_le16(LCM_FL_RDONLY);
+
+ for (i = 0; i < count; i++) {
+ tmp = &lcm->lcm_entries[i];
+ /* Skip components that do not overlap [start, end). */
+ if (le64_to_cpu(tmp->lcme_extent.e_end) <= start)
+ continue;
+
+ if (le64_to_cpu(tmp->lcme_extent.e_start) >= end)
+ continue;
+
+ if (le32_to_cpu(tmp->lcme_flags) & LCME_FL_STALE)
+ continue;
+
+ tmp_gen = le32_to_cpu(tmp->lcme_layout_gen);
+ /* "lcme_layout_gen == 0" but without LCME_FL_STALE flag,
+ * then it should be the latest version of all mirrors. */
+ if (tmp_gen == 0 || tmp_gen > gen) {
+ /* Some overlapping component is newer, so the new
+ * entry itself is the stale one. */
+ lcme->lcme_flags = cpu_to_le32(
+ le32_to_cpu(lcme->lcme_flags) | LCME_FL_STALE);
+ break;
+ }
+
+ if (tmp_gen < gen)
+ tmp->lcme_flags = cpu_to_le32(
+ le32_to_cpu(tmp->lcme_flags) | LCME_FL_STALE);
+ }
+}
+
+/* Insert a new component, built from the orphan OST object's ost_layout
+ * information in @rec, into the existing composite layout EA of @parent.
+ *
+ * @pos is the slot in lcm_entries where the new component header goes;
+ * its body is placed right after the bodies of the components [0, pos).
+ * @new_mirror indicates the component starts a new mirror, in which case
+ * lcm_mirror_count is bumped.
+ *
+ * Returns 0 on success, or a negative errno if growing the buffer or
+ * refilling the layout EA fails. */
+static int lfsck_layout_add_comp(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
+ struct thandle *handle,
+ struct lu_orphan_rec_v3 *rec,
+ struct dt_object *parent,
+ const struct lu_fid *cfid,
+ struct lu_buf *buf, __u32 ost_idx,
+ __u32 ea_off, int pos, bool new_mirror)
+{
+ struct ost_layout *ol = &rec->lor_layout;
+ struct lov_comp_md_v1 *lcm = buf->lb_buf;
+ struct lov_comp_md_entry_v1 *lcme;
+ struct lov_mds_md_v1 *lmm;
+ struct lov_ost_data_v1 *objs;
+ int added = sizeof(*lcme) +
+ lov_mds_md_size(ol->ol_stripe_count, LOV_MAGIC_V1);
+ int size = le32_to_cpu(lcm->lcm_size) + added;
+ int rc;
+ int i;
+ __u32 offset;
+ __u32 pattern = LOV_PATTERN_RAID0;
+ __u16 count = le16_to_cpu(lcm->lcm_entry_count);
+ ENTRY;
+
+ /* On failure the old (too small) buffer is untouched; bail out
+ * instead of writing past its end. */
+ rc = lu_buf_check_and_grow(buf, size);
+ if (rc != 0)
+ RETURN(rc);
+
+ /* Set the lcm again because lu_buf_check_and_grow() may
+ * have reallocated the buf. */
+ lcm = buf->lb_buf;
+ lcm->lcm_size = cpu_to_le32(size);
+ lcm->lcm_entry_count = cpu_to_le16(count + 1);
+ if (new_mirror)
+ le16_add_cpu(&lcm->lcm_mirror_count, 1);
+
+ /* 1. Move the component bodies from [pos, count-1] to [pos+1, count]
+ * with distance of 'added'. */
+ if (pos < count) {
+ size = 0;
+ for (i = pos; i < count; i++) {
+ lcme = &lcm->lcm_entries[i];
+ size += le32_to_cpu(lcme->lcme_size);
+ }
+
+ offset = le32_to_cpu(lcm->lcm_entries[pos].lcme_offset);
+ memmove(buf->lb_buf + offset + added,
+ buf->lb_buf + offset, size);
+ }
+
+ size = 0;
+ /* 2. Move the component bodies of [0, pos-1] by
+ * 'sizeof(struct lov_comp_md_entry_v1)' to make room for the new
+ * component header in the entry array. */
+ if (pos > 0) {
+ for (i = 0; i < pos; i++) {
+ lcme = &lcm->lcm_entries[i];
+ size += le32_to_cpu(lcme->lcme_size);
+ }
+
+ offset = le32_to_cpu(lcm->lcm_entries[0].lcme_offset);
+ memmove(buf->lb_buf + offset + sizeof(*lcme),
+ buf->lb_buf + offset, size);
+ }
+
+ /* 3. Shift the component headers from [pos, count-1] to
+ * [pos+1, count] and recalculate their body offsets. */
+ for (i = count - 1; i >= pos; i--) {
+ lcm->lcm_entries[i + 1] = lcm->lcm_entries[i];
+ lcm->lcm_entries[i + 1].lcme_offset =
+ cpu_to_le32(le32_to_cpu(lcm->lcm_entries[i + 1].
+ lcme_offset) + added);
+ }
+
+ /* 4. Recalculate the body offsets for the components [0, pos). */
+ for (i = 0; i < pos; i++) {
+ lcm->lcm_entries[i].lcme_offset =
+ cpu_to_le32(le32_to_cpu(lcm->lcm_entries[i].
+ lcme_offset) + sizeof(*lcme));
+ }
+
+ /* 'size' now holds the total body size of components [0, pos). */
+ offset = sizeof(*lcm) + sizeof(*lcme) * (count + 1) + size;
+ /* 5. Insert the new component header (entry) at the slot 'pos'. */
+ lcme = &lcm->lcm_entries[pos];
+ lcme->lcme_id = cpu_to_le32(ol->ol_comp_id);
+ lcme->lcme_flags = cpu_to_le32(LCME_FL_INIT);
+ lcme->lcme_extent.e_start = cpu_to_le64(ol->ol_comp_start);
+ lcme->lcme_extent.e_end = cpu_to_le64(ol->ol_comp_end);
+ lcme->lcme_offset = cpu_to_le32(offset);
+ lcme->lcme_size = cpu_to_le32(lov_mds_md_size(ol->ol_stripe_count,
+ LOV_MAGIC_V1));
+
+ if (ol->ol_stripe_count > 1)
+ pattern |= LOV_PATTERN_F_HOLE;
+
+ lmm = buf->lb_buf + offset;
+ /* 6. Insert the new component body at the 'offset'. */
+ objs = __lfsck_layout_new_v1_lovea(lmm, lfsck_dto2fid(parent),
+ ol->ol_stripe_size, ea_off,
+ pattern, ol->ol_stripe_count);
+
+ /* 7. Update mirror related flags and version. */
+ lfsck_layout_update_lcm(lcm, lcme, rec->lor_layout_version,
+ rec->lor_range);
+
+ rc = lfsck_layout_refill_lovea(env, lfsck, handle, parent, cfid, buf,
+ lmm, objs, LU_XATTR_REPLACE, ost_idx,
+ le32_to_cpu(lcm->lcm_size));
+
+ CDEBUG(D_LFSCK, "%s: layout LFSCK assistant add new COMP for "
+ DFID": parent "DFID", OST-index %u, stripe-index %u, "
+ "stripe_size %u, stripe_count %u, comp_id %u, comp_start %llu, "
+ "comp_end %llu, layout version %u, range %u, "
+ "%s LOV EA hole: rc = %d\n",
+ lfsck_lfsck2name(lfsck), PFID(cfid), PFID(lfsck_dto2fid(parent)),
+ ost_idx, ea_off, ol->ol_stripe_size, ol->ol_stripe_count,
+ ol->ol_comp_id, ol->ol_comp_start, ol->ol_comp_end,
+ rec->lor_layout_version, rec->lor_range,
+ le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE ?
+ "with" : "without", rc);
+
+ RETURN(rc);
+}
+
+/* Extend an existing plain (LOV_MAGIC_V1/V3) layout EA of @parent so it
+ * covers the orphan's stripe index @ea_off, then hand the target stripe
+ * slot to lfsck_layout_refill_lovea() for refilling and rewriting.
+ *
+ * NOTE(review): @buf is assumed to be pre-sized by the caller; only the
+ * LASSERTF below guards against overflow -- confirm against callers. */
+static int lfsck_layout_extend_v1v3_lovea(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
+ struct thandle *handle,
+ struct ost_layout *ol,
+ struct dt_object *parent,
+ const struct lu_fid *cfid,
+ struct lu_buf *buf, __u32 ost_idx,
+ __u32 ea_off)
+{
+ struct lov_mds_md_v1 *lmm = buf->lb_buf;
+ struct lov_ost_data_v1 *objs;
+ __u16 count = le16_to_cpu(lmm->lmm_stripe_count);
+ __u32 magic = le32_to_cpu(lmm->lmm_magic);
+ int size;
+ int gap;
+ int rc;
+ ENTRY;
+
+ /* The original LOVEA maybe re-generated via old filter_fid, at
+ * that time, we do not know the stripe count and stripe size. */
+ if (ol->ol_stripe_count > count)
+ count = ol->ol_stripe_count;
+ if (ol->ol_stripe_size != 0 &&
+ ol->ol_stripe_size != le32_to_cpu(lmm->lmm_stripe_size))
+ lmm->lmm_stripe_size = cpu_to_le32(ol->ol_stripe_size);
+
+ /* objs starts at the first slot past the current stripes; the
+ * slot array offset differs between the V1 and V3 headers. */
+ if (magic == LOV_MAGIC_V1)
+ objs = &lmm->lmm_objects[count];
+ else
+ objs = &((struct lov_mds_md_v3 *)lmm)->lmm_objects[count];
+
+ /* gap is negative when ea_off is already inside the current
+ * stripe range; 'objs += gap' below then steps back to the
+ * existing slot at index ea_off. */
+ gap = ea_off - count;
+ if (gap >= 0)
+ count = ea_off + 1;
+
+ size = lov_mds_md_size(count, magic);
+ LASSERTF(buf->lb_len >= size,
+ "buffer len %d is less than real size %d\n",
+ (int)buf->lb_len, size);
+
+ /* Zero the unknown slots between the old stripe count and
+ * ea_off, and mark the layout as containing holes. */
+ if (gap > 0) {
+ memset(objs, 0, gap * sizeof(*objs));
+ lmm->lmm_pattern |= cpu_to_le32(LOV_PATTERN_F_HOLE);
+ }
+
+ /* Bump the layout generation since the layout is modified. */
+ lmm->lmm_layout_gen = cpu_to_le16(le16_to_cpu(lmm->lmm_layout_gen) + 1);
+ lmm->lmm_stripe_count = cpu_to_le16(count);
+ objs += gap;
+
+ rc = lfsck_layout_refill_lovea(env, lfsck, handle, parent, cfid, buf,
+ lmm, objs, LU_XATTR_REPLACE, ost_idx, size);
+
+ CDEBUG(D_LFSCK, "%s: layout LFSCK assistant extend layout EA for "
+ DFID": parent "DFID", OST-index %u, stripe-index %u, "
+ "stripe_size %u, stripe_count %u, comp_id %u, comp_start %llu, "
+ "comp_end %llu, %s LOV EA hole: rc = %d\n",
+ lfsck_lfsck2name(lfsck), PFID(cfid), PFID(lfsck_dto2fid(parent)),
+ ost_idx, ea_off, ol->ol_stripe_size, ol->ol_stripe_count,
+ ol->ol_comp_id, ol->ol_comp_start, ol->ol_comp_end,
+ le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE ?
+ "with" : "without", rc);
+
+ RETURN(rc);