+ if (!new_layout && lmv_hash_is_migrating(saved_hash))
+ stripe_index += migrate_offset;
+
+ LASSERT(stripe_index < saved_count);
+
+ CDEBUG(D_INFO, "name %.*s hash=%#x/%#x idx=%d/%u/%u under %s layout\n",
+ namelen, name, saved_hash, migrate_hash, stripe_index,
+ saved_count, migrate_offset, new_layout ? "new" : "old");
+
+ return stripe_index;
+}
+
+static inline int lmv_name_to_stripe_index(struct lmv_mds_md_v1 *lmv,
+ const char *name, int namelen)
+{
+ if (lmv->lmv_magic == LMV_MAGIC_V1)
+ return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
+ lmv->lmv_stripe_count,
+ lmv->lmv_migrate_hash,
+ lmv->lmv_migrate_offset,
+ name, namelen, true);
+
+ if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1))
+ return __lmv_name_to_stripe_index(
+ le32_to_cpu(lmv->lmv_hash_type),
+ le32_to_cpu(lmv->lmv_stripe_count),
+ le32_to_cpu(lmv->lmv_migrate_hash),
+ le32_to_cpu(lmv->lmv_migrate_offset),
+ name, namelen, true);
+
+ return -EINVAL;
+}
+
+static inline int lmv_name_to_stripe_index_old(struct lmv_mds_md_v1 *lmv,
+ const char *name, int namelen)
+{
+ if (lmv->lmv_magic == LMV_MAGIC_V1 ||
+ lmv->lmv_magic == LMV_MAGIC_STRIPE)
+ return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
+ lmv->lmv_stripe_count,
+ lmv->lmv_migrate_hash,
+ lmv->lmv_migrate_offset,
+ name, namelen, false);
+
+ if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1) ||
+ lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_STRIPE))
+ return __lmv_name_to_stripe_index(
+ le32_to_cpu(lmv->lmv_hash_type),
+ le32_to_cpu(lmv->lmv_stripe_count),
+ le32_to_cpu(lmv->lmv_migrate_hash),
+ le32_to_cpu(lmv->lmv_migrate_offset),
+ name, namelen, false);
+
+ return -EINVAL;
+}
+
+static inline bool lmv_user_magic_supported(__u32 lum_magic)
+{
+ return lum_magic == LMV_USER_MAGIC ||
+ lum_magic == LMV_USER_MAGIC_SPECIFIC ||
+ lum_magic == LMV_MAGIC_FOREIGN;
+}
+
/*
 * Dump the fields of an LMV at debug level @mask, prefixed with @msg.
 * The "& (LMV_HASH_TYPE_MAX - 1)" keeps the mdt_hash_name[] lookups in
 * bounds whatever bits are set in the hash fields.
 * NOTE(review): fields are printed as stored, with no endian conversion;
 * callers pass both CPU- and little-endian structs (see lmv_is_sane()),
 * so raw values may appear byte-swapped on big-endian hosts -- confirm
 * this is acceptable for a debug-only message.
 */
#define LMV_DEBUG(mask, lmv, msg)					\
	CDEBUG(mask,							\
	       "%s LMV: magic=%#x count=%u index=%u hash=%s:%#x version=%u migrate offset=%u migrate hash=%s:%u.\n",\
	       msg, (lmv)->lmv_magic, (lmv)->lmv_stripe_count,		\
	       (lmv)->lmv_master_mdt_index,				\
	       mdt_hash_name[(lmv)->lmv_hash_type & (LMV_HASH_TYPE_MAX - 1)],\
	       (lmv)->lmv_hash_type, (lmv)->lmv_layout_version,		\
	       (lmv)->lmv_migrate_offset,				\
	       mdt_hash_name[(lmv)->lmv_migrate_hash & (LMV_HASH_TYPE_MAX - 1)],\
	       (lmv)->lmv_migrate_hash)
+
+/* master LMV is sane */
+static inline bool lmv_is_sane(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv)
+ return false;
+
+ if (le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_V1)
+ goto insane;
+
+ if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
+ goto insane;
+
+ if (!lmv_is_known_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
+ goto insane;
+
+ return true;
+insane:
+ LMV_DEBUG(D_ERROR, lmv, "insane");
+ return false;
+}
+
+/* LMV can be either master or stripe LMV */
+static inline bool lmv_is_sane2(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv)
+ return false;
+
+ if (le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_V1 &&
+ le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_STRIPE)
+ goto insane;
+
+ if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
+ goto insane;
+
+ if (!lmv_is_known_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
+ goto insane;
+
+ return true;
+insane:
+ LMV_DEBUG(D_ERROR, lmv, "insane");
+ return false;
+}
+
+static inline bool lmv_is_splitting(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane2(lmv))
+ return false;
+
+ return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type));
+}
+
+static inline bool lmv_is_merging(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane2(lmv))
+ return false;
+
+ return lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type));
+}
+
+static inline bool lmv_is_migrating(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane(lmv))
+ return false;
+
+ return lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));
+}
+
+static inline bool lmv_is_restriping(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane2(lmv))
+ return false;
+
+ return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type)) ||
+ lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type));
+}
+
+static inline bool lmv_is_layout_changing(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane2(lmv))
+ return false;
+
+ return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type)) ||
+ lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type)) ||
+ lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));