__u32 lsm_md_stripe_count;
__u32 lsm_md_master_mdt_index;
__u32 lsm_md_hash_type;
+ __u8 lsm_md_max_inherit;
+ __u8 lsm_md_max_inherit_rr;
__u32 lsm_md_layout_version;
__u32 lsm_md_migrate_offset;
__u32 lsm_md_migrate_hash;
- __u32 lsm_md_default_count;
- __u32 lsm_md_default_index;
char lsm_md_pool_name[LOV_MAXPOOLNAME + 1];
struct lmv_oinfo lsm_md_oinfo[0];
};
return lsm && lsm->lsm_md_magic == LMV_MAGIC_FOREIGN;
}
-static inline bool lmv_dir_migrating(const struct lmv_stripe_md *lsm)
+/* true if a striped directory has a layout change in progress (migration,
+ * split or merge — see lmv_is_layout_changing() below for the flag set).
+ * Replaces lmv_dir_migrating(), which only covered migration.
+ */
+static inline bool lmv_dir_layout_changing(const struct lmv_stripe_md *lsm)
{
return lmv_dir_striped(lsm) &&
- lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION;
+ lmv_hash_is_layout_changing(lsm->lsm_md_hash_type);
}
static inline bool lmv_dir_bad_hash(const struct lmv_stripe_md *lsm)
if (!lmv_dir_striped(lsm))
return false;
- if (lmv_dir_migrating(lsm) &&
- lsm->lsm_md_stripe_count - lsm->lsm_md_migrate_offset <= 1)
- return false;
-
if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_BAD_TYPE)
return true;
lsm1->lsm_md_master_mdt_index !=
lsm2->lsm_md_master_mdt_index ||
lsm1->lsm_md_hash_type != lsm2->lsm_md_hash_type ||
+ lsm1->lsm_md_max_inherit != lsm2->lsm_md_max_inherit ||
+ lsm1->lsm_md_max_inherit_rr != lsm2->lsm_md_max_inherit_rr ||
lsm1->lsm_md_layout_version !=
lsm2->lsm_md_layout_version ||
lsm1->lsm_md_migrate_offset !=
&lsm2->lsm_md_oinfo[idx].lmo_fid))
return false;
}
+ } else if (lsm1->lsm_md_magic == LMV_USER_MAGIC_SPECIFIC) {
+ for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
+ if (lsm1->lsm_md_oinfo[idx].lmo_mds !=
+ lsm2->lsm_md_oinfo[idx].lmo_mds)
+ return false;
+ }
}
return true;
{
int i;
- /* If lsm_md_magic == LMV_MAGIC_FOREIGN pool_name may not be a null
- * terminated string so only print LOV_MAXPOOLNAME bytes.
- */
- CDEBUG(mask,
- "magic %#x stripe count %d master mdt %d hash type %#x version %d migrate offset %d migrate hash %#x pool %.*s\n",
+ CDEBUG_LIMIT(mask,
+ "dump LMV: magic=%#x count=%u index=%u hash=%s:%#x max_inherit=%hhu max_inherit_rr=%hhu version=%u migrate_offset=%u migrate_hash=%s:%x pool=%.*s\n",
lsm->lsm_md_magic, lsm->lsm_md_stripe_count,
- lsm->lsm_md_master_mdt_index, lsm->lsm_md_hash_type,
+ lsm->lsm_md_master_mdt_index,
+ lmv_is_known_hash_type(lsm->lsm_md_hash_type) ?
+ mdt_hash_name[lsm->lsm_md_hash_type & LMV_HASH_TYPE_MASK] :
+ "invalid", lsm->lsm_md_hash_type,
+ lsm->lsm_md_max_inherit, lsm->lsm_md_max_inherit_rr,
lsm->lsm_md_layout_version, lsm->lsm_md_migrate_offset,
- lsm->lsm_md_migrate_hash,
+ lmv_is_known_hash_type(lsm->lsm_md_migrate_hash) ?
+ mdt_hash_name[lsm->lsm_md_migrate_hash & LMV_HASH_TYPE_MASK] :
+ "invalid", lsm->lsm_md_migrate_hash,
LOV_MAXPOOLNAME, lsm->lsm_md_pool_name);
if (!lmv_dir_striped(lsm))
le32_to_cpu(lmv_src->lmv_master_mdt_index);
lmv_dst->lmv_hash_type = le32_to_cpu(lmv_src->lmv_hash_type);
lmv_dst->lmv_layout_version = le32_to_cpu(lmv_src->lmv_layout_version);
+ if (lmv_src->lmv_stripe_count > LMV_MAX_STRIPE_COUNT)
+ return;
for (i = 0; i < lmv_src->lmv_stripe_count; i++)
fid_le_to_cpu(&lmv_dst->lmv_stripe_fids[i],
&lmv_src->lmv_stripe_fids[i]);
* algorithm.
*/
static inline unsigned int
-lmv_hash_crush(unsigned int count, const char *name, int namelen)
+lmv_hash_crush(unsigned int count, const char *name, int namelen, bool crush2)
{
unsigned long long straw;
unsigned long long highest_straw = 0;
* 1. rsync: .<target>.XXXXXX
* 2. dstripe: <target>.XXXXXXXX
*/
- if (lu_name_is_temp_file(name, namelen, true, 6)) {
+ if (lu_name_is_temp_file(name, namelen, true, 6, crush2)) {
name++;
namelen -= 8;
- } else if (lu_name_is_temp_file(name, namelen, false, 8)) {
+ } else if (lu_name_is_temp_file(name, namelen, false, 8, crush2)) {
namelen -= 9;
} else if (lu_name_is_backup_file(name, namelen, &i)) {
LASSERT(i < namelen);
return idx;
}
-static inline int lmv_name_to_stripe_index(__u32 hash_type,
- unsigned int stripe_count,
- const char *name, int namelen)
+/* directory layout may change in three ways:
+ * 1. directory migration, in its LMV source stripes are appended after
+ * target stripes, \a migrate_hash is source hash type, \a migrate_offset is
+ * target stripe count,
+ * 2. directory split, \a migrate_hash is hash type before split,
+ * \a migrate_offset is stripe count before split.
+ * 3. directory merge, \a migrate_hash is hash type after merge,
+ * \a migrate_offset is stripe count after merge.
+ *
+ * \a new_layout selects which of the two coexisting layouts \a name is
+ * hashed under: true = target/new layout, false = source/old layout.
+ *
+ * Return: stripe index (>= 0), or -EBADFD for an unknown hash type.
+ */
+static inline int
+__lmv_name_to_stripe_index(__u32 hash_type, __u32 stripe_count,
+ __u32 migrate_hash, __u32 migrate_offset,
+ const char *name, int namelen, bool new_layout)
{
- unsigned int idx;
+ __u32 saved_hash = hash_type;
+ __u32 saved_count = stripe_count;
+ int stripe_index = 0;
LASSERT(namelen > 0);
LASSERT(stripe_count > 0);
- if (stripe_count == 1)
- return 0;
+ /* select the hash type and stripe count of the layout requested by
+ * \a new_layout (see the three cases in the comment above)
+ */
+ if (lmv_hash_is_splitting(hash_type)) {
+ if (!new_layout) {
+ hash_type = migrate_hash;
+ stripe_count = migrate_offset;
+ }
+ } else if (lmv_hash_is_merging(hash_type)) {
+ if (new_layout) {
+ hash_type = migrate_hash;
+ stripe_count = migrate_offset;
+ }
+ } else if (lmv_hash_is_migrating(hash_type)) {
+ if (new_layout) {
+ stripe_count = migrate_offset;
+ } else {
+ hash_type = migrate_hash;
+ stripe_count -= migrate_offset;
+ }
+ }
- switch (hash_type & LMV_HASH_TYPE_MASK) {
- case LMV_HASH_TYPE_ALL_CHARS:
- idx = lmv_hash_all_chars(stripe_count, name, namelen);
- break;
- case LMV_HASH_TYPE_FNV_1A_64:
- idx = lmv_hash_fnv1a(stripe_count, name, namelen);
- break;
- case LMV_HASH_TYPE_CRUSH:
- idx = lmv_hash_crush(stripe_count, name, namelen);
- break;
- default:
- return -EBADFD;
+ /* a single-stripe (sub-)layout trivially maps to index 0 */
+ if (stripe_count > 1) {
+ switch (hash_type & LMV_HASH_TYPE_MASK) {
+ case LMV_HASH_TYPE_ALL_CHARS:
+ stripe_index = lmv_hash_all_chars(stripe_count, name,
+ namelen);
+ break;
+ case LMV_HASH_TYPE_FNV_1A_64:
+ stripe_index = lmv_hash_fnv1a(stripe_count, name,
+ namelen);
+ break;
+ case LMV_HASH_TYPE_CRUSH:
+ stripe_index = lmv_hash_crush(stripe_count, name,
+ namelen, false);
+ break;
+ case LMV_HASH_TYPE_CRUSH2:
+ stripe_index = lmv_hash_crush(stripe_count, name,
+ namelen, true);
+ break;
+ default:
+ return -EBADFD;
+ }
}
- CDEBUG(D_INFO, "name %.*s hash_type %#x idx %d/%u\n", namelen, name,
- hash_type, idx, stripe_count);
+ LASSERT(stripe_index < stripe_count);
- return idx;
+ /* during migration the source stripes are appended after the
+ * migrate_offset target stripes, so shift into the source range
+ */
+ if (!new_layout && lmv_hash_is_migrating(saved_hash))
+ stripe_index += migrate_offset;
+
+ LASSERT(stripe_index < saved_count);
+
+ CDEBUG(D_INFO, "name %.*s hash=%#x/%#x idx=%d/%u/%u under %s layout\n",
+ namelen, name, saved_hash, migrate_hash, stripe_index,
+ saved_count, migrate_offset, new_layout ? "new" : "old");
+
+ return stripe_index;
+}
+
+/* hash \a name to its stripe index under the directory's NEW (target)
+ * layout.  Accepts \a lmv with fields in either CPU or little-endian byte
+ * order, for magics LMV_MAGIC_V1 and LMV_MAGIC_STRIPE; any other magic
+ * returns -EINVAL.
+ */
+static inline int lmv_name_to_stripe_index(struct lmv_mds_md_v1 *lmv,
+ const char *name, int namelen)
+{
+ if (lmv->lmv_magic == LMV_MAGIC_V1 ||
+ lmv->lmv_magic == LMV_MAGIC_STRIPE)
+ return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
+ lmv->lmv_stripe_count,
+ lmv->lmv_migrate_hash,
+ lmv->lmv_migrate_offset,
+ name, namelen, true);
+
+ if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1) ||
+ lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_STRIPE))
+ return __lmv_name_to_stripe_index(
+ le32_to_cpu(lmv->lmv_hash_type),
+ le32_to_cpu(lmv->lmv_stripe_count),
+ le32_to_cpu(lmv->lmv_migrate_hash),
+ le32_to_cpu(lmv->lmv_migrate_offset),
+ name, namelen, true);
+
+ return -EINVAL;
+}
+
+/* hash \a name to its stripe index under the directory's OLD (source)
+ * layout — the only difference from lmv_name_to_stripe_index() is
+ * new_layout = false.  Same byte-order handling and -EINVAL on an
+ * unrecognized magic.
+ */
+static inline int lmv_name_to_stripe_index_old(struct lmv_mds_md_v1 *lmv,
+ const char *name, int namelen)
+{
+ if (lmv->lmv_magic == LMV_MAGIC_V1 ||
+ lmv->lmv_magic == LMV_MAGIC_STRIPE)
+ return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
+ lmv->lmv_stripe_count,
+ lmv->lmv_migrate_hash,
+ lmv->lmv_migrate_offset,
+ name, namelen, false);
+
+ if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1) ||
+ lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_STRIPE))
+ return __lmv_name_to_stripe_index(
+ le32_to_cpu(lmv->lmv_hash_type),
+ le32_to_cpu(lmv->lmv_stripe_count),
+ le32_to_cpu(lmv->lmv_migrate_hash),
+ le32_to_cpu(lmv->lmv_migrate_offset),
+ name, namelen, false);
+
+ return -EINVAL;
+}
static inline bool lmv_user_magic_supported(__u32 lum_magic)
lum_magic == LMV_MAGIC_FOREIGN;
}
+/* Dump one struct lmv_mds_md_v1 at debug \a mask, prefixed with \a msg.
+ * NOTE(review): the macro evaluates its \a lmv argument many times — pass
+ * a side-effect-free expression.  At most LOV_MAXPOOLNAME bytes of the
+ * pool name are printed because lmv_pool_name may not be NUL-terminated
+ * (as with LMV_MAGIC_FOREIGN layouts).
+ */
+#define LMV_DEBUG(mask, lmv, msg) \
+ CDEBUG_LIMIT(mask, \
+ "%s LMV: magic=%#x count=%u index=%u hash=%s:%#x version=%u migrate_offset=%u migrate_hash=%s:%x pool=%.*s\n",\
+ msg, (lmv)->lmv_magic, (lmv)->lmv_stripe_count, \
+ (lmv)->lmv_master_mdt_index, \
+ lmv_is_known_hash_type((lmv)->lmv_hash_type) ? \
+ mdt_hash_name[(lmv)->lmv_hash_type & LMV_HASH_TYPE_MASK] : \
+ "invalid", (lmv)->lmv_hash_type, \
+ (lmv)->lmv_layout_version, (lmv)->lmv_migrate_offset, \
+ lmv_is_known_hash_type((lmv)->lmv_migrate_hash) ? \
+ mdt_hash_name[(lmv)->lmv_migrate_hash & LMV_HASH_TYPE_MASK] : \
+ "invalid", (lmv)->lmv_migrate_hash, \
+ LOV_MAXPOOLNAME, (lmv)->lmv_pool_name)
+
+/* master LMV is sane */
+/* Validate an on-disk (little-endian) master LMV: magic must be
+ * LMV_MAGIC_V1, stripe count non-zero, hash type known.  Dumps the layout
+ * at D_ERROR and returns false when insane; NULL is rejected silently.
+ */
static inline bool lmv_is_sane(const struct lmv_mds_md_v1 *lmv)
{
+ if (!lmv)
+ return false;
+
if (le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_V1)
+ goto insane;
+
+ if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
+ goto insane;
+
+ if (!lmv_is_known_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
+ goto insane;
+
+ return true;
+insane:
+ LMV_DEBUG(D_ERROR, lmv, "unknown layout");
+ return false;
+}
+
+/* LMV can be either master or stripe LMV */
+/* Same checks as lmv_is_sane() but additionally accepts LMV_MAGIC_STRIPE,
+ * so it validates both master and stripe LMV in on-disk byte order.
+ */
+static inline bool lmv_is_sane2(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv)
return false;
+ if (le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_V1 &&
+ le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_STRIPE)
+ goto insane;
+
if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
+ goto insane;
+
+ if (!lmv_is_known_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
+ goto insane;
+
+ return true;
+insane:
+ LMV_DEBUG(D_ERROR, lmv, "unknown layout");
+ return false;
+}
+
+/* true if sane on-disk \a lmv (master or stripe) has a split in progress.
+ * NOTE(review): cpu_to_le32() here byte-swaps the little-endian field; it
+ * is numerically identical to le32_to_cpu(), but the latter would state
+ * the intent — confirm before changing.
+ */
+static inline bool lmv_is_splitting(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane2(lmv))
return false;
- if (!lmv_is_known_hash_type(lmv->lmv_hash_type))
+ return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type));
+}
+
+/* true if sane on-disk \a lmv (master or stripe) has a merge in progress.
+ * NOTE(review): cpu_to_le32() is equivalent to le32_to_cpu() here — see
+ * lmv_is_splitting().
+ */
+static inline bool lmv_is_merging(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane2(lmv))
return false;
- return true;
+ return lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type));
+}
+
+/* true if sane on-disk \a lmv has a migration in progress.  Uses the
+ * stricter lmv_is_sane() (LMV_MAGIC_V1 only) — presumably migration state
+ * is only meaningful on a master LMV; confirm against callers.
+ */
+static inline bool lmv_is_migrating(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane(lmv))
+ return false;
+
+ return lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));
+}
+
+/* restriping = split or merge in progress (migration excluded) */
+static inline bool lmv_is_restriping(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane2(lmv))
+ return false;
+
+ return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type)) ||
+ lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type));
+}
+
+/* any layout change in progress: split, merge or migration — the on-disk
+ * counterpart of lmv_dir_layout_changing() above
+ */
+static inline bool lmv_is_layout_changing(const struct lmv_mds_md_v1 *lmv)
+{
+ if (!lmv_is_sane2(lmv))
+ return false;
+
+ return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type)) ||
+ lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type)) ||
+ lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));
+}
+
+/* true if the on-disk layout carries LMV_HASH_FLAG_FIXED.
+ * NOTE(review): unlike the sibling predicates above this one does not
+ * NULL/sanity-check \a lmv — callers must pass a valid LMV; confirm this
+ * asymmetry is intentional.
+ */
+static inline bool lmv_is_fixed(const struct lmv_mds_md_v1 *lmv)
+{
+ return cpu_to_le32(lmv->lmv_hash_type) & LMV_HASH_FLAG_FIXED;
+}
+
+/* Compute the default-LMV inherit depth for the next directory level:
+ * END and NONE both stop inheritance (return NONE); UNLIMITED and values
+ * above LMV_INHERIT_MAX pass through unchanged; otherwise decrement by
+ * one level.
+ */
+static inline __u8 lmv_inherit_next(__u8 inherit)
+{
+ if (inherit == LMV_INHERIT_END || inherit == LMV_INHERIT_NONE)
+ return LMV_INHERIT_NONE;
+
+ if (inherit == LMV_INHERIT_UNLIMITED || inherit > LMV_INHERIT_MAX)
+ return inherit;
+
+ return inherit - 1;
+}
+
+/* Round-robin variant of lmv_inherit_next(): NONE, UNLIMITED and values
+ * above LMV_INHERIT_RR_MAX pass through unchanged; otherwise decrement by
+ * one level.  Note there is no separate END sentinel for the RR depth.
+ */
+static inline __u8 lmv_inherit_rr_next(__u8 inherit_rr)
+{
+ if (inherit_rr == LMV_INHERIT_RR_NONE ||
+ inherit_rr == LMV_INHERIT_RR_UNLIMITED ||
+ inherit_rr > LMV_INHERIT_RR_MAX)
+ return inherit_rr;
+
+ return inherit_rr - 1;
+}
#endif