__u32 lsm_md_stripe_count;
__u32 lsm_md_master_mdt_index;
__u32 lsm_md_hash_type;
+ __u8 lsm_md_max_inherit;
+ __u8 lsm_md_max_inherit_rr;
__u32 lsm_md_layout_version;
__u32 lsm_md_migrate_offset;
__u32 lsm_md_migrate_hash;
- __u32 lsm_md_default_count;
- __u32 lsm_md_default_index;
char lsm_md_pool_name[LOV_MAXPOOLNAME + 1];
struct lmv_oinfo lsm_md_oinfo[0];
};
lsm1->lsm_md_master_mdt_index !=
lsm2->lsm_md_master_mdt_index ||
lsm1->lsm_md_hash_type != lsm2->lsm_md_hash_type ||
+ lsm1->lsm_md_max_inherit != lsm2->lsm_md_max_inherit ||
+ lsm1->lsm_md_max_inherit_rr != lsm2->lsm_md_max_inherit_rr ||
lsm1->lsm_md_layout_version !=
lsm2->lsm_md_layout_version ||
lsm1->lsm_md_migrate_offset !=
&lsm2->lsm_md_oinfo[idx].lmo_fid))
return false;
}
+ } else if (lsm1->lsm_md_magic == LMV_USER_MAGIC_SPECIFIC) {
+ for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
+ if (lsm1->lsm_md_oinfo[idx].lmo_mds !=
+ lsm2->lsm_md_oinfo[idx].lmo_mds)
+ return false;
+ }
}
return true;
{
int i;
- /* If lsm_md_magic == LMV_MAGIC_FOREIGN pool_name may not be a null
- * terminated string so only print LOV_MAXPOOLNAME bytes.
- */
- CDEBUG(mask,
- "magic %#x stripe count %d master mdt %d hash type %#x version %d migrate offset %d migrate hash %#x pool %.*s\n",
+ CDEBUG_LIMIT(mask,
+ "dump LMV: magic=%#x count=%u index=%u hash=%s:%#x max_inherit=%hhu max_inherit_rr=%hhu version=%u migrate_offset=%u migrate_hash=%s:%x pool=%.*s\n",
lsm->lsm_md_magic, lsm->lsm_md_stripe_count,
- lsm->lsm_md_master_mdt_index, lsm->lsm_md_hash_type,
+ lsm->lsm_md_master_mdt_index,
+ lmv_is_known_hash_type(lsm->lsm_md_hash_type) ?
+ mdt_hash_name[lsm->lsm_md_hash_type & LMV_HASH_TYPE_MASK] :
+ "invalid", lsm->lsm_md_hash_type,
+ lsm->lsm_md_max_inherit, lsm->lsm_md_max_inherit_rr,
lsm->lsm_md_layout_version, lsm->lsm_md_migrate_offset,
- lsm->lsm_md_migrate_hash,
+ lmv_is_known_hash_type(lsm->lsm_md_migrate_hash) ?
+ mdt_hash_name[lsm->lsm_md_migrate_hash & LMV_HASH_TYPE_MASK] :
+ "invalid", lsm->lsm_md_migrate_hash,
LOV_MAXPOOLNAME, lsm->lsm_md_pool_name);
if (!lmv_dir_striped(lsm))
* algorithm.
*/
static inline unsigned int
-lmv_hash_crush(unsigned int count, const char *name, int namelen)
+lmv_hash_crush(unsigned int count, const char *name, int namelen, bool crush2)
{
unsigned long long straw;
unsigned long long highest_straw = 0;
* 1. rsync: .<target>.XXXXXX
* 2. dstripe: <target>.XXXXXXXX
*/
- if (lu_name_is_temp_file(name, namelen, true, 6)) {
+ if (lu_name_is_temp_file(name, namelen, true, 6, crush2)) {
name++;
namelen -= 8;
- } else if (lu_name_is_temp_file(name, namelen, false, 8)) {
+ } else if (lu_name_is_temp_file(name, namelen, false, 8, crush2)) {
namelen -= 9;
} else if (lu_name_is_backup_file(name, namelen, &i)) {
LASSERT(i < namelen);
break;
case LMV_HASH_TYPE_CRUSH:
stripe_index = lmv_hash_crush(stripe_count, name,
- namelen);
+ namelen, false);
+ break;
+ case LMV_HASH_TYPE_CRUSH2:
+ stripe_index = lmv_hash_crush(stripe_count, name,
+ namelen, true);
break;
default:
return -EBADFD;
static inline int lmv_name_to_stripe_index(struct lmv_mds_md_v1 *lmv,
const char *name, int namelen)
{
- if (lmv->lmv_magic == LMV_MAGIC_V1)
+ if (lmv->lmv_magic == LMV_MAGIC_V1 ||
+ lmv->lmv_magic == LMV_MAGIC_STRIPE)
return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
lmv->lmv_stripe_count,
lmv->lmv_migrate_hash,
lmv->lmv_migrate_offset,
name, namelen, true);
- if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1))
+ if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1) ||
+ lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_STRIPE))
return __lmv_name_to_stripe_index(
le32_to_cpu(lmv->lmv_hash_type),
le32_to_cpu(lmv->lmv_stripe_count),
lum_magic == LMV_MAGIC_FOREIGN;
}
+/* Dump an on-wire LMV layout (struct lmv_mds_md_v1) to the debug log.
+ * @mask: CDEBUG mask; @lmv: layout to dump; @msg: caller-supplied prefix.
+ * Rate-limited via CDEBUG_LIMIT.  The pool name may not be NUL-terminated
+ * (e.g. foreign layouts), so at most LOV_MAXPOOLNAME bytes are printed.
+ */
+#define LMV_DEBUG(mask, lmv, msg) \
+	CDEBUG_LIMIT(mask, \
+		"%s LMV: magic=%#x count=%u index=%u hash=%s:%#x version=%u migrate_offset=%u migrate_hash=%s:%x pool=%.*s\n",\
+		msg, (lmv)->lmv_magic, (lmv)->lmv_stripe_count, \
+		(lmv)->lmv_master_mdt_index, \
+		lmv_is_known_hash_type((lmv)->lmv_hash_type) ? \
+		mdt_hash_name[(lmv)->lmv_hash_type & LMV_HASH_TYPE_MASK] : \
+		"invalid", (lmv)->lmv_hash_type, \
+		(lmv)->lmv_layout_version, (lmv)->lmv_migrate_offset, \
+		lmv_is_known_hash_type((lmv)->lmv_migrate_hash) ? \
+		mdt_hash_name[(lmv)->lmv_migrate_hash & LMV_HASH_TYPE_MASK] : \
+		"invalid", (lmv)->lmv_migrate_hash, \
+		LOV_MAXPOOLNAME, (lmv)->lmv_pool_name)
+
/* master LMV is sane */
static inline bool lmv_is_sane(const struct lmv_mds_md_v1 *lmv)
{
return true;
insane:
- LMV_DEBUG(D_ERROR, lmv, "insane");
+ LMV_DEBUG(D_ERROR, lmv, "unknown layout");
return false;
}
return true;
insane:
- LMV_DEBUG(D_ERROR, lmv, "insane");
+ LMV_DEBUG(D_ERROR, lmv, "unknown layout");
return false;
}
lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));
}
+/* Return true if LMV_HASH_FLAG_FIXED is set in the hash type — presumably
+ * marking a layout whose striping must not be changed automatically; confirm
+ * against the LMV_HASH_FLAG_FIXED definition.
+ */
+static inline bool lmv_is_fixed(const struct lmv_mds_md_v1 *lmv)
+{
+	/* lmv_hash_type is little-endian on the wire (see the le32_to_cpu()
+	 * accesses elsewhere in this header): convert to CPU order before
+	 * testing the flag.  le32_to_cpu(), not cpu_to_le32() — the two are
+	 * numerically identical (byte swap is its own inverse), but this is
+	 * the correct conversion direction and sparse annotation.
+	 */
+	return le32_to_cpu(lmv->lmv_hash_type) & LMV_HASH_FLAG_FIXED;
+}
+
+/* Compute the default-LMV max-inherit value for the next directory level:
+ * terminal values (END/NONE) collapse to NONE, unlimited or out-of-range
+ * values pass through unchanged, anything else loses one level of depth.
+ */
+static inline __u8 lmv_inherit_next(__u8 inherit)
+{
+	/* depth exhausted or inheritance disabled: stays disabled */
+	if (inherit == LMV_INHERIT_NONE || inherit == LMV_INHERIT_END)
+		return LMV_INHERIT_NONE;
+
+	/* unlimited, or a value beyond the sane range: propagate as-is */
+	if (inherit > LMV_INHERIT_MAX || inherit == LMV_INHERIT_UNLIMITED)
+		return inherit;
+
+	/* one level of inheritance consumed */
+	return inherit - 1;
+}
+
+/* Round-robin variant of lmv_inherit_next(): values that cannot be
+ * decremented (none, unlimited, or out of range) are returned unchanged;
+ * otherwise one level of inheritance depth is consumed.
+ */
+static inline __u8 lmv_inherit_rr_next(__u8 inherit_rr)
+{
+	bool keep = inherit_rr == LMV_INHERIT_RR_NONE ||
+		    inherit_rr == LMV_INHERIT_RR_UNLIMITED ||
+		    inherit_rr > LMV_INHERIT_RR_MAX;
+
+	return keep ? inherit_rr : inherit_rr - 1;
+}
+
#endif