X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lustre%2Finclude%2Flustre_lmv.h;h=a323dd75ec266a6254aa3fe457f1721a6befea7e;hb=f5564c35ede12659acedd14845cb36e70563233a;hp=6f302b3d2ea822e189ab898b8f8f696da55a911f;hpb=5ecd5a5ecfb880236b5fbc23621102239de5f5d6;p=fs%2Flustre-release.git

diff --git a/lustre/include/lustre_lmv.h b/lustre/include/lustre_lmv.h
index 6f302b3..a323dd7 100644
--- a/lustre/include/lustre_lmv.h
+++ b/lustre/include/lustre_lmv.h
@@ -50,42 +50,77 @@ struct lmv_stripe_md {
 	__u32 lsm_md_layout_version;
 	__u32 lsm_md_migrate_offset;
 	__u32 lsm_md_migrate_hash;
-	__u32 lsm_md_default_count;
-	__u32 lsm_md_default_index;
 	char lsm_md_pool_name[LOV_MAXPOOLNAME + 1];
 	struct lmv_oinfo lsm_md_oinfo[0];
 };
 
-static inline bool lmv_dir_striped(const struct lmv_stripe_md *lsm)
+struct lmv_stripe_object {
+	atomic_t lso_refs;
+	union {
+		struct lmv_stripe_md lso_lsm;
+		struct lmv_foreign_md lso_lfm;
+	};
+};
+
+static inline bool lmv_dir_striped(const struct lmv_stripe_object *lso)
 {
-	return lsm && lsm->lsm_md_magic == LMV_MAGIC;
+	return lso && lso->lso_lsm.lsm_md_magic == LMV_MAGIC;
 }
 
-static inline bool lmv_dir_foreign(const struct lmv_stripe_md *lsm)
+static inline bool lmv_dir_foreign(const struct lmv_stripe_object *lso)
 {
-	return lsm && lsm->lsm_md_magic == LMV_MAGIC_FOREIGN;
+	return lso && lso->lso_lsm.lsm_md_magic == LMV_MAGIC_FOREIGN;
 }
 
-static inline bool lmv_dir_layout_changing(const struct lmv_stripe_md *lsm)
+static inline bool lmv_dir_layout_changing(const struct lmv_stripe_object *lso)
 {
-	return lmv_dir_striped(lsm) &&
-	       lmv_hash_is_layout_changing(lsm->lsm_md_hash_type);
+	return lmv_dir_striped(lso) &&
+	       lmv_hash_is_layout_changing(lso->lso_lsm.lsm_md_hash_type);
 }
 
-static inline bool lmv_dir_bad_hash(const struct lmv_stripe_md *lsm)
+static inline bool lmv_dir_bad_hash(const struct lmv_stripe_object *lso)
 {
-	if (!lmv_dir_striped(lsm))
+	if (!lmv_dir_striped(lso))
 		return false;
 
-	if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_BAD_TYPE)
+	if (lso->lso_lsm.lsm_md_hash_type & LMV_HASH_FLAG_BAD_TYPE)
 		return true;
 
-	return !lmv_is_known_hash_type(lsm->lsm_md_hash_type);
+	return !lmv_is_known_hash_type(lso->lso_lsm.lsm_md_hash_type);
 }
 
-static inline bool
-lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
+static inline __u8 lmv_inherit_next(__u8 inherit)
+{
+	if (inherit == LMV_INHERIT_END || inherit == LMV_INHERIT_NONE)
+		return LMV_INHERIT_NONE;
+
+	if (inherit == LMV_INHERIT_UNLIMITED || inherit > LMV_INHERIT_MAX)
+		return inherit;
+
+	return inherit - 1;
+}
+
+static inline __u8 lmv_inherit_rr_next(__u8 inherit_rr)
+{
+	if (inherit_rr == LMV_INHERIT_RR_NONE ||
+	    inherit_rr == LMV_INHERIT_RR_UNLIMITED ||
+	    inherit_rr > LMV_INHERIT_RR_MAX)
+		return inherit_rr;
+
+	return inherit_rr - 1;
+}
+
+static inline bool lmv_is_inheritable(__u8 inherit)
+{
+	return inherit == LMV_INHERIT_UNLIMITED ||
+	       (inherit > LMV_INHERIT_END && inherit <= LMV_INHERIT_MAX);
+}
+
+static inline bool lsm_md_eq(const struct lmv_stripe_object *lso1,
+			     const struct lmv_stripe_object *lso2)
 {
+	const struct lmv_stripe_md *lsm1 = &lso1->lso_lsm;
+	const struct lmv_stripe_md *lsm2 = &lso2->lso_lsm;
 	__u32 idx;
 
 	if (lsm1->lsm_md_magic != lsm2->lsm_md_magic ||
@@ -93,6 +128,8 @@ lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
 	    lsm1->lsm_md_master_mdt_index !=
 				lsm2->lsm_md_master_mdt_index ||
 	    lsm1->lsm_md_hash_type != lsm2->lsm_md_hash_type ||
+	    lsm1->lsm_md_max_inherit != lsm2->lsm_md_max_inherit ||
+	    lsm1->lsm_md_max_inherit_rr != lsm2->lsm_md_max_inherit_rr ||
 	    lsm1->lsm_md_layout_version !=
 				lsm2->lsm_md_layout_version ||
 	    lsm1->lsm_md_migrate_offset !=
@@ -103,47 +140,80 @@ lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
 		    sizeof(lsm1->lsm_md_pool_name)) != 0)
 		return false;
 
-	if (lmv_dir_striped(lsm1)) {
+	if (lmv_dir_striped(lso1)) {
 		for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
 			if (!lu_fid_eq(&lsm1->lsm_md_oinfo[idx].lmo_fid,
 				       &lsm2->lsm_md_oinfo[idx].lmo_fid))
 				return false;
 		}
+	} else if (lsm1->lsm_md_magic == LMV_USER_MAGIC_SPECIFIC) {
+		for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
+			if (lsm1->lsm_md_oinfo[idx].lmo_mds !=
+			    lsm2->lsm_md_oinfo[idx].lmo_mds)
+				return false;
+		}
 	}
 
 	return true;
 }
 
-static inline void lsm_md_dump(int mask, const struct lmv_stripe_md *lsm)
+static inline void
+lmv_stripe_object_dump(int mask, const struct lmv_stripe_object *lsmo)
 {
-	bool valid_hash = lmv_dir_bad_hash(lsm);
+	const struct lmv_stripe_md *lsm = &lsmo->lso_lsm;
 	int i;
 
-	/* If lsm_md_magic == LMV_MAGIC_FOREIGN pool_name may not be a null
-	 * terminated string so only print LOV_MAXPOOLNAME bytes.
-	 */
 	CDEBUG(mask,
-	       "magic %#x stripe count %d master mdt %d hash type %s:%#x max inherit %hhu version %d migrate offset %d migrate hash %#x pool %.*s\n",
-	       lsm->lsm_md_magic, lsm->lsm_md_stripe_count,
-	       lsm->lsm_md_master_mdt_index,
-	       valid_hash ? "invalid hash" :
-	       mdt_hash_name[lsm->lsm_md_hash_type & (LMV_HASH_TYPE_MAX - 1)],
-	       lsm->lsm_md_hash_type, lsm->lsm_md_max_inherit,
-	       lsm->lsm_md_layout_version,
-	       lsm->lsm_md_migrate_offset, lsm->lsm_md_migrate_hash,
+	       "dump LMV: refs %u magic=%#x count=%u index=%u hash=%s:%#x max_inherit=%hhu max_inherit_rr=%hhu version=%u migrate_offset=%u migrate_hash=%s:%x pool=%.*s\n",
+	       atomic_read(&lsmo->lso_refs), lsm->lsm_md_magic,
+	       lsm->lsm_md_stripe_count, lsm->lsm_md_master_mdt_index,
+	       lmv_is_known_hash_type(lsm->lsm_md_hash_type) ?
+	       mdt_hash_name[lsm->lsm_md_hash_type & LMV_HASH_TYPE_MASK] :
+	       "invalid", lsm->lsm_md_hash_type,
+	       lsm->lsm_md_max_inherit, lsm->lsm_md_max_inherit_rr,
+	       lsm->lsm_md_layout_version, lsm->lsm_md_migrate_offset,
+	       lmv_is_known_hash_type(lsm->lsm_md_migrate_hash) ?
+	       mdt_hash_name[lsm->lsm_md_migrate_hash & LMV_HASH_TYPE_MASK] :
+	       "invalid", lsm->lsm_md_migrate_hash,
 	       LOV_MAXPOOLNAME, lsm->lsm_md_pool_name);
 
-	if (!lmv_dir_striped(lsm))
+	if (!lmv_dir_striped(lsmo))
 		return;
 
 	for (i = 0; i < lsm->lsm_md_stripe_count; i++)
-		CDEBUG(mask, "stripe[%d] "DFID"\n",
-		       i, PFID(&lsm->lsm_md_oinfo[i].lmo_fid));
+		CDEBUG_LIMIT(mask, "stripe[%d] "DFID"\n",
+			     i, PFID(&lsm->lsm_md_oinfo[i].lmo_fid));
+}
+
+static inline bool
+lmv_object_inherited(const struct lmv_stripe_object *plsm,
+		     const struct lmv_stripe_object *clsm)
+{
+	return plsm && clsm &&
+	       plsm->lso_lsm.lsm_md_magic ==
+			clsm->lso_lsm.lsm_md_magic &&
+	       plsm->lso_lsm.lsm_md_stripe_count ==
+			clsm->lso_lsm.lsm_md_stripe_count &&
+	       plsm->lso_lsm.lsm_md_master_mdt_index ==
+			clsm->lso_lsm.lsm_md_master_mdt_index &&
+	       plsm->lso_lsm.lsm_md_hash_type ==
+			clsm->lso_lsm.lsm_md_hash_type &&
+	       lmv_inherit_next(plsm->lso_lsm.lsm_md_max_inherit) ==
+			clsm->lso_lsm.lsm_md_max_inherit &&
+	       lmv_inherit_rr_next(plsm->lso_lsm.lsm_md_max_inherit_rr) ==
+			clsm->lso_lsm.lsm_md_max_inherit_rr;
 }
 
 union lmv_mds_md;
 
-void lmv_free_memmd(struct lmv_stripe_md *lsm);
+struct lmv_stripe_object *lmv_stripe_object_alloc(__u32 magic,
+						  const union lmv_mds_md *lmm,
+						  size_t lmm_size);
+
+void lmv_stripe_object_put(struct lmv_stripe_object **lsm_obj);
+
+struct lmv_stripe_object *
+	lmv_stripe_object_get(struct lmv_stripe_object *lsm_obj);
 
 static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
 				  const struct lmv_mds_md_v1 *lmv_src)
@@ -240,7 +310,7 @@ static inline __u32 crush_hash(__u32 a, __u32 b)
  * algorithm.
  */
 static inline unsigned int
-lmv_hash_crush(unsigned int count, const char *name, int namelen)
+lmv_hash_crush(unsigned int count, const char *name, int namelen, bool crush2)
 {
 	unsigned long long straw;
 	unsigned long long highest_straw = 0;
@@ -253,10 +323,10 @@ lmv_hash_crush(unsigned int count, const char *name, int namelen)
 	 * 1. rsync: ..XXXXXX
 	 * 2. dstripe: .XXXXXXXX
 	 */
-	if (lu_name_is_temp_file(name, namelen, true, 6)) {
+	if (lu_name_is_temp_file(name, namelen, true, 6, crush2)) {
 		name++;
 		namelen -= 8;
-	} else if (lu_name_is_temp_file(name, namelen, false, 8)) {
+	} else if (lu_name_is_temp_file(name, namelen, false, 8, crush2)) {
 		namelen -= 9;
 	} else if (lu_name_is_backup_file(name, namelen, &i)) {
 		LASSERT(i < namelen);
@@ -335,7 +405,11 @@ __lmv_name_to_stripe_index(__u32 hash_type, __u32 stripe_count,
 		break;
 	case LMV_HASH_TYPE_CRUSH:
 		stripe_index = lmv_hash_crush(stripe_count, name,
-					      namelen);
+					      namelen, false);
+		break;
+	case LMV_HASH_TYPE_CRUSH2:
+		stripe_index = lmv_hash_crush(stripe_count, name,
+					      namelen, true);
 		break;
 	default:
 		return -EBADFD;
@@ -359,14 +433,16 @@ __lmv_name_to_stripe_index(__u32 hash_type, __u32 stripe_count,
 static inline int lmv_name_to_stripe_index(struct lmv_mds_md_v1 *lmv,
 					   const char *name, int namelen)
 {
-	if (lmv->lmv_magic == LMV_MAGIC_V1)
+	if (lmv->lmv_magic == LMV_MAGIC_V1 ||
+	    lmv->lmv_magic == LMV_MAGIC_STRIPE)
 		return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
 						  lmv->lmv_stripe_count,
 						  lmv->lmv_migrate_hash,
 						  lmv->lmv_migrate_offset,
 						  name, namelen, true);
 
-	if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1))
+	if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1) ||
+	    lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_STRIPE))
 		return __lmv_name_to_stripe_index(
 					le32_to_cpu(lmv->lmv_hash_type),
 					le32_to_cpu(lmv->lmv_stripe_count),
@@ -407,16 +483,19 @@ static inline bool lmv_user_magic_supported(__u32 lum_magic)
 	       lum_magic == LMV_MAGIC_FOREIGN;
 }
 
-#define LMV_DEBUG(mask, lmv, msg)					\
-	CDEBUG(mask,							\
-	       "%s LMV: magic=%#x count=%u index=%u hash=%s:%#x version=%u migrate offset=%u migrate hash=%s:%u.\n",\
-	       msg, (lmv)->lmv_magic, (lmv)->lmv_stripe_count,		\
-	       (lmv)->lmv_master_mdt_index,				\
-	       mdt_hash_name[(lmv)->lmv_hash_type & (LMV_HASH_TYPE_MAX - 1)],\
-	       (lmv)->lmv_hash_type, (lmv)->lmv_layout_version,		\
-	       (lmv)->lmv_migrate_offset,				\
-	       mdt_hash_name[(lmv)->lmv_migrate_hash & (LMV_HASH_TYPE_MAX - 1)],\
-	       (lmv)->lmv_migrate_hash)
+#define LMV_DEBUG(mask, lmv, msg)					\
+	CDEBUG_LIMIT(mask,						\
+		     "%s LMV: magic=%#x count=%u index=%u hash=%s:%#x version=%u migrate_offset=%u migrate_hash=%s:%x pool=%.*s\n",\
+		     msg, (lmv)->lmv_magic, (lmv)->lmv_stripe_count,	\
+		     (lmv)->lmv_master_mdt_index,			\
+		     lmv_is_known_hash_type((lmv)->lmv_hash_type) ?	\
+		     mdt_hash_name[(lmv)->lmv_hash_type & LMV_HASH_TYPE_MASK] :\
+		     "invalid", (lmv)->lmv_hash_type,			\
+		     (lmv)->lmv_layout_version, (lmv)->lmv_migrate_offset,\
+		     lmv_is_known_hash_type((lmv)->lmv_migrate_hash) ?	\
+		     mdt_hash_name[(lmv)->lmv_migrate_hash & LMV_HASH_TYPE_MASK] :\
+		     "invalid", (lmv)->lmv_migrate_hash,		\
+		     LOV_MAXPOOLNAME, lmv->lmv_pool_name)
 
 /* master LMV is sane */
 static inline bool lmv_is_sane(const struct lmv_mds_md_v1 *lmv)
@@ -430,12 +509,12 @@ static inline bool lmv_is_sane(const struct lmv_mds_md_v1 *lmv)
 	if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
 		goto insane;
 
-	if (!lmv_is_known_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
+	if (!lmv_is_sane_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
 		goto insane;
 
 	return true;
 insane:
-	LMV_DEBUG(D_ERROR, lmv, "insane");
+	LMV_DEBUG(D_ERROR, lmv, "unknown layout");
 	return false;
 }
 
@@ -452,12 +531,12 @@ static inline bool lmv_is_sane2(const struct lmv_mds_md_v1 *lmv)
 	if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
 		goto insane;
 
-	if (!lmv_is_known_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
+	if (!lmv_is_sane_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
 		goto insane;
 
 	return true;
 insane:
-	LMV_DEBUG(D_ERROR, lmv, "insane");
+	LMV_DEBUG(D_ERROR, lmv, "unknown layout");
 	return false;
 }
 
@@ -504,4 +583,9 @@ static inline bool lmv_is_layout_changing(const struct lmv_mds_md_v1 *lmv)
 	       lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));
 }
 
+static inline bool lmv_is_fixed(const struct lmv_mds_md_v1 *lmv)
+{
+	return cpu_to_le32(lmv->lmv_hash_type) & LMV_HASH_FLAG_FIXED;
+}
+
 #endif
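
Note on the new declarations above: the patch replaces lmv_free_memmd() with a reference-counted struct lmv_stripe_object managed through lmv_stripe_object_alloc()/lmv_stripe_object_get()/lmv_stripe_object_put(). The sketch below only illustrates how a caller might use that API; it assumes alloc() returns an object holding one reference (or an ERR_PTR on failure), get() takes an extra reference, and put() drops one and clears the caller's pointer. example_attach_dir_layout(), struct example_dir, and its ed_lso field are hypothetical and not part of this patch.

/* Hypothetical caller: not part of the patch, shown only to illustrate the
 * assumed lifetime rules of the reference-counted lmv_stripe_object.
 */
static int example_attach_dir_layout(struct example_dir *dir,
				     const union lmv_mds_md *lmm,
				     size_t lmm_size)
{
	struct lmv_stripe_object *lso;

	/* assumed: on success the object starts with lso_refs == 1 */
	lso = lmv_stripe_object_alloc(le32_to_cpu(lmm->lmv_magic), lmm,
				      lmm_size);
	if (IS_ERR(lso))
		return PTR_ERR(lso);

	/* keep a long-lived reference in the (hypothetical) directory */
	dir->ed_lso = lmv_stripe_object_get(lso);

	if (lmv_dir_striped(lso))
		lmv_stripe_object_dump(D_INODE, lso);

	/* assumed: drops the local reference and sets lso = NULL */
	lmv_stripe_object_put(&lso);
	return 0;
}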