X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Flustre_fid.h;h=55ecd7726d7a579a4f0e6f911f636da383ee2815;hb=383acceb3a045098ea4b93ed07633f701b04f4fe;hp=f31c4b5d27416882c6dfa4d7acd311b44b52dc66;hpb=0fabe8d4962e93a0bf0207edb7c0ffe87ba21be7;p=fs%2Flustre-release.git diff --git a/lustre/include/lustre_fid.h b/lustre/include/lustre_fid.h index f31c4b5..55ecd77 100644 --- a/lustre/include/lustre_fid.h +++ b/lustre/include/lustre_fid.h @@ -88,24 +88,15 @@ enum { LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH) }; -/** special fid seq: used for local object create. */ -#define FID_SEQ_LOCAL_FILE (FID_SEQ_START + 1) - -/** special fid seq: used for .lustre objects. */ -#define LU_DOT_LUSTRE_SEQ (FID_SEQ_START + 0x02ULL) - -/* Note that reserved SEQ numbers below 12 will conflict with ldiskfs - * inodes in the IGIF namespace, so these reserved SEQ numbers must be - * used sparingly until ldiskfs-based MDT backends and/or IGIF FIDs - * have been completely removed. */ - -/** fid sequence for distributed fs objects */ -#define FID_SEQ_DISTRIBUTED_START (FID_SEQ_START + 0x400ULL) +enum { + /** 2^6 FIDs for OI containers */ + OSD_OI_FID_OID_BITS = 6, + /** reserve enough FIDs in case we want more in the future */ + OSD_OI_FID_OID_BITS_MAX = 10, +}; /** special OID for local objects */ -enum { - /** \see osd_oi_index_create */ - OSD_OI_FID_16_OID = 2UL, +enum local_oid { /** \see fld_mod_init */ FLD_INDEX_OID = 3UL, /** \see fid_mod_init */ @@ -121,6 +112,22 @@ enum { MDT_LAST_RECV_OID = 11UL, /** \see osd_mod_init */ OSD_REM_OBJ_DIR_OID = 12UL, + OSD_FS_ROOT_OID = 13UL, + ACCT_USER_OID = 15UL, + ACCT_GROUP_OID = 16UL, + OFD_LAST_RECV_OID = 19UL, + OFD_GROUP0_LAST_OID = 20UL, + OFD_GROUP4K_LAST_OID = 20UL+4096, + OFD_LAST_GROUP_OID = 4117UL, + LLOG_CATALOGS_OID = 4118UL, + MGS_CONFIGS_OID = 4119UL, + OFD_HEALTH_CHECK_OID = 4120UL, + + /** first OID for first OI fid */ + OSD_OI_FID_OID_FIRST = 5000UL, + /** reserve enough in case we want to have more in the future */ + OSD_OI_FID_OID_MAX = OSD_OI_FID_OID_FIRST + + (1UL << OSD_OI_FID_OID_BITS_MAX), }; static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid) @@ -178,6 +185,10 @@ struct lu_client_seq { /* Seq-server for direct talking */ struct lu_server_seq *lcs_srv; + + /* wait queue for fid allocation and update indicator */ + cfs_waitq_t lcs_waitq; + int lcs_update; }; /* server sequence manager interface */ @@ -228,8 +239,8 @@ struct lu_server_seq { */ __u64 lss_set_width; - /* transaction no of seq update write operation */ - __u64 lss_set_transno; + /* sync is needed for update operation */ + __u32 lss_need_sync; /** * Pointer to site object, required to access site fld. */ @@ -272,8 +283,10 @@ void seq_client_fini(struct lu_client_seq *seq); void seq_client_flush(struct lu_client_seq *seq); -int seq_client_alloc_fid(struct lu_client_seq *seq, +int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq, struct lu_fid *fid); +int seq_client_get_seq(const struct lu_env *env, struct lu_client_seq *seq, + seqno_t *seqnr); /* Fids common stuff */ int fid_is_local(const struct lu_env *env, @@ -329,15 +342,16 @@ fid_build_pdo_res_name(const struct lu_fid *f, /** - * Flatten 128-bit FID values into a 64-bit value for - * use as an inode number. 
For non-IGIF FIDs this - * starts just over 2^32, and continues without conflict - * until 2^64, at which point we wrap the high 32 bits - * of the SEQ into the range where there may not be many - * OID values in use, to minimize the risk of conflict. + * Flatten 128-bit FID values into a 64-bit value for use as an inode number. + * For non-IGIF FIDs this starts just over 2^32, and continues without + * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ + * into the range where there may not be many OID values in use, to minimize + * the risk of conflict. * - * The time between re-used inode numbers is very long - - * 2^32 SEQ numbers, or about 2^32 client mounts. */ + * Suppose LUSTRE_SEQ_MAX_WIDTH less than (1 << 24) which is currently true, + * the time between re-used inode numbers is very long - 2^40 SEQ numbers, + * or about 2^40 client mounts, if clients create less than 2^24 files/mount. + */ static inline __u64 fid_flatten(const struct lu_fid *fid) { __u64 ino; @@ -350,11 +364,18 @@ static inline __u64 fid_flatten(const struct lu_fid *fid) seq = fid_seq(fid); - ino = (seq << 24) + ((seq >> (64-8)) & 0xffffff0000ULL) + fid_oid(fid); + ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid); RETURN(ino ? ino : fid_oid(fid)); } +static inline __u32 fid_hash(const struct lu_fid *f, int bits) +{ + /* all objects with same id and different versions will belong to same + * collisions list. */ + return cfs_hash_long(fid_flatten(f), bits); +} + /** * map fid to 32 bit value for ino on 32bit systems. */ static inline __u32 fid_flatten32(const struct lu_fid *fid) @@ -369,15 +390,14 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid) seq = fid_seq(fid) - FID_SEQ_START; - /* - map the high bits of the OID into higher bits of the inode number so that - inodes generated at about the same time have a reduced chance of collisions. - This will give a period of 1024 clients and 128 k = 128M inodes without collisions. - */ - + /* Map the high bits of the OID into higher bits of the inode number so + * that inodes generated at about the same time have a reduced chance + * of collisions. This will give a period of 2^12 = 1024 unique clients + * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects + * (from OID), or up to 128M inodes without collisions for new files. */ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) + (seq >> (64 - (40-8)) & 0xffffff00) + - (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 16); + (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8); RETURN(ino ? 
ino : fid_oid(fid)); } @@ -390,28 +410,32 @@ static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq { dst->lsr_start = cpu_to_le64(src->lsr_start); dst->lsr_end = cpu_to_le64(src->lsr_end); - dst->lsr_mdt = cpu_to_le32(src->lsr_mdt); + dst->lsr_index = cpu_to_le32(src->lsr_index); + dst->lsr_flags = cpu_to_le32(src->lsr_flags); } static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src) { dst->lsr_start = le64_to_cpu(src->lsr_start); dst->lsr_end = le64_to_cpu(src->lsr_end); - dst->lsr_mdt = le32_to_cpu(src->lsr_mdt); + dst->lsr_index = le32_to_cpu(src->lsr_index); + dst->lsr_flags = le32_to_cpu(src->lsr_flags); } static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src) { dst->lsr_start = cpu_to_be64(src->lsr_start); dst->lsr_end = cpu_to_be64(src->lsr_end); - dst->lsr_mdt = cpu_to_be32(src->lsr_mdt); + dst->lsr_index = cpu_to_be32(src->lsr_index); + dst->lsr_flags = cpu_to_be32(src->lsr_flags); } static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src) { dst->lsr_start = be64_to_cpu(src->lsr_start); dst->lsr_end = be64_to_cpu(src->lsr_end); - dst->lsr_mdt = be32_to_cpu(src->lsr_mdt); + dst->lsr_index = be32_to_cpu(src->lsr_index); + dst->lsr_flags = be32_to_cpu(src->lsr_flags); } /** @} fid */
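
A note on the rewritten fid_flatten()/fid_flatten32() comments above: the sketch below restates the two flattening rules from this hunk as a small standalone userspace program, so the bit layout can be checked without building the tree. struct demo_fid, DEMO_FID_SEQ_START (taken here as 0x200000000ULL) and the sample FID in main() are illustrative stand-ins for struct lu_fid, FID_SEQ_START and a real client-allocated FID, not definitions from this header; treat the whole thing as an assumption-laden sketch rather than the canonical code.

/*
 * Standalone sketch of the FID flattening described in the comments above.
 * demo_fid only mirrors the f_seq/f_oid fields of struct lu_fid, and
 * DEMO_FID_SEQ_START is an assumed value for FID_SEQ_START; both are
 * placeholders for illustration.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_fid {
	uint64_t f_seq;		/* sequence number, like lu_fid::f_seq */
	uint32_t f_oid;		/* object id within the sequence */
};

#define DEMO_FID_SEQ_START	0x200000000ULL	/* assumed FID_SEQ_START */

/* 64-bit flattening: SEQ shifted up by 24 bits, its high 24 bits folded
 * back into bits 16..39 of the inode number, OID filling the low bits. */
static uint64_t demo_fid_flatten(const struct demo_fid *fid)
{
	uint64_t seq = fid->f_seq;
	uint64_t ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) +
		       fid->f_oid;

	return ino ? ino : fid->f_oid;
}

/* 32-bit flattening for 32-bit clients: SEQ (relative to FID_SEQ_START)
 * and OID are interleaved so that FIDs allocated close together spread
 * across the 32-bit inode space instead of clustering. */
static uint32_t demo_fid_flatten32(const struct demo_fid *fid)
{
	uint64_t seq = fid->f_seq - DEMO_FID_SEQ_START;
	uint32_t ino;

	ino = (uint32_t)(((seq & 0x000fffffULL) << 12) +
			 ((seq >> 8) & 0xfffff000) +
			 ((seq >> (64 - (40 - 8))) & 0xffffff00) +
			 (fid->f_oid & 0xff000fff) +
			 ((fid->f_oid & 0x00fff000) << 8));

	return ino ? ino : fid->f_oid;
}

int main(void)
{
	/* hypothetical FID: first sequence past FID_SEQ_START + 0x400, OID 42 */
	struct demo_fid fid = {
		.f_seq = DEMO_FID_SEQ_START + 0x400ULL,
		.f_oid = 42,
	};

	printf("64-bit ino: %#llx\n",
	       (unsigned long long)demo_fid_flatten(&fid));
	printf("32-bit ino: %#llx\n",
	       (unsigned long long)demo_fid_flatten32(&fid));
	return 0;
}

The same 64-bit flattened value is what the new fid_hash() helper feeds into cfs_hash_long(), so any collision in fid_flatten() translates directly into a hash-bucket collision there.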