X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre_fid.h;h=32d680ce84fc22508287f8a570416dff6c7241fa;hp=51a5ae5312db558b09641699a97b8d04d32f2f87;hb=4f91d5161d005eed6ff7a9fe6abea99690f4bcb7;hpb=09803193a151902acc39720946b831b90655c4a8

diff --git a/lustre/include/lustre_fid.h b/lustre/include/lustre_fid.h
index 51a5ae5..32d680c 100644
--- a/lustre/include/lustre_fid.h
+++ b/lustre/include/lustre_fid.h
@@ -26,8 +26,10 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -49,11 +51,11 @@
 /*
  * struct lu_fid
  */
+#include <libcfs/libcfs.h>
 #include <lustre/lustre_idl.h>
 #include <lustre_req_layout.h>
 #include <lustre_mdt.h>
-#include <libcfs/libcfs.h>
 
 struct lu_site;
 struct lu_context;
@@ -88,24 +90,15 @@ enum {
         LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH)
 };
 
-/** special fid seq: used for local object create. */
-#define FID_SEQ_LOCAL_FILE      (FID_SEQ_START + 1)
-
-/** special fid seq: used for .lustre objects. */
-#define LU_DOT_LUSTRE_SEQ       (FID_SEQ_START + 0x02ULL)
-
-/* Note that reserved SEQ numbers below 12 will conflict with ldiskfs
- * inodes in the IGIF namespace, so these reserved SEQ numbers must be
- * used sparingly until ldiskfs-based MDT backends and/or IGIF FIDs
- * have been completely removed. */
-
-/** fid sequence for distributed fs objects */
-#define FID_SEQ_DISTRIBUTED_START (FID_SEQ_START + 0x400ULL)
+enum {
+        /** 2^6 FIDs for OI containers */
+        OSD_OI_FID_OID_BITS     = 6,
+        /** reserve enough FIDs in case we want more in the future */
+        OSD_OI_FID_OID_BITS_MAX = 10,
+};
 
 /** special OID for local objects */
-enum {
-        /** \see osd_oi_index_create */
-        OSD_OI_FID_16_OID       = 2UL,
+enum local_oid {
         /** \see fld_mod_init */
         FLD_INDEX_OID           = 3UL,
         /** \see fid_mod_init */
@@ -121,6 +114,22 @@ enum {
         MDT_LAST_RECV_OID       = 11UL,
         /** \see osd_mod_init */
         OSD_REM_OBJ_DIR_OID     = 12UL,
+        OSD_FS_ROOT_OID         = 13UL,
+        ACCT_USER_OID           = 15UL,
+        ACCT_GROUP_OID          = 16UL,
+        OFD_LAST_RECV_OID       = 19UL,
+        OFD_GROUP0_LAST_OID     = 20UL,
+        OFD_GROUP4K_LAST_OID    = 20UL+4096,
+        OFD_LAST_GROUP_OID      = 4117UL,
+        LLOG_CATALOGS_OID       = 4118UL,
+        MGS_CONFIGS_OID         = 4119UL,
+        OFD_HEALTH_CHECK_OID    = 4120UL,
+
+        /** first OID for first OI fid */
+        OSD_OI_FID_OID_FIRST    = 5000UL,
+        /** reserve enough in case we want to have more in the future */
+        OSD_OI_FID_OID_MAX      = OSD_OI_FID_OID_FIRST +
+                                  (1UL << OSD_OI_FID_OID_BITS_MAX),
 };
 
 static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
@@ -178,6 +187,10 @@ struct lu_client_seq {
 
         /* Seq-server for direct talking */
         struct lu_server_seq   *lcs_srv;
+
+        /* wait queue for fid allocation and update indicator */
+        cfs_waitq_t             lcs_waitq;
+        int                     lcs_update;
 };
 
 /* server sequence manager interface */
@@ -228,8 +241,8 @@ struct lu_server_seq {
          */
         __u64              lss_set_width;
 
-        /* transaction no of seq update write operation */
-        __u64              lss_set_transno;
+        /* sync is needed for update operation */
+        __u32              lss_need_sync;
         /**
          * Pointer to site object, required to access site fld.
          */
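The hunk above reserves fixed, well-known OIDs for local objects; lu_local_obj_fid() pairs one of them with the reserved local-object sequence FID_SEQ_LOCAL_FILE (defined as FID_SEQ_START + 1 in the lines removed above) to form the object's FID. Below is a minimal standalone sketch of that composition, an illustration rather than the in-tree code: the numeric FID_SEQ_START value and the seq/oid/ver layout of struct lu_fid are assumptions, and <stdint.h> types stand in for the kernel __u64/__u32.

/* sketch_local_fid.c -- standalone illustration, not the Lustre source */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* assumed value of FID_SEQ_START for this era of the code */
#define FID_SEQ_START      0x200000000ULL
#define FID_SEQ_LOCAL_FILE (FID_SEQ_START + 1)  /* seq reserved for local objects */

#define ACCT_USER_OID      15UL                 /* from enum local_oid above */

struct fid_sketch {
        uint64_t f_seq;   /* sequence number */
        uint32_t f_oid;   /* object id inside the sequence */
        uint32_t f_ver;   /* version, unused for local objects */
};

/* what lu_local_obj_fid() is assumed to do: pair the reserved local
 * sequence with one of the well-known OIDs reserved above */
static void local_obj_fid(struct fid_sketch *fid, uint32_t oid)
{
        fid->f_seq = FID_SEQ_LOCAL_FILE;
        fid->f_oid = oid;
        fid->f_ver = 0;
}

int main(void)
{
        struct fid_sketch fid;

        local_obj_fid(&fid, ACCT_USER_OID);
        /* prints [0x200000001:0xf:0x0] under the assumptions above */
        printf("[0x%" PRIx64 ":0x%" PRIx32 ":0x%" PRIx32 "]\n",
               fid.f_seq, fid.f_oid, fid.f_ver);
        return 0;
}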
@@ -272,8 +285,10 @@ void seq_client_fini(struct lu_client_seq *seq);
 
 void seq_client_flush(struct lu_client_seq *seq);
 
-int seq_client_alloc_fid(struct lu_client_seq *seq,
+int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq,
                          struct lu_fid *fid);
+int seq_client_get_seq(const struct lu_env *env, struct lu_client_seq *seq,
+                       seqno_t *seqnr);
 
 /* Fids common stuff */
 int fid_is_local(const struct lu_env *env,
@@ -283,15 +298,12 @@ int fid_is_local(const struct lu_env *env,
 
 struct ldlm_namespace;
 
-enum {
-        LUSTRE_RES_ID_SEQ_OFF = 0,
-        LUSTRE_RES_ID_OID_OFF = 1,
-        LUSTRE_RES_ID_VER_OFF = 2,
-        LUSTRE_RES_ID_HSH_OFF = 3
-};
-
 /*
- * Build (DLM) resource name from fid.
+ * Build (DLM) resource name from FID.
+ *
+ * NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
+ * but it was moved into name[1] along with the OID to avoid consuming the
+ * remaining name[2,3] fields that need to be used for the quota identifier.
  */
 static inline struct ldlm_res_id *
 fid_build_reg_res_name(const struct lu_fid *f,
@@ -299,8 +311,7 @@ fid_build_reg_res_name(const struct lu_fid *f,
 {
         memset(name, 0, sizeof *name);
         name->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(f);
-        name->name[LUSTRE_RES_ID_OID_OFF] = fid_oid(f);
-        name->name[LUSTRE_RES_ID_VER_OFF] = fid_ver(f);
+        name->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(f);
         return name;
 }
@@ -310,10 +321,8 @@ fid_build_reg_res_name(const struct lu_fid *f,
 static inline int fid_res_name_eq(const struct lu_fid *f,
                                   const struct ldlm_res_id *name)
 {
-        return
-                name->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(f) &&
-                name->name[LUSTRE_RES_ID_OID_OFF] == fid_oid(f) &&
-                name->name[LUSTRE_RES_ID_VER_OFF] == fid_ver(f);
+        return name->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(f) &&
+               name->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(f);
 }
@@ -329,15 +338,16 @@ fid_build_pdo_res_name(const struct lu_fid *f,
 
 /**
- * Flatten 128-bit FID values into a 64-bit value for
- * use as an inode number. For non-IGIF FIDs this
- * starts just over 2^32, and continues without conflict
- * until 2^64, at which point we wrap the high 32 bits
- * of the SEQ into the range where there may not be many
- * OID values in use, to minimize the risk of conflict.
+ * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
+ * For non-IGIF FIDs this starts just over 2^32, and continues without
+ * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
+ * into the range where there may not be many OID values in use, to minimize
+ * the risk of conflict.
  *
- * The time between re-used inode numbers is very long -
- * 2^32 SEQ numbers, or about 2^32 client mounts. */
+ * Provided that LUSTRE_SEQ_MAX_WIDTH is less than (1 << 24), which is
+ * currently true, the time between re-used inode numbers is very long:
+ * 2^40 SEQ numbers, or about 2^40 client mounts, if clients create fewer
+ * than 2^24 files per mount.
+ */
 static inline __u64 fid_flatten(const struct lu_fid *fid)
 {
         __u64 ino;
@@ -350,11 +360,18 @@ static inline __u64 fid_flatten(const struct lu_fid *fid)
 
         seq = fid_seq(fid);
 
-        ino = (seq << 24) + ((seq >> (64-8)) & 0xffffff0000ULL) + fid_oid(fid);
+        ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
 
         RETURN(ino ? ino : fid_oid(fid));
 }
 
+static inline __u32 fid_hash(const struct lu_fid *f, int bits)
+{
+        /* all objects with the same ID and different versions will belong
+         * to the same collision list */
+        return cfs_hash_long(fid_flatten(f), bits);
+}
+
 /**
  * map fid to 32 bit value for ino on 32bit systems.
  */
 static inline __u32 fid_flatten32(const struct lu_fid *fid)
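The resource-name hunks above pack the FID version and OID into a single 64-bit slot via fid_ver_oid(), so only name[0] and name[1] are consumed and name[2,3] stay free for the quota identifier. The sketch below illustrates that packing in isolation; it assumes fid_ver_oid() is simply (ver << 32 | oid) and uses hypothetical stand-in types rather than the real struct lu_fid and struct ldlm_res_id.

/* sketch_res_name.c -- standalone illustration, not the Lustre source */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RES_NAME_SIZE       4
#define RES_ID_SEQ_OFF      0   /* name[0]: FID sequence */
#define RES_ID_VER_OID_OFF  1   /* name[1]: version << 32 | OID */
                                /* name[2..3]: left free for the quota id */

struct fid_sketch    { uint64_t f_seq; uint32_t f_oid; uint32_t f_ver; };
struct res_id_sketch { uint64_t name[RES_NAME_SIZE]; };

/* assumed definition of fid_ver_oid(): both 32-bit values in one slot */
static uint64_t ver_oid(const struct fid_sketch *f)
{
        return ((uint64_t)f->f_ver << 32) | f->f_oid;
}

/* mirrors the new fid_build_reg_res_name(): seq in name[0], ver+oid in name[1] */
static void build_reg_res_name(const struct fid_sketch *f,
                               struct res_id_sketch *name)
{
        memset(name, 0, sizeof(*name));
        name->name[RES_ID_SEQ_OFF]     = f->f_seq;
        name->name[RES_ID_VER_OID_OFF] = ver_oid(f);
}

/* mirrors the new fid_res_name_eq(): only two slots need to match */
static int res_name_eq(const struct fid_sketch *f,
                       const struct res_id_sketch *name)
{
        return name->name[RES_ID_SEQ_OFF] == f->f_seq &&
               name->name[RES_ID_VER_OID_OFF] == ver_oid(f);
}

int main(void)
{
        struct fid_sketch f = { .f_seq = 0x200000401ULL, .f_oid = 0x16, .f_ver = 0 };
        struct res_id_sketch name;

        build_reg_res_name(&f, &name);
        printf("name[0]=%#" PRIx64 " name[1]=%#" PRIx64 " eq=%d\n",
               name.name[0], name.name[1], res_name_eq(&f, &name));
        return 0;
}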
@@ -369,8 +386,15 @@
         seq = fid_seq(fid) - FID_SEQ_START;
 
-        ino = ((seq & 0xfffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
-              (seq >> (64 - (40-8)) & 0xffffff00) + fid_oid(fid);
+        /* Map the high bits of the OID into higher bits of the inode number
+         * so that inodes generated at about the same time have a reduced
+         * chance of collisions. This will give a period of 2^12 = 1024
+         * unique clients (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20)
+         * = 128k objects (from OID), or up to 128M inodes without collisions
+         * for new files. */
+        ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
+              (seq >> (64 - (40-8)) & 0xffffff00) +
+              (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
+
         RETURN(ino ? ino : fid_oid(fid));
 }
@@ -382,28 +406,32 @@ static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq
 {
         dst->lsr_start = cpu_to_le64(src->lsr_start);
         dst->lsr_end = cpu_to_le64(src->lsr_end);
-        dst->lsr_mdt = cpu_to_le32(src->lsr_mdt);
+        dst->lsr_index = cpu_to_le32(src->lsr_index);
+        dst->lsr_flags = cpu_to_le32(src->lsr_flags);
 }
 
 static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
 {
         dst->lsr_start = le64_to_cpu(src->lsr_start);
         dst->lsr_end = le64_to_cpu(src->lsr_end);
-        dst->lsr_mdt = le32_to_cpu(src->lsr_mdt);
+        dst->lsr_index = le32_to_cpu(src->lsr_index);
+        dst->lsr_flags = le32_to_cpu(src->lsr_flags);
 }
 
 static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
 {
         dst->lsr_start = cpu_to_be64(src->lsr_start);
         dst->lsr_end = cpu_to_be64(src->lsr_end);
-        dst->lsr_mdt = cpu_to_be32(src->lsr_mdt);
+        dst->lsr_index = cpu_to_be32(src->lsr_index);
+        dst->lsr_flags = cpu_to_be32(src->lsr_flags);
 }
 
 static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
 {
         dst->lsr_start = be64_to_cpu(src->lsr_start);
         dst->lsr_end = be64_to_cpu(src->lsr_end);
-        dst->lsr_mdt = be32_to_cpu(src->lsr_mdt);
+        dst->lsr_index = be32_to_cpu(src->lsr_index);
+        dst->lsr_flags = be32_to_cpu(src->lsr_flags);
 }
 
 /** @} fid */
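The last four hunks extend the lu_seq_range byte-order helpers so that the renamed lsr_index and the new lsr_flags fields are swapped along with the range bounds. Below is a userspace sketch of the same cpu-to-little-endian pattern; htole64()/htole32() from glibc's <endian.h> stand in for the kernel cpu_to_le64()/cpu_to_le32(), and the field layout shown is an assumption, not the real struct lu_seq_range.

/* sketch_range_swab.c -- standalone userspace illustration, not the Lustre source */
#include <endian.h>     /* htole64()/htole32(), glibc */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* assumed layout; the real struct lu_seq_range is defined elsewhere */
struct seq_range_sketch {
        uint64_t lsr_start;   /* first sequence in the range */
        uint64_t lsr_end;     /* end of the range */
        uint32_t lsr_index;   /* index of the server owning the range */
        uint32_t lsr_flags;   /* type of the range */
};

/* same pattern as range_cpu_to_le() above: every field, including the
 * renamed lsr_index and the new lsr_flags, gets byte-swapped */
static void range_to_le(struct seq_range_sketch *dst,
                        const struct seq_range_sketch *src)
{
        dst->lsr_start = htole64(src->lsr_start);
        dst->lsr_end   = htole64(src->lsr_end);
        dst->lsr_index = htole32(src->lsr_index);
        dst->lsr_flags = htole32(src->lsr_flags);
}

int main(void)
{
        struct seq_range_sketch cpu = {
                .lsr_start = 0x200000400ULL,
                .lsr_end   = 0x200000800ULL,
                .lsr_index = 0,
                .lsr_flags = 0,
        };
        struct seq_range_sketch wire;

        range_to_le(&wire, &cpu);
        printf("lsr_start on the wire: 0x%016" PRIx64 "\n", wire.lsr_start);
        return 0;
}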