X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Flustre_fid.h;h=68947353acda24865b61a7be83ec2df48810527e;hb=e99739d9abf08ca8d76df281e481924fc2ae6849;hp=d94a2f67a4b1eb52557adb1e90f7ebcfa37d08e8;hpb=70e80ade90af09300396706b8910e196a7928520;p=fs%2Flustre-release.git

diff --git a/lustre/include/lustre_fid.h b/lustre/include/lustre_fid.h
index d94a2f6..6894735 100644
--- a/lustre/include/lustre_fid.h
+++ b/lustre/include/lustre_fid.h
@@ -16,8 +16,8 @@
  * in the LICENSE file that accompanied this code).
  *
  * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  * CA 95054 USA or visit www.sun.com if you need additional information or
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -41,44 +41,81 @@
 #ifndef __LINUX_FID_H
 #define __LINUX_FID_H
 
+/** \defgroup fid fid
+ *
+ * @{
+ */
+
 /*
  * struct lu_fid
  */
+#include 
 #include 
 #include 
 #include 
-#include 
 
 struct lu_site;
 struct lu_context;
 
 /* Whole sequences space range and zero range definitions */
-extern const struct lu_range LUSTRE_SEQ_SPACE_RANGE;
-extern const struct lu_range LUSTRE_SEQ_ZERO_RANGE;
+extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE;
+extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE;
 extern const struct lu_fid LUSTRE_BFL_FID;
+extern const struct lu_fid LU_OBF_FID;
+extern const struct lu_fid LU_DOT_LUSTRE_FID;
 
 enum {
         /*
-         * This is how may FIDs may be allocated in one sequence. 16384 for
-         * now.
+         * This is how many FIDs may be allocated in one sequence (128k).
          */
-        LUSTRE_SEQ_MAX_WIDTH = 0x0000000000004000ULL,
+        LUSTRE_SEQ_MAX_WIDTH = 0x0000000000020000ULL,
 
         /*
-         * How many sequences may be allocate for meta-sequence (this is 128
-         * sequences).
+         * How many sequences to allocate to a client at once.
          */
-        /* changed to 16 to avoid overflow in test11 */
-        LUSTRE_SEQ_META_WIDTH = 0x0000000000000010ULL,
+        LUSTRE_SEQ_META_WIDTH = 0x0000000000000001ULL,
+
+        /*
+         * seq allocation pool size.
+         */
+        LUSTRE_SEQ_BATCH_WIDTH = LUSTRE_SEQ_META_WIDTH * 1000,
 
         /*
          * This is how many sequences may be in one super-sequence allocated to
          * MDTs.
          */
-        LUSTRE_SEQ_SUPER_WIDTH = (LUSTRE_SEQ_META_WIDTH * LUSTRE_SEQ_META_WIDTH)
+        LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH)
 };
 
+/** special OID for local objects */
+enum local_oid {
+        /** \see osd_oi_index_create */
+        OSD_OI_FID_16_OID       = 2UL,
+        /** \see fld_mod_init */
+        FLD_INDEX_OID           = 3UL,
+        /** \see fid_mod_init */
+        FID_SEQ_CTL_OID         = 4UL,
+        FID_SEQ_SRV_OID         = 5UL,
+        /** \see mdd_mod_init */
+        MDD_ROOT_INDEX_OID      = 6UL,
+        MDD_ORPHAN_OID          = 7UL,
+        MDD_LOV_OBJ_OID         = 8UL,
+        MDD_CAPA_KEYS_OID       = 9UL,
+        MDD_OBJECTS_OID         = 10UL,
+        /** \see mdt_mod_init */
+        MDT_LAST_RECV_OID       = 11UL,
+        /** \see osd_mod_init */
+        OSD_REM_OBJ_DIR_OID     = 12UL,
+};
+
+static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
+{
+        fid->f_seq = FID_SEQ_LOCAL_FILE;
+        fid->f_oid = oid;
+        fid->f_ver = 0;
+}
+
 enum lu_mgr_type {
         LUSTRE_SEQ_SERVER,
         LUSTRE_SEQ_CONTROLLER
@@ -95,14 +132,14 @@ struct lu_server_seq;
 struct lu_client_seq {
         /* Sequence-controller export. */
         struct obd_export *lcs_exp;
-        struct semaphore lcs_sem;
+        cfs_semaphore_t lcs_sem;
 
         /*
          * Range of allowed for allocation sequeces. When using lu_client_seq on
          * clients, this contains meta-sequence range. And for servers this
          * contains super-sequence range.
          */
-        struct lu_range lcs_space;
+        struct lu_seq_range lcs_space;
 
         /* Seq related proc */
         cfs_proc_dir_entry_t *lcs_proc_dir;
@@ -127,12 +164,20 @@ struct lu_client_seq {
         /* Seq-server for direct talking */
         struct lu_server_seq *lcs_srv;
+
+        /* wait queue for fid allocation and update indicator */
+        cfs_waitq_t lcs_waitq;
+        int lcs_update;
 };
 
 /* server sequence manager interface */
 struct lu_server_seq {
         /* Available sequences space */
-        struct lu_range lss_space;
+        struct lu_seq_range lss_space;
+
+        /* keeps the high-water mark in lsr_end for the seq allocation algorithm */
+        struct lu_seq_range lss_lowater_set;
+        struct lu_seq_range lss_hiwater_set;
 
         /*
          * Device for server side seq manager needs (saving sequences to backing
@@ -153,7 +198,7 @@ struct lu_server_seq {
         struct lu_client_seq *lss_cli;
 
         /* Semaphore for protecting allocation */
-        struct semaphore lss_sem;
+        cfs_semaphore_t lss_sem;
 
         /*
          * Service uuid, passed from MDT + seq name to form unique seq name to
@@ -166,6 +211,19 @@ struct lu_server_seq {
          * LUSTRE_SEQ_SUPER_WIDTH and LUSTRE_SEQ_META_WIDTH.
          */
         __u64 lss_width;
+
+        /*
+         * minimum lss_alloc_set size that should be allocated from
+         * lss_space
+         */
+        __u64 lss_set_width;
+
+        /* transaction number of the seq update write operation */
+        __u64 lss_set_transno;
+        /**
+         * Pointer to site object, required to access site fld.
+         */
+        struct md_site *lss_site;
 };
 
 int seq_query(struct com_thread_info *info);
@@ -175,19 +233,18 @@ int seq_server_init(struct lu_server_seq *seq,
                     struct dt_device *dev,
                     const char *prefix,
                     enum lu_mgr_type type,
+                    struct md_site *ls,
                     const struct lu_env *env);
 
 void seq_server_fini(struct lu_server_seq *seq,
                      const struct lu_env *env);
 
 int seq_server_alloc_super(struct lu_server_seq *seq,
-                           struct lu_range *in,
-                           struct lu_range *out,
+                           struct lu_seq_range *out,
                            const struct lu_env *env);
 
 int seq_server_alloc_meta(struct lu_server_seq *seq,
-                          struct lu_range *in,
-                          struct lu_range *out,
+                          struct lu_seq_range *out,
                           const struct lu_env *env);
 
 int seq_server_set_cli(struct lu_server_seq *seq,
@@ -209,7 +266,8 @@ int seq_client_alloc_fid(struct lu_client_seq *seq, struct lu_fid *fid);
 
 /* Fids common stuff */
-int fid_is_local(struct lu_site *site, const struct lu_fid *fid);
+int fid_is_local(const struct lu_env *env,
+                 struct lu_site *site, const struct lu_fid *fid);
 
 /* fid locking */
 
@@ -259,18 +317,104 @@ fid_build_pdo_res_name(const struct lu_fid *f,
         return name;
 }
 
+
+/**
+ * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
+ * For non-IGIF FIDs this starts just over 2^32, and continues without
+ * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
+ * into the range where there may not be many OID values in use, to minimize
+ * the risk of conflict.
+ *
+ * Provided LUSTRE_SEQ_MAX_WIDTH is less than (1 << 24), which is currently
+ * true, the time between re-used inode numbers is very long - 2^40 SEQ numbers,
+ * or about 2^40 client mounts, if clients create fewer than 2^24 files/mount.
+ */
 static inline __u64 fid_flatten(const struct lu_fid *fid)
 {
-        return (fid_seq(fid) - 1) * LUSTRE_SEQ_MAX_WIDTH + fid_oid(fid);
+        __u64 ino;
+        __u64 seq;
+
+        if (fid_is_igif(fid)) {
+                ino = lu_igif_ino(fid);
+                RETURN(ino);
+        }
+
+        seq = fid_seq(fid);
+
+        ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
+
+        RETURN(ino ? ino : fid_oid(fid));
+}
+
+static inline __u32 fid_hash(const struct lu_fid *f, int bits)
+{
+        /* all objects with the same ID and different versions will belong to
+         * the same collision list. */
+        return cfs_hash_long(fid_flatten(f), bits);
+}
+
+/**
+ * Map a FID to a 32-bit value for use as an inode number on 32-bit systems. */
+static inline __u32 fid_flatten32(const struct lu_fid *fid)
+{
+        __u32 ino;
+        __u64 seq;
+
+        if (fid_is_igif(fid)) {
+                ino = lu_igif_ino(fid);
+                RETURN(ino);
+        }
+
+        seq = fid_seq(fid) - FID_SEQ_START;
+
+        /* Map the high bits of the OID into higher bits of the inode number so
+         * that inodes generated at about the same time have a reduced chance
+         * of collisions. This will give a period of 2^10 = 1024 unique clients
+         * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
+         * (from OID), or up to 128M inodes without collisions for new files. */
+        ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
+              (seq >> (64 - (40-8)) & 0xffffff00) +
+              (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
+
+        RETURN(ino ? ino : fid_oid(fid));
 }
 
 #define LUSTRE_SEQ_SRV_NAME "seq_srv"
 #define LUSTRE_SEQ_CTL_NAME "seq_ctl"
 
 /* Range common stuff */
-void range_cpu_to_le(struct lu_range *dst, const struct lu_range *src);
-void range_cpu_to_be(struct lu_range *dst, const struct lu_range *src);
-void range_le_to_cpu(struct lu_range *dst, const struct lu_range *src);
-void range_be_to_cpu(struct lu_range *dst, const struct lu_range *src);
+static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+        dst->lsr_start = cpu_to_le64(src->lsr_start);
+        dst->lsr_end = cpu_to_le64(src->lsr_end);
+        dst->lsr_index = cpu_to_le32(src->lsr_index);
+        dst->lsr_flags = cpu_to_le32(src->lsr_flags);
+}
+
+static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+        dst->lsr_start = le64_to_cpu(src->lsr_start);
+        dst->lsr_end = le64_to_cpu(src->lsr_end);
+        dst->lsr_index = le32_to_cpu(src->lsr_index);
+        dst->lsr_flags = le32_to_cpu(src->lsr_flags);
+}
+
+static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+        dst->lsr_start = cpu_to_be64(src->lsr_start);
+        dst->lsr_end = cpu_to_be64(src->lsr_end);
+        dst->lsr_index = cpu_to_be32(src->lsr_index);
+        dst->lsr_flags = cpu_to_be32(src->lsr_flags);
+}
+
+static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+        dst->lsr_start = be64_to_cpu(src->lsr_start);
+        dst->lsr_end = be64_to_cpu(src->lsr_end);
+        dst->lsr_index = be32_to_cpu(src->lsr_index);
+        dst->lsr_flags = be32_to_cpu(src->lsr_flags);
+}
+
+/** @} fid */
 
 #endif /* __LINUX_FID_H */
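
The fid_flatten() and fid_flatten32() comments in the patch describe how a 128-bit FID is folded into a 64-bit or 32-bit inode number. The short userspace sketch below is not part of the patch; it only reproduces that folding for a sample FID so the arithmetic can be inspected outside the kernel tree. The struct lu_fid layout and the FID_SEQ_START value are assumptions taken from the lustre_idl.h of this vintage, the helper names flatten64()/flatten32() are invented here, and the IGIF branch and the kernel RETURN() macro are dropped because they are not needed in a standalone program.

/* Standalone illustration only -- lu_fid layout and FID_SEQ_START are
 * assumed from lustre_idl.h of this era, not defined by this patch. */
#include <stdio.h>
#include <stdint.h>

struct lu_fid {
        uint64_t f_seq;                 /* sequence the object belongs to */
        uint32_t f_oid;                 /* object id within that sequence */
        uint32_t f_ver;                 /* object version, unused here */
};

#define FID_SEQ_START 0x200000000ULL    /* assumed first "normal" sequence */

/* Same folding as the new fid_flatten(): low SEQ bits land above the OID,
 * high SEQ bits wrap back down to reduce the chance of collisions. */
static uint64_t flatten64(const struct lu_fid *fid)
{
        uint64_t seq = fid->f_seq;
        uint64_t ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid->f_oid;

        return ino ? ino : fid->f_oid;
}

/* Same folding as the new fid_flatten32(), truncated to 32 bits on assignment. */
static uint32_t flatten32(const struct lu_fid *fid)
{
        uint64_t seq = fid->f_seq - FID_SEQ_START;
        uint32_t ino;

        ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
              (seq >> (64 - (40 - 8)) & 0xffffff00) +
              (fid->f_oid & 0xff000fff) + ((fid->f_oid & 0x00fff000) << 8);
        return ino ? ino : fid->f_oid;
}

int main(void)
{
        struct lu_fid fid = { .f_seq = FID_SEQ_START + 1, .f_oid = 42, .f_ver = 0 };

        printf("64-bit ino: %#llx\n", (unsigned long long)flatten64(&fid));
        printf("32-bit ino: %#x\n", flatten32(&fid));
        return 0;
}

Built with any C compiler, this prints a 64-bit value dominated by the SEQ and a small 32-bit value assembled from the low SEQ and OID bits, which is the collision-spacing behaviour the comments above argue for.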
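The patch also turns the range_*_to_* prototypes into inline helpers that byte-swap each lu_seq_range field for on-disk or wire use. The sketch below, again not part of the patch, shows the equivalent round trip in userspace: the lu_seq_range layout is inferred from the four fields the patch converts, the field comments are informal glosses, and htole64()/le64toh() from <endian.h> stand in for the kernel's cpu_to_le64()/le64_to_cpu().

/* Standalone illustration only -- struct layout inferred from the fields the
 * patch converts; glibc endian helpers replace the kernel byte-swap macros. */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

struct lu_seq_range {
        uint64_t lsr_start;     /* first sequence in the range */
        uint64_t lsr_end;       /* end of the range */
        uint32_t lsr_index;     /* index of the server owning the range */
        uint32_t lsr_flags;
};

/* Mirror of the new inline range_cpu_to_le(): every field to little endian. */
static void to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
        dst->lsr_start = htole64(src->lsr_start);
        dst->lsr_end   = htole64(src->lsr_end);
        dst->lsr_index = htole32(src->lsr_index);
        dst->lsr_flags = htole32(src->lsr_flags);
}

/* Mirror of range_le_to_cpu(): back to host byte order. */
static void to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
        dst->lsr_start = le64toh(src->lsr_start);
        dst->lsr_end   = le64toh(src->lsr_end);
        dst->lsr_index = le32toh(src->lsr_index);
        dst->lsr_flags = le32toh(src->lsr_flags);
}

int main(void)
{
        struct lu_seq_range cpu = { .lsr_start = 0x400, .lsr_end = 0x420,
                                    .lsr_index = 1, .lsr_flags = 0 };
        struct lu_seq_range wire, back;

        to_le(&wire, &cpu);     /* fixed little-endian form for disk/wire */
        to_cpu(&back, &wire);   /* a reader converts back to host order */

        printf("round trip ok: %d\n",
               back.lsr_start == cpu.lsr_start && back.lsr_end == cpu.lsr_end &&
               back.lsr_index == cpu.lsr_index && back.lsr_flags == cpu.lsr_flags);
        return 0;
}

On a little-endian host the swap is a no-op and the round trip is trivially exact; on a big-endian host the intermediate struct holds byte-swapped values, which is exactly why the header fixes the stored byte order rather than writing host-order fields.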