From 0991267eab728e9a6e49a31e80ea91b865159b7f Mon Sep 17 00:00:00 2001
From: Timothy Day
Date: Fri, 3 Feb 2023 03:32:59 +0000
Subject: [PATCH] LU-16518 utils: fix unused function errors

Clang reports errors for unused functions by default. The errors
related to 'fid_flatten' and 'fid_flatten32' were resolved by moving
the definitions of these functions to the 'lustre_fid' header. This
is a better place for them, since they are small 'static inline'
functions, and the move has the added benefit of cutting down code
duplication.

The error related to the 'static inline' function 'list_replace_init'
was resolved by moving it to 'ofd_access_batch.h'. The userspace
implementation of 'fid_hash' has been moved into liblustreapi as
'llapi_fid_hash', declared in the 'lustreapi.h' header.

Signed-off-by: Timothy Day
Change-Id: I9714a2f36910c871c0a4579cf9400cb9ba72ec27
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/49901
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Andreas Dilger
Reviewed-by: Shaun Tancheff
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---
 lustre/include/lustre/lustreapi.h             |  1 +
 lustre/include/lustre_fid.h                   | 62 ++----------------
 lustre/include/uapi/linux/lustre/lustre_fid.h | 65 ++++++++++++++++++
 lustre/llite/lcommon_cl.c                     |  4 +-
 lustre/utils/Makefile.am                      |  3 +-
 lustre/utils/liblustreapi_fid.c               |  6 +++
 lustre/utils/llsom_sync.c                     | 59 ++--------------
 lustre/utils/ofd_access_batch.c               | 71 ++---------------
 lustre/utils/ofd_access_batch.h               | 12 +++++
 9 files changed, 101 insertions(+), 182 deletions(-)

diff --git a/lustre/include/lustre/lustreapi.h b/lustre/include/lustre/lustreapi.h
index bec467a..e72d2b2 100644
--- a/lustre/include/lustre/lustreapi.h
+++ b/lustre/include/lustre/lustreapi.h
@@ -494,6 +494,7 @@ int llapi_fid2path_at(int mnt_fd, const struct lu_fid *fid, char *path,
 int llapi_fid2path(const char *device, const char *fidstr, char *path,
 		   int pathlen, long long *recno, int *linkno);
 int llapi_path2fid(const char *path, struct lu_fid *fid);
+unsigned long llapi_fid_hash(const struct lu_fid *f, unsigned int shift);
 int llapi_get_mdt_index_by_fid(int fd, const struct lu_fid *fid,
 			       int *mdt_index);
 int llapi_get_lum_file(const char *path, __u64 *valid, lstatx_t *statx,
diff --git a/lustre/include/lustre_fid.h b/lustre/include/lustre_fid.h
index c9ae440..e6a88e7 100644
--- a/lustre/include/lustre_fid.h
+++ b/lustre/include/lustre_fid.h
@@ -786,65 +786,13 @@ static inline void ost_fid_from_resid(struct lu_fid *fid,
 	}
 }
 
-/**
- * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
- * For non-IGIF FIDs this starts just over 2^32, and continues without
- * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
- * into the range where there may not be many OID values in use, to minimize
- * the risk of conflict.
- *
- * Suppose LUSTRE_SEQ_MAX_WIDTH less than (1 << 24) which is currently true,
- * the time between re-used inode numbers is very long - 2^40 SEQ numbers,
- * or about 2^40 client mounts, if clients create less than 2^24 files/mount.
- */
-static inline __u64 fid_flatten(const struct lu_fid *fid)
-{
-	__u64 ino;
-	__u64 seq;
-
-	if (fid_is_igif(fid)) {
-		ino = lu_igif_ino(fid);
-		return ino;
-	}
-
-	seq = fid_seq(fid);
-
-	ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
-
-	return ino ?: fid_oid(fid);
-}
-
 static inline __u32 fid_hash(const struct lu_fid *f, int bits)
 {
-	/* all objects with same id and different versions will belong to same
-	 * collisions list. */
-	return cfs_hash_long(fid_flatten(f), bits);
-}
-
-/**
- * map fid to 32 bit value for ino on 32bit systems. */
-static inline __u32 fid_flatten32(const struct lu_fid *fid)
-{
-	__u32 ino;
-	__u64 seq;
-
-	if (fid_is_igif(fid)) {
-		ino = lu_igif_ino(fid);
-		return ino;
-	}
-
-	seq = fid_seq(fid) - FID_SEQ_START;
-
-	/* Map the high bits of the OID into higher bits of the inode number so
-	 * that inodes generated at about the same time have a reduced chance
-	 * of collisions. This will give a period of 2^12 = 1024 unique clients
-	 * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
-	 * (from OID), or up to 128M inodes without collisions for new files. */
-	ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
-	      (seq >> (64 - (40-8)) & 0xffffff00) +
-	      (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
-
-	return ino ?: fid_oid(fid);
+	/*
+	 * All objects with same id and different versions will belong to same
+	 * collisions list.
+	 */
+	return cfs_hash_long(fid_flatten64(f), bits);
 }
 
 static inline int
diff --git a/lustre/include/uapi/linux/lustre/lustre_fid.h b/lustre/include/uapi/linux/lustre/lustre_fid.h
index f11ad3b..43787e3 100644
--- a/lustre/include/uapi/linux/lustre/lustre_fid.h
+++ b/lustre/include/uapi/linux/lustre/lustre_fid.h
@@ -361,4 +361,69 @@ static inline int lu_fid_cmp(const struct lu_fid *f0,
 	return 0;
 }
 
+
+/**
+ * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
+ * For non-IGIF FIDs this starts just over 2^32, and continues without
+ * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
+ * into the range where there may not be many OID values in use, to minimize
+ * the risk of conflict.
+ *
+ * Suppose LUSTRE_SEQ_MAX_WIDTH less than (1 << 24) which is currently true,
+ * the time between re-used inode numbers is very long - 2^40 SEQ numbers,
+ * or about 2^40 client mounts, if clients create less than 2^24 files/mount.
+ */
+static inline __u64 fid_flatten64(const struct lu_fid *fid)
+{
+	__u64 ino;
+	__u64 seq;
+
+	if (fid_is_igif(fid)) {
+		ino = lu_igif_ino(fid);
+		return ino;
+	}
+
+	seq = fid_seq(fid);
+
+	ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
+
+	return ino ?: fid_oid(fid);
+}
+
+/**
+ * map fid to 32 bit value for ino on 32bit systems.
+ */
+static inline __u32 fid_flatten32(const struct lu_fid *fid)
+{
+	__u32 ino;
+	__u64 seq;
+
+	if (fid_is_igif(fid)) {
+		ino = lu_igif_ino(fid);
+		return ino;
+	}
+
+	seq = fid_seq(fid) - FID_SEQ_START;
+
+	/* Map the high bits of the OID into higher bits of the inode number so
+	 * that inodes generated at about the same time have a reduced chance
+	 * of collisions. This will give a period of 2^12 = 1024 unique clients
+	 * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
+	 * (from OID), or up to 128M inodes without collisions for new files.
+	 */
+	ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
+	      (seq >> (64 - (40-8)) & 0xffffff00) +
+	      (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
+
+	return ino ?: fid_oid(fid);
+}
+
+#if __BITS_PER_LONG == 32
+#define fid_flatten_long fid_flatten32
+#elif __BITS_PER_LONG == 64
+#define fid_flatten_long fid_flatten64
+#else
+#error "Wordsize not 32 or 64"
+#endif
+
 #endif
diff --git a/lustre/llite/lcommon_cl.c b/lustre/llite/lcommon_cl.c
index f38f34a..69bd6da 100644
--- a/lustre/llite/lcommon_cl.c
+++ b/lustre/llite/lcommon_cl.c
@@ -276,7 +276,7 @@ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
 	if (BITS_PER_LONG == 32 || api32)
 		RETURN(fid_flatten32(fid));
 
-	RETURN(fid_flatten(fid));
+	RETURN(fid_flatten64(fid));
 }
 
 /**
@@ -288,5 +288,5 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid)
 	if (fid_is_igif(fid))
 		RETURN(lu_igif_gen(fid));
 
-	RETURN(fid_flatten(fid) >> 32);
+	RETURN(fid_flatten64(fid) >> 32);
 }
diff --git a/lustre/utils/Makefile.am b/lustre/utils/Makefile.am
index 18d0bb1..026eb5a 100644
--- a/lustre/utils/Makefile.am
+++ b/lustre/utils/Makefile.am
@@ -161,7 +161,8 @@ ofd_access_log_reader_SOURCES = \
 	ofd_access_batch.c \
 	ofd_access_batch.h \
 	ofd_access_log_reader.c
-ofd_access_log_reader_LDADD := -lpthread
+ofd_access_log_reader_LDADD := -lpthread liblustreapi.la
+ofd_access_log_reader_DEPENDENCIES := liblustreapi.la
 
 if UTILS
 
diff --git a/lustre/utils/liblustreapi_fid.c b/lustre/utils/liblustreapi_fid.c
index 18b2729..d6fb8b5 100644
--- a/lustre/utils/liblustreapi_fid.c
+++ b/lustre/utils/liblustreapi_fid.c
@@ -46,6 +46,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include "lustreapi_internal.h"
@@ -488,3 +489,8 @@ int llapi_open_by_fid(const char *lustre_dir, const struct lu_fid *fid,
 out:
 	return rc;
 }
+
+unsigned long llapi_fid_hash(const struct lu_fid *f, unsigned int shift)
+{
+	return hash_long(fid_flatten_long(f), shift);
+}
diff --git a/lustre/utils/llsom_sync.c b/lustre/utils/llsom_sync.c
index 9d1069c..7947c2f 100644
--- a/lustre/utils/llsom_sync.c
+++ b/lustre/utils/llsom_sync.c
@@ -87,14 +87,6 @@ static const int fid_hash_shift = 6;
 #define FID_HASH_ENTRIES	(1 << fid_hash_shift)
 #define FID_ON_HASH(f)		(!hlist_unhashed(&(f)->fr_node))
 
-#if __BITS_PER_LONG == 32
-#define FID_HASH_FN(f)	(hash_long(fid_flatten32(f), fid_hash_shift))
-#elif __BITS_PER_LONG == 64
-#define FID_HASH_FN(f)	(hash_long(fid_flatten(f), fid_hash_shift))
-#else
-#error Wordsize not 32 or 64
-#endif
-
 struct lsom_head {
 	struct hlist_head	*lh_hash;
 	struct list_head	 lh_list; /* ordered list by record index */
@@ -115,51 +107,6 @@ static void usage(char *prog)
 	exit(0);
 }
 
-static inline __u64 fid_flatten(const struct lu_fid *fid)
-{
-	__u64 ino;
-	__u64 seq;
-
-	if (fid_is_igif(fid)) {
-		ino = lu_igif_ino(fid);
-		return ino;
-	}
-
-	seq = fid_seq(fid);
-
-	ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
-
-	return ino ?: fid_oid(fid);
-}
-
-/**
- * map fid to 32 bit value for ino on 32bit systems.
- */
-static inline __u32 fid_flatten32(const struct lu_fid *fid)
-{
-	__u32 ino;
-	__u64 seq;
-
-	if (fid_is_igif(fid)) {
-		ino = lu_igif_ino(fid);
-		return ino;
-	}
-
-	seq = fid_seq(fid) - FID_SEQ_START;
-
-	/* Map the high bits of the OID into higher bits of the inode number so
-	 * that inodes generated at about the same time have a reduced chance
-	 * of collisions. This will give a period of 2^12 = 1024 unique clients
-	 * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
-	 * (from OID), or up to 128M inodes without collisions for new files.
-	 */
-	ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
-	      (seq >> (64 - (40-8)) & 0xffffff00) +
-	      (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
-
-	return ino ?: fid_oid(fid);
-}
-
 static inline bool fid_eq(const lustre_fid *f1, const lustre_fid *f2)
 {
 	return f1->f_seq == f2->f_seq && f1->f_oid == f2->f_oid &&
@@ -175,7 +122,9 @@ static void fid_hash_del(struct fid_rec *f)
 static void fid_hash_add(struct fid_rec *f)
 {
 	assert(!FID_ON_HASH(f));
-	hlist_add_head(&f->fr_node, &head.lh_hash[FID_HASH_FN(&f->fr_fid)]);
+	hlist_add_head(&f->fr_node,
+		       &head.lh_hash[llapi_fid_hash(&f->fr_fid,
+						    fid_hash_shift)]);
 }
 
 static struct fid_rec *fid_hash_find(const lustre_fid *fid)
@@ -184,7 +133,7 @@
 	struct hlist_node *entry, *next;
 	struct fid_rec *f;
 
-	hash_list = &head.lh_hash[FID_HASH_FN(fid)];
+	hash_list = &head.lh_hash[llapi_fid_hash(fid, fid_hash_shift)];
 	hlist_for_each_entry_safe(f, entry, next, hash_list, fr_node) {
 		assert(FID_ON_HASH(f));
 		if (fid_eq(fid, &f->fr_fid))
diff --git a/lustre/utils/ofd_access_batch.c b/lustre/utils/ofd_access_batch.c
index 75cd43e..f984482 100644
--- a/lustre/utils/ofd_access_batch.c
+++ b/lustre/utils/ofd_access_batch.c
@@ -41,17 +41,10 @@
 #include 
 #include 
 #include 
+#include 
 #include "lstddef.h"
 #include "ofd_access_batch.h"
 
-/* XXX Weird param order to be consistent with list_replace_init(). */
-static inline void list_replace_init(struct list_head *old_node,
-				     struct list_head *new_node)
-{
-	list_add(new_node, old_node);
-	list_del_init(old_node);
-}
-
 struct fid_hash_node {
 	struct list_head fhn_node;
 	struct lu_fid fhn_fid;
@@ -63,62 +56,6 @@ static inline bool fid_eq(const struct lu_fid *f1, const struct lu_fid *f2)
 	       f1->f_ver == f2->f_ver;
 }
 
-static inline __u64 fid_flatten(const struct lu_fid *fid)
-{
-	__u64 ino;
-	__u64 seq;
-
-	if (fid_is_igif(fid)) {
-		ino = lu_igif_ino(fid);
-		return ino;
-	}
-
-	seq = fid_seq(fid);
-
-	ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
-
-	return ino != 0 ? ino : fid_oid(fid);
-}
-
-/**
- * map fid to 32 bit value for ino on 32bit systems.
- */
-static inline __u32 fid_flatten32(const struct lu_fid *fid)
-{
-	__u32 ino;
-	__u64 seq;
-
-	if (fid_is_igif(fid)) {
-		ino = lu_igif_ino(fid);
-		return ino;
-	}
-
-	seq = fid_seq(fid) - FID_SEQ_START;
-
-	/* Map the high bits of the OID into higher bits of the inode number so
-	 * that inodes generated at about the same time have a reduced chance
-	 * of collisions. This will give a period of 2^12 = 1024 unique clients
-	 * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
-	 * (from OID), or up to 128M inodes without collisions for new files.
-	 */
-	ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
-	      (seq >> (64 - (40-8)) & 0xffffff00) +
-	      (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
-
-	return ino != 0 ? ino : fid_oid(fid);
-}
-
-static unsigned long fid_hash(const struct lu_fid *f, unsigned int shift)
-{
-#if __BITS_PER_LONG == 32
-	return hash_long(fid_flatten32(f), shift);
-#elif __BITS_PER_LONG == 64
-	return hash_long(fid_flatten(f), shift);
-#else
-# error "Wordsize not 32 or 64"
-#endif
-}
-
 static void fhn_init(struct fid_hash_node *fhn, const struct lu_fid *fid)
 {
 	INIT_LIST_HEAD(&fhn->fhn_node);
@@ -148,7 +85,7 @@ void fid_hash_add(struct list_head *head, unsigned int shift,
 {
 	assert(!fhn_is_hashed(fhn));
 
-	list_add(&fhn->fhn_node, &head[fid_hash(&fhn->fhn_fid, shift)]);
+	list_add(&fhn->fhn_node, &head[llapi_fid_hash(&fhn->fhn_fid, shift)]);
 }
 
 struct fid_hash_node *
@@ -157,7 +94,7 @@ fid_hash_find(struct list_head *head, unsigned int shift, const struct lu_fid *f
 	struct list_head *hash_list;
 	struct fid_hash_node *fhn, *next;
 
-	hash_list = &head[fid_hash(fid, shift)];
+	hash_list = &head[llapi_fid_hash(fid, shift)];
 	list_for_each_entry_safe(fhn, next, hash_list, fhn_node) {
 		assert(fhn_is_hashed(fhn));
 
@@ -174,7 +111,7 @@ fid_hash_insert(struct list_head *head, unsigned int shift, struct fid_hash_node
 	struct list_head *list;
 	struct fid_hash_node *old_fhn, *next;
 
-	list = &head[fid_hash(&new_fhn->fhn_fid, shift)];
+	list = &head[llapi_fid_hash(&new_fhn->fhn_fid, shift)];
 	list_for_each_entry_safe(old_fhn, next, list, fhn_node) {
 		assert(fhn_is_hashed(old_fhn));
 
diff --git a/lustre/utils/ofd_access_batch.h b/lustre/utils/ofd_access_batch.h
index 8c5c8e9..7a4771c 100644
--- a/lustre/utils/ofd_access_batch.h
+++ b/lustre/utils/ofd_access_batch.h
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include 
 
 struct lu_fid;
 struct alr_batch;
@@ -15,4 +16,15 @@ int alr_batch_add(struct alr_batch *alrb, const char *obd_name,
 int alr_batch_print(struct alr_batch *alrb, FILE *file,
 		    pthread_mutex_t *file_mutex, int fraction);
 
+/*
+ * The code is inspired by the kernel list implementation. Hence, this has
+ * a weird param order to be consistent with the kernel list_replace_init().
+ */
+static inline void list_replace_init(struct list_head *old_node,
+				     struct list_head *new_node)
+{
+	list_add(new_node, old_node);
+	list_del_init(old_node);
+}
+
 #endif /* _OFD_ACCESS_BATCH_H_ */
-- 
1.8.3.1
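
Note (illustration only, not part of the patch above): as context for the class
of warning being fixed, here is a minimal sketch of why an unused
'static inline' helper defined directly in a .c file trips clang's
-Wunused-function (a hard error under -Werror), while the same definition in an
included header does not. The file and function names below are made up; the
build command assumes clang with -Wall -Werror, as the Lustre utils build uses
warnings-as-errors.

  /* demo.c - hypothetical example; build with: clang -Wall -Werror -c demo.c */
  #include <stdint.h>

  /*
   * This helper is never called below, so clang emits
   * "unused function 'flatten32'" [-Wunused-function] and -Werror turns it
   * into a build failure. Moving the definition into a shared header (as
   * this patch does for fid_flatten32() and friends) avoids the warning on
   * build paths that do not use it.
   */
  static inline uint32_t flatten32(uint64_t seq, uint32_t oid)
  {
          /* toy stand-in only; not the real fid_flatten32() mapping */
          return (uint32_t)(seq << 12) + oid;
  }

  int main(void)
  {
          return 0;
  }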