Clang has default errors related to unused functions.
The errors related to 'fid_flatten' and 'fid_flatten32'
were resolved by moving the definitions of these
functions to the 'lustre_fid' header. This is a better
place for them, since they are small 'static inline'
functions, and the move has the added benefit of cutting
down on code duplication.
The error related to the 'static inline' function
'list_replace_init' was resolved by moving it to
'ofd_access_batch.h'.
The userspace implementation of 'fid_hash' has been
moved to the 'lustreapi.h' header.
Signed-off-by: Timothy Day <timday@amazon.com>
Change-Id: I9714a2f36910c871c0a4579cf9400cb9ba72ec27
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/49901
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
int llapi_fid2path(const char *device, const char *fidstr, char *path,
int pathlen, long long *recno, int *linkno);
int llapi_path2fid(const char *path, struct lu_fid *fid);
+unsigned long llapi_fid_hash(const struct lu_fid *f, unsigned int shift);
int llapi_get_mdt_index_by_fid(int fd, const struct lu_fid *fid,
int *mdt_index);
int llapi_get_lum_file(const char *path, __u64 *valid, lstatx_t *statx,
}
}
-/**
- * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
- * For non-IGIF FIDs this starts just over 2^32, and continues without
- * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
- * into the range where there may not be many OID values in use, to minimize
- * the risk of conflict.
- *
- * Suppose LUSTRE_SEQ_MAX_WIDTH less than (1 << 24) which is currently true,
- * the time between re-used inode numbers is very long - 2^40 SEQ numbers,
- * or about 2^40 client mounts, if clients create less than 2^24 files/mount.
- */
-static inline __u64 fid_flatten(const struct lu_fid *fid)
-{
- __u64 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid);
-
- ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
-
- return ino ?: fid_oid(fid);
-}
-
static inline __u32 fid_hash(const struct lu_fid *f, int bits)
{
- /* all objects with same id and different versions will belong to same
- * collisions list. */
- return cfs_hash_long(fid_flatten(f), bits);
-}
-
-/**
- * map fid to 32 bit value for ino on 32bit systems. */
-static inline __u32 fid_flatten32(const struct lu_fid *fid)
-{
- __u32 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid) - FID_SEQ_START;
-
- /* Map the high bits of the OID into higher bits of the inode number so
- * that inodes generated at about the same time have a reduced chance
- * of collisions. This will give a period of 2^12 = 1024 unique clients
- * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
- * (from OID), or up to 128M inodes without collisions for new files. */
- ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
- (seq >> (64 - (40-8)) & 0xffffff00) +
- (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
-
- return ino ?: fid_oid(fid);
+ /*
+ * All objects with same id and different versions will belong to same
+ * collisions list.
+ */
+ return cfs_hash_long(fid_flatten64(f), bits);
}
static inline int
return 0;
}
+
+/**
+ * Flatten 128-bit FID values into a 64-bit value for use as an inode number.
+ * For non-IGIF FIDs this starts just over 2^32, and continues without
+ * conflict until 2^64, at which point we wrap the high 24 bits of the SEQ
+ * into the range where there may not be many OID values in use, to minimize
+ * the risk of conflict.
+ *
+ * Supposing LUSTRE_SEQ_MAX_WIDTH is less than (1 << 24), which is currently true,
+ * the time between re-used inode numbers is very long - 2^40 SEQ numbers,
+ * or about 2^40 client mounts, if clients create less than 2^24 files/mount.
+ */
+static inline __u64 fid_flatten64(const struct lu_fid *fid)
+{
+ __u64 ino;
+ __u64 seq;
+
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ return ino;
+ }
+
+ seq = fid_seq(fid);
+
+ ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
+
+ return ino ?: fid_oid(fid);
+}
+
+/**
+ * Map a FID to a 32-bit value for use as an inode number on 32-bit systems.
+ */
+static inline __u32 fid_flatten32(const struct lu_fid *fid)
+{
+ __u32 ino;
+ __u64 seq;
+
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ return ino;
+ }
+
+ seq = fid_seq(fid) - FID_SEQ_START;
+
+ /* Map the high bits of the OID into higher bits of the inode number so
+ * that inodes generated at about the same time have a reduced chance
+ * of collisions. This will give a period of 2^12 = 1024 unique clients
+ * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
+ * (from OID), or up to 128M inodes without collisions for new files.
+ */
+ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
+ (seq >> (64 - (40-8)) & 0xffffff00) +
+ (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
+
+ return ino ?: fid_oid(fid);
+}
+
+#if __BITS_PER_LONG == 32
+#define fid_flatten_long fid_flatten32
+#elif __BITS_PER_LONG == 64
+#define fid_flatten_long fid_flatten64
+#else
+#error "Wordsize not 32 or 64"
+#endif
+
#endif
if (BITS_PER_LONG == 32 || api32)
RETURN(fid_flatten32(fid));
- RETURN(fid_flatten(fid));
+ RETURN(fid_flatten64(fid));
}
/**
if (fid_is_igif(fid))
RETURN(lu_igif_gen(fid));
- RETURN(fid_flatten(fid) >> 32);
+ RETURN(fid_flatten64(fid) >> 32);
}
ofd_access_batch.c \
ofd_access_batch.h \
ofd_access_log_reader.c
-ofd_access_log_reader_LDADD := -lpthread
+ofd_access_log_reader_LDADD := -lpthread liblustreapi.la
+ofd_access_log_reader_DEPENDENCIES := liblustreapi.la
if UTILS
#include <sched.h>
#include <libcfs/util/ioctl.h>
+#include <libcfs/util/hash.h>
#include <lustre/lustreapi.h>
#include <linux/lustre/lustre_fid.h>
#include "lustreapi_internal.h"
out:
return rc;
}
+
+unsigned long llapi_fid_hash(const struct lu_fid *f, unsigned int shift)
+{
+ return hash_long(fid_flatten_long(f), shift);
+}
#define FID_HASH_ENTRIES (1 << fid_hash_shift)
#define FID_ON_HASH(f) (!hlist_unhashed(&(f)->fr_node))
-#if __BITS_PER_LONG == 32
-#define FID_HASH_FN(f) (hash_long(fid_flatten32(f), fid_hash_shift))
-#elif __BITS_PER_LONG == 64
-#define FID_HASH_FN(f) (hash_long(fid_flatten(f), fid_hash_shift))
-#else
-#error Wordsize not 32 or 64
-#endif
-
struct lsom_head {
struct hlist_head *lh_hash;
struct list_head lh_list; /* ordered list by record index */
exit(0);
}
-static inline __u64 fid_flatten(const struct lu_fid *fid)
-{
- __u64 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid);
-
- ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
-
- return ino ?: fid_oid(fid);
-}
-
-/**
- * map fid to 32 bit value for ino on 32bit systems.
- */
-static inline __u32 fid_flatten32(const struct lu_fid *fid)
-{
- __u32 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid) - FID_SEQ_START;
-
- /* Map the high bits of the OID into higher bits of the inode number so
- * that inodes generated at about the same time have a reduced chance
- * of collisions. This will give a period of 2^12 = 1024 unique clients
- * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
- * (from OID), or up to 128M inodes without collisions for new files.
- */
- ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
- (seq >> (64 - (40-8)) & 0xffffff00) +
- (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
-
- return ino ?: fid_oid(fid);
-}
-
static inline bool fid_eq(const lustre_fid *f1, const lustre_fid *f2)
{
return f1->f_seq == f2->f_seq && f1->f_oid == f2->f_oid &&
static void fid_hash_add(struct fid_rec *f)
{
assert(!FID_ON_HASH(f));
- hlist_add_head(&f->fr_node, &head.lh_hash[FID_HASH_FN(&f->fr_fid)]);
+ hlist_add_head(&f->fr_node,
+ &head.lh_hash[llapi_fid_hash(&f->fr_fid,
+ fid_hash_shift)]);
}
static struct fid_rec *fid_hash_find(const lustre_fid *fid)
struct hlist_node *entry, *next;
struct fid_rec *f;
- hash_list = &head.lh_hash[FID_HASH_FN(fid)];
+ hash_list = &head.lh_hash[llapi_fid_hash(fid, fid_hash_shift)];
hlist_for_each_entry_safe(f, entry, next, hash_list, fr_node) {
assert(FID_ON_HASH(f));
if (fid_eq(fid, &f->fr_fid))
#include <linux/lustre/lustre_idl.h>
#include <libcfs/util/hash.h>
#include <libcfs/util/list.h>
+#include <lustre/lustreapi.h>
#include "lstddef.h"
#include "ofd_access_batch.h"
-/* XXX Weird param order to be consistent with list_replace_init(). */
-static inline void list_replace_init(struct list_head *old_node,
- struct list_head *new_node)
-{
- list_add(new_node, old_node);
- list_del_init(old_node);
-}
-
struct fid_hash_node {
struct list_head fhn_node;
struct lu_fid fhn_fid;
f1->f_ver == f2->f_ver;
}
-static inline __u64 fid_flatten(const struct lu_fid *fid)
-{
- __u64 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid);
-
- ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
-
- return ino != 0 ? ino : fid_oid(fid);
-}
-
-/**
- * map fid to 32 bit value for ino on 32bit systems.
- */
-static inline __u32 fid_flatten32(const struct lu_fid *fid)
-{
- __u32 ino;
- __u64 seq;
-
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- return ino;
- }
-
- seq = fid_seq(fid) - FID_SEQ_START;
-
- /* Map the high bits of the OID into higher bits of the inode number so
- * that inodes generated at about the same time have a reduced chance
- * of collisions. This will give a period of 2^12 = 1024 unique clients
- * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
- * (from OID), or up to 128M inodes without collisions for new files.
- */
- ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
- (seq >> (64 - (40-8)) & 0xffffff00) +
- (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
-
- return ino != 0 ? ino : fid_oid(fid);
-}
-
-static unsigned long fid_hash(const struct lu_fid *f, unsigned int shift)
-{
-#if __BITS_PER_LONG == 32
- return hash_long(fid_flatten32(f), shift);
-#elif __BITS_PER_LONG == 64
- return hash_long(fid_flatten(f), shift);
-#else
-# error "Wordsize not 32 or 64"
-#endif
-}
-
static void fhn_init(struct fid_hash_node *fhn, const struct lu_fid *fid)
{
INIT_LIST_HEAD(&fhn->fhn_node);
{
assert(!fhn_is_hashed(fhn));
- list_add(&fhn->fhn_node, &head[fid_hash(&fhn->fhn_fid, shift)]);
+ list_add(&fhn->fhn_node, &head[llapi_fid_hash(&fhn->fhn_fid, shift)]);
}
struct fid_hash_node *
struct list_head *hash_list;
struct fid_hash_node *fhn, *next;
- hash_list = &head[fid_hash(fid, shift)];
+ hash_list = &head[llapi_fid_hash(fid, shift)];
list_for_each_entry_safe(fhn, next, hash_list, fhn_node) {
assert(fhn_is_hashed(fhn));
struct list_head *list;
struct fid_hash_node *old_fhn, *next;
- list = &head[fid_hash(&new_fhn->fhn_fid, shift)];
+ list = &head[llapi_fid_hash(&new_fhn->fhn_fid, shift)];
list_for_each_entry_safe(old_fhn, next, list, fhn_node) {
assert(fhn_is_hashed(old_fhn));
#include <pthread.h>
#include <sys/types.h>
#include <linux/types.h>
+#include <libcfs/util/list.h>
struct lu_fid;
struct alr_batch;
int alr_batch_print(struct alr_batch *alrb, FILE *file,
pthread_mutex_t *file_mutex, int fraction);
+/*
+ * This code is inspired by the kernel list implementation, so the
+ * parameter order deliberately matches the kernel's list_replace_init()
+ * (old node first, new node second).
+ */
+static inline void list_replace_init(struct list_head *old_node,
+ struct list_head *new_node)
+{
+ list_add(new_node, old_node);
+ list_del_init(old_node);
+}
+
#endif /* _OFD_ACCESS_BATCH_H_ */