LU-8130 libcfs: port working hash from upstream 89/33789/5
author     James Simmons <uja.ornl@yahoo.com>
           Tue, 15 Jan 2019 16:25:54 +0000 (11:25 -0500)
committer  Oleg Drokin <green@whamcloud.com>
           Mon, 11 Feb 2019 03:22:07 +0000 (03:22 +0000)
The hash_[32|64] functions in pre-4.6 kernels produce hashes
with poor distribution, which results in high collision rates.
Backport the upstream improvements for the pre-4.6 kernels
that Lustre supports. Details can be read here:

https://lwn.net/Articles/687494
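
For reference only (not part of the patch): the fixed scheme is a plain
multiplicative hash that multiplies by a full 64-bit golden-ratio
constant and keeps the high bits, which are the best mixed. A minimal
user-space sketch of the idea, using the same constant as the backport
below:

    #include <stdint.h>

    #define GOLDEN_RATIO_64 0x61C8864680B583EBull

    /* bits must be 1..32; the high bits of the product carry the mixing */
    static inline uint32_t sketch_hash_64(uint64_t val, unsigned int bits)
    {
            return (uint32_t)((val * GOLDEN_RATIO_64) >> (64 - bits));
    }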

Test-Parameters: trivial
Change-Id: Id2436ba8be2d3ed482c5386b79710f594d5b3e59
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Reviewed-on: https://review.whamcloud.com/33789
Tested-by: Jenkins
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Alexey Lyashkov <c17817@cray.com>
Reviewed-by: Yang Sheng <ys@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
libcfs/autoconf/lustre-libcfs.m4
libcfs/include/libcfs/linux/linux-hash.h

diff --git a/libcfs/autoconf/lustre-libcfs.m4 b/libcfs/autoconf/lustre-libcfs.m4
index d437331..2763748 100644
@@ -712,6 +712,22 @@ LB_CHECK_EXPORT([kset_find_obj], [lib/kobject.c],
 ]) # LIBCFS_EXPORT_KSET_FIND_OBJ
 
 #
+# Kernel version 4.6+ commit ef703f49a6c5b909a85149bb6625c4ed0d697186
+# fixed the brokenness of hash_64(). The fix removed GOLDEN_RATIO_PRIME_64
+# since it was a poor prime value.
+#
+AC_DEFUN([LIBCFS_BROKEN_HASH_64], [
+LB_CHECK_COMPILE([kernel has fixed hash_64()],
+broken_hash_64, [
+       #include <linux/hash.h>
+],[
+       int tmp = GOLDEN_RATIO_PRIME_64;
+],[
+       AC_DEFINE(HAVE_BROKEN_HASH_64, 1, [kernel hash_64() is broken])
+])
+]) # LIBCFS_BROKEN_HASH_64
+
+#
 # LIBCFS_STACKTRACE_OPS_ADDRESS_RETURN_INT
 #
 # linux 4.6 kernel changed stacktrace_ops address to return an int
@@ -995,6 +1011,7 @@ LIBCFS_KSTRTOBOOL_FROM_USER
 LIBCFS_CRYPTO_HASH_HELPERS
 LIBCFS_EXPORT_KSET_FIND_OBJ
 # 4.6
+LIBCFS_BROKEN_HASH_64
 LIBCFS_STACKTRACE_OPS_ADDRESS_RETURN_INT
 LIBCFS_GET_USER_PAGES_6ARG
 LIBCFS_STRINGHASH
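
The configure check added above boils down to compiling a reference to
GOLDEN_RATIO_PRIME_64 against the kernel headers: if it still compiles,
the kernel predates the 4.6 cleanup and HAVE_BROKEN_HASH_64 is defined.
An approximate sketch of such a test program (the exact harness emitted
by LB_CHECK_COMPILE differs):

    #include <linux/hash.h>

    /* Compiles only on pre-4.6 kernels, where the old constant exists. */
    static int conftest_broken_hash_64(void)
    {
            int tmp = GOLDEN_RATIO_PRIME_64;    /* removed by the 4.6 fix */

            return tmp;
    }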
diff --git a/libcfs/include/libcfs/linux/linux-hash.h b/libcfs/include/libcfs/linux/linux-hash.h
index 1227ec8..7c3de9e 100644
@@ -38,6 +38,34 @@ u64 cfs_hashlen_string(const void *salt, const char *name);
 #endif
 #endif /* !HAVE_STRINGHASH */
 
+#ifdef HAVE_BROKEN_HASH_64
+
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
+static inline u32 cfs_hash_32(u32 val, unsigned int bits)
+{
+       /* High bits are more random, so use them. */
+       return (val * GOLDEN_RATIO_32) >> (32 - bits);
+}
+
+static __always_inline u32 cfs_hash_64(u64 val, unsigned int bits)
+{
+#if BITS_PER_LONG == 64
+       /* 64x64-bit multiply is efficient on all 64-bit processors */
+       return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+       /* Hash 64 bits using only 32x32-bit multiply. */
+       return cfs_hash_32(((u32)val ^ ((val >> 32) * GOLDEN_RATIO_32)), bits);
+#endif
+}
+#else
+
+#define cfs_hash_32    hash_32
+#define cfs_hash_64    hash_64
+
+#endif /* HAVE_BROKEN_HASH_64 */
+
 #ifndef HAVE_RHASHTABLE_LOOKUP_GET_INSERT_FAST
 /**
  * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
@@ -74,4 +102,4 @@ static inline void *rhashtable_lookup_get_insert_fast(
 }
 #endif /* !HAVE_RHASHTABLE_LOOKUP_GET_INSERT_FAST */
 
-#endif /* __LIBCFS_LINUX_MISC_H__ */
+#endif /* __LIBCFS_LINUX_HASH_H__ */
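
A usage sketch (hypothetical caller, not from this patch): code would
call the cfs_hash_32()/cfs_hash_64() wrappers rather than hash_32() or
hash_64() directly, so pre-4.6 kernels get the fixed constants while
4.6+ kernels fall through to the native implementations. For example,
picking a bucket in a 256-entry table:

    #include <libcfs/linux/linux-hash.h>

    #define DEMO_HASH_BITS	8	/* hypothetical: 2^8 = 256 buckets */

    /* Hypothetical helper: maps a 64-bit key to a bucket in [0, 255]. */
    static unsigned int demo_bucket(u64 key)
    {
            return cfs_hash_64(key, DEMO_HASH_BITS);
    }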