]) # LIBCFS_EXPORT_KSET_FIND_OBJ
#
+# Kernel commit ef703f49a6c5b909a85149bb6625c4ed0d697186 (v4.6) fixed the
+# brokenness of hash_64() by removing GOLDEN_RATIO_PRIME_64, which was a
+# poor prime value. The probe below therefore compiles only on older
+# kernels, where hash_64() is still broken.
+#
+AC_DEFUN([LIBCFS_BROKEN_HASH_64], [
+LB_CHECK_COMPILE([kernel hash_64() is broken],
+broken_hash_64, [
+ #include <linux/hash.h>
+],[
+ int tmp = GOLDEN_RATIO_PRIME_64;
+],[
+ AC_DEFINE(HAVE_BROKEN_HASH_64, 1, [kernel hash_64() is broken])
+])
+]) # LIBCFS_BROKEN_HASH_64
+
+#
# LIBCFS_STACKTRACE_OPS_ADDRESS_RETURN_INT
#
# linux 4.6 kernel changed stacktrace_ops address to return an int
LIBCFS_CRYPTO_HASH_HELPERS
LIBCFS_EXPORT_KSET_FIND_OBJ
# 4.6
+LIBCFS_BROKEN_HASH_64
LIBCFS_STACKTRACE_OPS_ADDRESS_RETURN_INT
LIBCFS_GET_USER_PAGES_6ARG
LIBCFS_STRINGHASH
#endif
#endif /* !HAVE_STRINGHASH */
+#ifdef HAVE_BROKEN_HASH_64
+
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
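+/*
+ * Fixed multiplicative hash, mirroring the post-fix kernel hash_32():
+ * keep the top @bits bits of val * GOLDEN_RATIO_32.
+ */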
+static inline u32 cfs_hash_32(u32 val, unsigned int bits)
+{
+ /* High bits are more random, so use them. */
+ return (val * GOLDEN_RATIO_32) >> (32 - bits);
+}
+
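+/*
+ * Fixed 64-bit hash, mirroring the post-fix kernel hash_64(). On 32-bit
+ * arches the value is first folded to 32 bits so that only 32x32-bit
+ * multiplies are needed.
+ */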
+static __always_inline u32 cfs_hash_64(u64 val, unsigned int bits)
+{
+#if BITS_PER_LONG == 64
+ /* 64x64-bit multiply is efficient on all 64-bit processors */
+ return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+ /* Hash 64 bits using only 32x32-bit multiply. */
+ return cfs_hash_32(((u32)val ^ ((val >> 32) * GOLDEN_RATIO_32)), bits);
+#endif
+}
+#else
+
+#define cfs_hash_32 hash_32
+#define cfs_hash_64 hash_64
+
+#endif /* HAVE_BROKEN_HASH_64 */
+
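Usage sketch (illustrative, not part of the patch): with these wrappers a
caller indexes a power-of-two bucket array the same way on every kernel,
broken hash_64() or not. The table name and bit count below are hypothetical:

	/* Hypothetical example, not from this patch. */
	#define OBJ_HASH_BITS	7	/* 128 buckets */
	static struct hlist_head obj_hash[1 << OBJ_HASH_BITS];

	static struct hlist_head *obj_bucket(u64 cookie)
	{
		return &obj_hash[cfs_hash_64(cookie, OBJ_HASH_BITS)];
	}
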
#ifndef HAVE_RHASHTABLE_LOOKUP_GET_INSERT_FAST
/**
* rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
}
#endif /* !HAVE_RHASHTABLE_LOOKUP_GET_INSERT_FAST */
-#endif /* __LIBCFS_LINUX_MISC_H__ */
+#endif /* __LIBCFS_LINUX_HASH_H__ */