]) # LIBCFS_STRINGHASH
#
+# LIBCFS_RHASHTABLE_INSERT_FAST
+#
+# 4.7+ kernel commit 5ca8cc5bf11faed257c762018aea9106d529232f
+# changed __rhashtable_insert_fast to support the new function
+# rhashtable_lookup_get_insert_key(): its return type changed from
+# int to a pointer.  Detect the old int-returning variant so callers
+# know which return convention to handle.
+#
+AC_DEFUN([LIBCFS_RHASHTABLE_INSERT_FAST], [
+tmp_flags="$EXTRA_KCFLAGS"
+# -Werror turns the int-vs-pointer assignment mismatch into a hard
+# compile failure, so the probe below reliably distinguishes the two
+# return types.
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if internal '__rhashtable_insert_fast()' returns int],
+rhashtable_insert_fast, [
+	#include <linux/rhashtable.h>
+],[
+	const struct rhashtable_params params = { 0 };
+	int rc;
+
+	rc = __rhashtable_insert_fast(NULL, NULL, NULL, params);
+],[
+	AC_DEFINE(HAVE_HASHTABLE_INSERT_FAST_RETURN_INT, 1,
+		['__rhashtable_insert_fast()' returns int])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LIBCFS_RHASHTABLE_INSERT_FAST
+
+#
+# LIBCFS_RHASHTABLE_LOOKUP
+#
+# Kernel version 4.8-rc6 commit ca26893f05e86497a86732768ec53cd38c0819ca
+# introduced rhashtable_lookup(), a variant of rhashtable_lookup_fast()
+# that does not take the RCU read lock itself.
+#
+AC_DEFUN([LIBCFS_RHASHTABLE_LOOKUP], [
+LB_CHECK_COMPILE([if 'rhashtable_lookup' exists],
+rhashtable_lookup, [
+	#include <linux/rhashtable.h>
+],[
+	const struct rhashtable_params params = { 0 };
+	void *ret;
+
+	ret = rhashtable_lookup(NULL, NULL, params);
+],[
+	AC_DEFINE(HAVE_RHASHTABLE_LOOKUP, 1,
+		[rhashtable_lookup() is available])
+])
+]) # LIBCFS_RHASHTABLE_LOOKUP
+
+#
+# LIBCFS_RHLTABLE
+# Kernel version 4.8-rc6 commit ca26893f05e86497a86732768ec53cd38c0819ca
+# created the rhlist interface to allow inserting duplicate objects
+# into the same table.
+#
+AC_DEFUN([LIBCFS_RHLTABLE], [
+LB_CHECK_COMPILE([does 'struct rhltable' exist],
+rhltable, [
+	#include <linux/rhashtable.h>
+],[
+	struct rhltable *hlt;
+
+	rhltable_destroy(hlt);
+],[
+	AC_DEFINE(HAVE_RHLTABLE, 1,
+		[struct rhltable exists])
+])
+]) # LIBCFS_RHLTABLE
+
+#
# LIBCFS_STACKTRACE_OPS
#
# Kernel version 4.8 commit c8fe4609827aedc9c4b45de80e7cdc8ccfa8541b
LIBCFS_STACKTRACE_OPS_ADDRESS_RETURN_INT
LIBCFS_GET_USER_PAGES_6ARG
LIBCFS_STRINGHASH
+# 4.7
+LIBCFS_RHASHTABLE_INSERT_FAST
# 4.8
+LIBCFS_RHASHTABLE_LOOKUP
+LIBCFS_RHLTABLE
LIBCFS_STACKTRACE_OPS
# 4.9
LIBCFS_GET_USER_PAGES_GUP_FLAGS
#endif /* HAVE_BROKEN_HASH_64 */
+#ifndef HAVE_RHLTABLE
+/* Emulation of the rhlist ("hash list table") interface, introduced
+ * upstream in 4.8-rc6, for kernels that only provide the plain
+ * rhashtable API.  An rhltable permits multiple objects with the same
+ * key by chaining duplicates on a per-key list.
+ */
+
+/* Head chained behind a single hash slot; duplicates hang off ->next. */
+struct rhlist_head {
+	struct rhash_head rhead;
+	struct rhlist_head __rcu *next;
+};
+
+/* An rhltable is just a plain rhashtable underneath. */
+struct rhltable {
+	struct rhashtable ht;
+};
+
+/* Walk every duplicate chained on @list: @tpos is the typed entry
+ * cursor, @pos the struct rhlist_head cursor.  Uses
+ * rcu_dereference_raw(), so RCU protection is the caller's
+ * responsibility.
+ */
+#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
+	for (pos = list; pos && rht_entry(tpos, pos, member); \
+		pos = rcu_dereference_raw(pos->next))
+
+/* Initialize @hlt with @params; returns 0 or the negative errno from
+ * rhashtable_init(). */
+static inline int rhltable_init(struct rhltable *hlt,
+				const struct rhashtable_params *params)
+{
+	return rhashtable_init(&hlt->ht, params);
+}
+
+/* Return the first rhlist_head matching @key, or NULL if none.  Walks
+ * the current bucket table and, after the smp_rmb(), any future_tbl
+ * installed by a concurrent resize.  Does not take the RCU read lock
+ * itself -- given the rht_dereference_rcu()/rht_for_each_rcu() usage
+ * the caller is assumed to hold it.
+ */
+static inline struct rhlist_head *rhltable_lookup(
+	struct rhltable *hlt, const void *key,
+	const struct rhashtable_params params)
+{
+	struct rhashtable *ht = &hlt->ht;
+	struct rhashtable_compare_arg arg = {
+		.ht = ht,
+		.key = key,
+	};
+	struct bucket_table *tbl;
+	struct rhash_head *he;
+	unsigned int hash;
+
+	tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+	hash = rht_key_hashfn(ht, tbl, key, params);
+	rht_for_each_rcu(he, tbl, hash) {
+		if (params.obj_cmpfn ?
+		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+		    rhashtable_compare(&arg, rht_obj(ht, he)))
+			continue;
+		/* NOTE(review): 'he' is always non-NULL inside this loop,
+		 * so the ternary always yields container_of(). */
+		return he ? container_of(he, struct rhlist_head, rhead) : NULL;
+	}
+
+	/* Ensure we see any new tables. */
+	smp_rmb();
+
+	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+	if (unlikely(tbl))
+		goto restart;
+
+	return NULL;
+}
+
+/* Insert @list under @key; returns 0 on success or a negative errno.
+ * On kernels where __rhashtable_insert_fast() returns a pointer (see
+ * HAVE_HASHTABLE_INSERT_FAST_RETURN_INT probe) it returns NULL on
+ * success, which PTR_ERR() maps to 0. */
+static inline int rhltable_insert_key(
+	struct rhltable *hlt, const void *key, struct rhlist_head *list,
+	const struct rhashtable_params params)
+{
+#ifdef HAVE_HASHTABLE_INSERT_FAST_RETURN_INT
+	return __rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
+					params);
+#else
+	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
+						params));
+#endif
+}
+
+/* Remove @list from @hlt; thin wrapper over rhashtable_remove_fast(). */
+static inline int rhltable_remove(
+	struct rhltable *hlt, struct rhlist_head *list,
+	const struct rhashtable_params params)
+{
+	return rhashtable_remove_fast(&hlt->ht, &list->rhead, params);
+}
+
+/* Tear down @hlt, invoking @free_fn(obj, @arg) on each object when
+ * @free_fn is non-NULL. */
+static inline void rhltable_free_and_destroy(struct rhltable *hlt,
+					     void (*free_fn)(void *ptr,
+							     void *arg),
+					     void *arg)
+{
+	rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+}
+
+/* Destroy @hlt without freeing the contained objects. */
+static inline void rhltable_destroy(struct rhltable *hlt)
+{
+	rhltable_free_and_destroy(hlt, NULL, NULL);
+}
+
+/* Prepare @iter for walking @hlt, via the rhashtable_walk_init() API
+ * available on these older kernels. */
+static inline void rhltable_walk_enter(struct rhltable *hlt,
+				       struct rhashtable_iter *iter)
+{
+	rhashtable_walk_init(&hlt->ht, iter);
+}
+#endif /* !HAVE_RHLTABLE */
+
#ifndef HAVE_RHASHTABLE_LOOKUP_GET_INSERT_FAST
/**
* rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
}
#endif /* !HAVE_RHASHTABLE_LOOKUP_GET_INSERT_FAST */
+#ifndef HAVE_RHASHTABLE_LOOKUP
+/*
+ * The functions rhashtable_lookup() and rhashtable_lookup_fast()
+ * are almost the same except rhashtable_lookup() doesn't
+ * take the RCU read lock. Since this is the case and only
+ * SLES12 SP3 lacks rhashtable_lookup() just duplicate the
+ * SLES12 SP3 rhashtable_lookup_fast() minus the RCU read lock.
+ */
+/* Look up @key in @ht; returns the matching object or NULL.  The
+ * caller must hold the RCU read lock -- this variant deliberately
+ * does not take it (see note above).  After the smp_rmb() the lookup
+ * restarts in any future_tbl installed by a concurrent resize. */
+static inline void *rhashtable_lookup(
+	struct rhashtable *ht, const void *key,
+	const struct rhashtable_params params)
+{
+	struct rhashtable_compare_arg arg = {
+		.ht = ht,
+		.key = key,
+	};
+	const struct bucket_table *tbl;
+	struct rhash_head *he;
+	unsigned int hash;
+
+	tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+	hash = rht_key_hashfn(ht, tbl, key, params);
+	rht_for_each_rcu(he, tbl, hash) {
+		if (params.obj_cmpfn ?
+		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+		    rhashtable_compare(&arg, rht_obj(ht, he)))
+			continue;
+		return rht_obj(ht, he);
+	}
+
+	/* Ensure we see any new tables. */
+	smp_rmb();
+
+	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+	if (unlikely(tbl))
+		goto restart;
+
+	return NULL;
+}
+#endif /* !HAVE_RHASHTABLE_LOOKUP */
+
#endif /* __LIBCFS_LINUX_HASH_H__ */