4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 #ifndef __LIBCFS_LINUX_HASH_H__
24 #define __LIBCFS_LINUX_HASH_H__
26 #include <linux/dcache.h>
27 #include <linux/rhashtable.h>
/*
 * cfs_hashlen_string() - hash a NUL-terminated string with @salt,
 * producing a combined hash/length value (see hashlen_hash() and
 * hashlen_create() below).  Defined elsewhere; declared here only.
 */
29 u64 cfs_hashlen_string(const void *salt, const char *name);

/* Extract the 32-bit hash from a combined hash/length value. */
32 #define hashlen_hash(hashlen) ((u32)(hashlen))

/*
 * When the kernel lacks <linux/stringhash.h> (no HAVE_STRINGHASH),
 * provide hashlen_create(): length in the high 32 bits, hash in the
 * low 32 bits of the u64.
 */
35 #ifndef HAVE_STRINGHASH
36 #ifndef hashlen_create
37 #define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
39 #endif /* !HAVE_STRINGHASH */
/*
 * On kernels whose hash_64() is known broken (HAVE_BROKEN_HASH_64),
 * carry local multiplicative-hash replacements; otherwise alias the
 * kernel's own hash_32()/hash_64().
 */
41 #ifdef HAVE_BROKEN_HASH_64

/* Golden-ratio multipliers used for Fibonacci (multiplicative) hashing. */
43 #define GOLDEN_RATIO_32 0x61C88647
44 #define GOLDEN_RATIO_64 0x61C8864680B583EBull

/* Hash a 32-bit value down to @bits bits. */
46 static inline u32 cfs_hash_32(u32 val, unsigned int bits)
48 /* High bits are more random, so use them. */
49 return (val * GOLDEN_RATIO_32) >> (32 - bits);

/* Hash a 64-bit value down to @bits bits. */
52 static __always_inline u32 cfs_hash_64(u64 val, unsigned int bits)
54 #if BITS_PER_LONG == 64
55 /* 64x64-bit multiply is efficient on all 64-bit processors */
56 return val * GOLDEN_RATIO_64 >> (64 - bits);
58 /* Hash 64 bits using only 32x32-bit multiply. */
59 return cfs_hash_32(((u32)val ^ ((val >> 32) * GOLDEN_RATIO_32)), bits);

/* Kernel's hash_32()/hash_64() are fine; use them directly. */
64 #define cfs_hash_32 hash_32
65 #define cfs_hash_64 hash_64

67 #endif /* HAVE_BROKEN_HASH_64 */
69 #ifndef HAVE_RHASHTABLE_WALK_ENTER
/*
 * Emulate rhashtable_walk_enter() on kernels that only provide
 * rhashtable_walk_init().  The GFP argument was added to
 * rhashtable_walk_init() at some point, hence the two call shapes.
 */
70 static int rhashtable_walk_enter(struct rhashtable *ht,
71 struct rhashtable_iter *iter)
73 #ifdef HAVE_3ARG_RHASHTABLE_WALK_INIT
74 return rhashtable_walk_init(ht, iter, GFP_KERNEL);
76 return rhashtable_walk_init(ht, iter);
/*
 * Members of the compat rhlist_head: @rhead links the entry into the
 * hash bucket chain, @next chains further entries sharing the same key.
 * NOTE(review): the enclosing struct definition is not fully visible
 * in this chunk.
 */
83 struct rhash_head rhead;
84 struct rhlist_head __rcu *next;

/* Walk every entry on an rhlist chain under RCU protection. */
91 #define rhl_for_each_entry_rcu(tpos, pos, list, member) \
92 for (pos = list; pos && rht_entry(tpos, pos, member); \
93 pos = rcu_dereference_raw(pos->next))
/* Initialize an rhltable by initializing its embedded rhashtable. */
95 static inline int rhltable_init(struct rhltable *hlt,
96 const struct rhashtable_params *params)
98 return rhashtable_init(&hlt->ht, params);
/*
 * Compat rhltable_lookup(): return the first rhlist_head matching @key,
 * or NULL if none.  Uses RCU accessors (rht_dereference_rcu,
 * rht_for_each_rcu), so the caller is expected to be inside an RCU
 * read-side critical section.
 */
101 static inline struct rhlist_head *rhltable_lookup(
102 struct rhltable *hlt, const void *key,
103 const struct rhashtable_params params)
105 struct rhashtable *ht = &hlt->ht;
106 struct rhashtable_compare_arg arg = {
110 struct bucket_table *tbl;
111 struct rhash_head *he;
114 tbl = rht_dereference_rcu(ht->tbl, ht);
/* Hash the key and scan the matching bucket chain in this table. */
116 hash = rht_key_hashfn(ht, tbl, key, params);
117 rht_for_each_rcu(he, tbl, hash) {
118 if (params.obj_cmpfn ?
119 params.obj_cmpfn(&arg, rht_obj(ht, he)) :
120 rhashtable_compare(&arg, rht_obj(ht, he)))
122 return he ? container_of(he, struct rhlist_head, rhead) : NULL;
125 /* Ensure we see any new tables. */
/* A resize may be in progress; retry the lookup in future_tbl. */
128 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
/*
 * Compat rhltable_insert_key(): insert @list under @key into the
 * rhltable's embedded hash table.  When __rhashtable_insert_fast()
 * returns a pointer instead of an int (older kernels, no
 * HAVE_HASHTABLE_INSERT_FAST_RETURN_INT), convert it with PTR_ERR().
 */
135 static inline int rhltable_insert_key(
136 struct rhltable *hlt, const void *key, struct rhlist_head *list,
137 const struct rhashtable_params params)
139 #ifdef HAVE_HASHTABLE_INSERT_FAST_RETURN_INT
140 return __rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
143 return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
/* Remove @list from the rhltable's embedded hash table. */
148 static inline int rhltable_remove(
149 struct rhltable *hlt, struct rhlist_head *list,
150 const struct rhashtable_params params)
152 return rhashtable_remove_fast(&hlt->ht, &list->rhead, params);
/* Tear down the table, calling @free_fn on each remaining entry. */
155 static inline void rhltable_free_and_destroy(struct rhltable *hlt,
156 void (*free_fn)(void *ptr,
160 rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);

/* Destroy the table without freeing the entries (NULL free_fn). */
163 static inline void rhltable_destroy(struct rhltable *hlt)
165 rhltable_free_and_destroy(hlt, NULL, NULL);
/* Begin a walk over the rhltable's embedded hash table. */
168 static inline void rhltable_walk_enter(struct rhltable *hlt,
169 struct rhashtable_iter *iter)
171 rhashtable_walk_enter(&hlt->ht, iter);
173 #endif /* !HAVE_RHLTABLE */
175 #ifndef HAVE_RHASHTABLE_LOOKUP_GET_INSERT_FAST
177 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
179 * @obj: pointer to hash head inside object
180 * @params: hash table parameters
182 * Just like rhashtable_lookup_insert_fast(), but this function returns the
183 * object if it exists, NULL if it did not and the insertion was successful,
184 * and an ERR_PTR otherwise.
186 static inline void *rhashtable_lookup_get_insert_fast(
187 struct rhashtable *ht, struct rhash_head *obj,
188 const struct rhashtable_params params)
194 rc = rhashtable_lookup_insert_fast(ht, obj, params);
/*
 * Insertion did not succeed outright: look up and return the object
 * already present under the same key.  NOTE(review): the branch that
 * tests rc is not visible in this chunk.
 */
197 key = rht_obj(ht, obj);
198 ret = rhashtable_lookup_fast(ht, key, params);
209 #endif /* !HAVE_RHASHTABLE_LOOKUP_GET_INSERT_FAST */
211 #ifndef HAVE_RHASHTABLE_LOOKUP
213 * The function rhashtable_lookup() and rhashtable_lookup_fast()
214 * are almost the same except rhashtable_lookup() doesn't
215 * take the RCU read lock. Since this is the case and only
216 * SLES12 SP3 lacks rhashtable_lookup() just duplicate the
217 * SLES12 SP3 rhashtable_lookup_fast() minus the RCU read lock.
/* Returns the matching object, or NULL if @key is not present. */
219 static inline void *rhashtable_lookup(
220 struct rhashtable *ht, const void *key,
221 const struct rhashtable_params params)
223 struct rhashtable_compare_arg arg = {
227 const struct bucket_table *tbl;
228 struct rhash_head *he;
231 tbl = rht_dereference_rcu(ht->tbl, ht);
/* Hash @key and scan the matching bucket chain in this table. */
233 hash = rht_key_hashfn(ht, tbl, key, params);
234 rht_for_each_rcu(he, tbl, hash) {
235 if (params.obj_cmpfn ?
236 params.obj_cmpfn(&arg, rht_obj(ht, he)) :
237 rhashtable_compare(&arg, rht_obj(ht, he)))
239 return rht_obj(ht, he);
242 /* Ensure we see any new tables. */
/* A resize may be in progress; retry the lookup in future_tbl. */
245 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
251 #endif /* !HAVE_RHASHTABLE_LOOKUP */
253 #ifndef HAVE_RHT_BUCKET_VAR
/* Compat rht_bucket_var(): address of bucket @hash's head pointer. */
254 static inline struct rhash_head __rcu **rht_bucket_var(
255 struct bucket_table *tbl, unsigned int hash)
257 return &tbl->buckets[hash];
261 #ifndef HAVE_RHASHTABLE_REPLACE
262 /* Internal function, please use rhashtable_replace_fast() instead */
263 static inline int __rhashtable_replace_fast(
264 struct rhashtable *ht, struct bucket_table *tbl,
265 struct rhash_head *obj_old, struct rhash_head *obj_new,
266 const struct rhashtable_params params)
268 struct rhash_head __rcu **pprev;
269 struct rhash_head *he;
274 /* Minimally, the old and new objects must have same hash
275 * (which should mean identifiers are the same).
277 hash = rht_head_hashfn(ht, tbl, obj_old, params);
278 if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
/* Serialize against concurrent mutation of this bucket. */
281 lock = rht_bucket_lock(tbl, hash);
285 pprev = rht_bucket_var(tbl, hash);
286 rht_for_each_continue(he, *pprev, tbl, hash) {
/* Splice obj_new into the chain in place of obj_old. */
292 rcu_assign_pointer(obj_new->next, obj_old->next);
293 rcu_assign_pointer(*pprev, obj_new);
298 spin_unlock_bh(lock);
304 * rhashtable_replace_fast - replace an object in hash table
306 * @obj_old: pointer to hash head inside object being replaced
307 * @obj_new: pointer to hash head inside object which is new
308 * @params: hash table parameters
310 * Replacing an object doesn't affect the number of elements in the hash table
311 * or bucket, so we don't need to worry about shrinking or expanding the
314 * Returns zero on success, -ENOENT if the entry could not be found,
315 * -EINVAL if hash is not the same for the old and new objects.
317 static inline int rhashtable_replace_fast(
318 struct rhashtable *ht, struct rhash_head *obj_old,
319 struct rhash_head *obj_new,
320 const struct rhashtable_params params)
322 struct bucket_table *tbl;
327 tbl = rht_dereference_rcu(ht->tbl, ht);
329 /* Because we have already taken (and released) the bucket
330 * lock in old_tbl, if we find that future_tbl is not yet
331 * visible then that guarantees the entry to still be in
332 * the old tbl if it exists.
/* Retry the replace in each successive future table until it succeeds
 * or there are no further tables to search.
 */
334 while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
336 (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
343 #endif /* HAVE_RHASHTABLE_REPLACE */
345 #endif /* __LIBCFS_LINUX_HASH_H__ */