/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_hash.h
 *
 * Hashing routines
 */
#ifndef __LIBCFS_HASH_H__
#define __LIBCFS_HASH_H__

#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is, operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
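
/*
 * A worked sketch of how these primes are used (cfs_hash_u32_hash()
 * below is the real implementation): the key bits are spread over the
 * machine word by multiplication, then the result is truncated to the
 * table size with a mask, where mask = 2^bits - 1 for a table of
 * 2^bits hash lists:
 *
 *	unsigned mask = (1U << bits) - 1;
 *	unsigned hash = ((__u32)key * CFS_GOLDEN_RATIO_PRIME_32) & mask;
 */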
/** disable debug */
#define CFS_HASH_DEBUG_NONE	0
/** record hash depth and output to console when it's too deep;
 * computing overhead is low but it consumes more memory
 */
#define CFS_HASH_DEBUG_1	1
/** expensive: also validate keys */
#define CFS_HASH_DEBUG_2	2

#define CFS_HASH_DEBUG_LEVEL	CFS_HASH_DEBUG_NONE
struct cfs_hash_ops;
struct cfs_hash_lock_ops;
struct cfs_hash_hlist_ops;

union cfs_hash_lock {
	rwlock_t		rw;	/**< rwlock */
	spinlock_t		spin;	/**< spinlock */
	struct rw_semaphore	rw_sem;	/**< rwsem */
};
/**
 * cfs_hash_bucket is a container of:
 * - lock, counter ...
 * - array of hash-heads starting at hsb_head[0]; a hash-head can be one of
 *   . struct cfs_hash_head
 *   . struct cfs_hash_head_dep
 *   . struct cfs_hash_dhead
 *   . struct cfs_hash_dhead_dep
 *   depending on the user's requirements
 * - some extra bytes (the caller can request them when creating the hash)
 */
struct cfs_hash_bucket {
	union cfs_hash_lock	hsb_lock;	/**< bucket lock */
	__u32			hsb_count;	/**< current entries */
	__u32			hsb_version;	/**< change version */
	unsigned int		hsb_index;	/**< index of bucket */
	int			hsb_depmax;	/**< max depth on bucket */
	long			hsb_head[0];	/**< hash-head array */
};
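
/*
 * Memory layout of one bucket, as computed by cfs_hash_bkt_size()
 * below (a sketch; actual sizes depend on the hash-head type and on
 * the extra bytes requested at creation time):
 *
 *	+------------------------------+ <- offsetof(..., hsb_head[0])
 *	| struct cfs_hash_bucket       |
 *	+------------------------------+
 *	| hash-heads, one per hlist    |    hop_hhead_size(hs) *
 *	|                              |    CFS_HASH_BKT_NHLIST(hs)
 *	+------------------------------+
 *	| user extra bytes             |    hs_extra_bytes
 *	+------------------------------+ <- cfs_hash_bkt_size(hs)
 */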
/**
 * cfs_hash bucket descriptor; it normally lives on the caller's stack
 */
struct cfs_hash_bd {
	/** address of bucket */
	struct cfs_hash_bucket	*bd_bucket;
	/** offset in bucket */
	unsigned int		 bd_offset;
};
#define CFS_HASH_NAME_LEN	16	/**< default name length */
#define CFS_HASH_BIGNAME_LEN	64	/**< bigname for param tree */

#define CFS_HASH_BKT_BITS	3	/**< default bits of bucket */
#define CFS_HASH_BITS_MAX	30	/**< max bits of bucket */
#define CFS_HASH_BITS_MIN	CFS_HASH_BKT_BITS
/**
 * common hash attributes.
 */
enum cfs_hash_tag {
	/**
	 * don't need any lock, caller will protect operations with its
	 * own lock. With this flag:
	 * . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
	 *   will be ignored.
	 * . Some functions will be disabled with this flag, e.g.:
	 *   cfs_hash_for_each_empty, cfs_hash_rehash
	 */
	CFS_HASH_NO_LOCK	= 1 << 0,
	/** no bucket lock, use one spinlock to protect the whole hash */
	CFS_HASH_NO_BKTLOCK	= 1 << 1,
	/** rwlock to protect bucket */
	CFS_HASH_RW_BKTLOCK	= 1 << 2,
	/** spinlock to protect bucket */
	CFS_HASH_SPIN_BKTLOCK	= 1 << 3,
	/** always add new item to tail */
	CFS_HASH_ADD_TAIL	= 1 << 4,
	/** hash-table doesn't have refcount on item */
	CFS_HASH_NO_ITEMREF	= 1 << 5,
	/** big name for param-tree */
	CFS_HASH_BIGNAME	= 1 << 6,
	/** track global count */
	CFS_HASH_COUNTER	= 1 << 7,
	/** rehash item by new key */
	CFS_HASH_REHASH_KEY	= 1 << 8,
	/** enable dynamic hash resizing */
	CFS_HASH_REHASH		= 1 << 9,
	/** can shrink hash-size */
	CFS_HASH_SHRINK		= 1 << 10,
	/** assert hash is empty on exit */
	CFS_HASH_ASSERT_EMPTY	= 1 << 11,
	/** record hlist depth */
	CFS_HASH_DEPTH		= 1 << 12,
	/**
	 * rehash is always scheduled in a different thread, so a current
	 * change on the hash table is non-blocking
	 */
	CFS_HASH_NBLK_CHANGE	= 1 << 13,
	/** rw semaphore lock to protect bucket */
	CFS_HASH_RW_SEM_BKTLOCK	= 1 << 14,
	/** NB: hs_flags is typed as __u16; widen it
	 * if you ever need more than 16 flags
	 */
};
/** most used attributes */
#define CFS_HASH_DEFAULT	(CFS_HASH_RW_BKTLOCK | \
				 CFS_HASH_COUNTER | CFS_HASH_REHASH)
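
/*
 * Example (a sketch): a table expecting heavy insert/remove traffic
 * could combine per-bucket spinlocks with tail insertion and automatic
 * grow/shrink; all flags below are defined above:
 *
 *	unsigned flags = CFS_HASH_SPIN_BKTLOCK | CFS_HASH_ADD_TAIL |
 *			 CFS_HASH_COUNTER | CFS_HASH_REHASH |
 *			 CFS_HASH_SHRINK;
 */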
/**
 * cfs_hash is a general-purpose hash-table implementation; it supports:
 * . two refcount modes
 *   hash-table with & without refcount
 * . four lock modes
 *   nolock, one-spinlock, rw-bucket-lock, spin-bucket-lock
 * . general operations
 *   lookup, add (add_tail or add_head), delete
 * . rehash
 *   grow or shrink the hash-table and rehash all elements
 * . iteration
 *   locked iteration and unlocked iteration
 * . bigname
 *   support long name hash
 * . debug
 *   trace max searching depth
 *
 * Rehash:
 * When the htable grows or shrinks, a separate task (cfs_hash_rehash_worker)
 * is spawned to handle the rehash in the background; other processes can
 * concurrently perform additions, deletions, and lookups without being
 * blocked on rehash completion, because rehash releases the global wrlock
 * for each bucket.
 *
 * Rehash and iteration can't run at the same time because it's too tricky
 * to keep both of them safe and correct. As they are relatively rare
 * operations:
 * . if iteration is in progress while we try to launch rehash, we just
 *   give up; the iterator will launch rehash at the end.
 * . if rehash is in progress while we try to iterate the hash table,
 *   we just wait (it shouldn't take long); in any case, nobody should
 *   expect iteration over the whole hash-table to be non-blocking.
 *
 * During rehashing, a (key,object) pair may be in one of two buckets,
 * depending on whether the worker task has yet to transfer the object
 * to its new location in the table. Lookups and deletions need to search
 * both locations; additions must take care to only insert into the new
 * bucket.
 */
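
/*
 * Minimal usage sketch. All user-side names (struct foo, foo_hash_ops,
 * FOO_*_BITS) are hypothetical; the cfs_hash_* calls are declared later
 * in this header, and foo_hash_ops is sketched below struct cfs_hash_ops:
 *
 *	struct foo {
 *		__u64			foo_key;
 *		struct hlist_node	foo_hnode;
 *		atomic_t		foo_ref;
 *	};
 *
 *	struct cfs_hash *hs;
 *	struct foo *obj;
 *	__u64 key = ...;
 *
 *	hs = cfs_hash_create("foo", FOO_CUR_BITS, FOO_MAX_BITS,
 *			     FOO_BKT_BITS, 0, CFS_HASH_MIN_THETA,
 *			     CFS_HASH_MAX_THETA, &foo_hash_ops,
 *			     CFS_HASH_DEFAULT);
 *	if (hs == NULL)
 *		return -ENOMEM;
 *
 *	cfs_hash_add(hs, &obj->foo_key, &obj->foo_hnode);
 *	obj = cfs_hash_lookup(hs, &key);   // takes a ref via hs_get
 *	...
 *	cfs_hash_putref(hs);               // drop the table refcount
 */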
struct cfs_hash {
	/** serialize with rehash, or serialize all operations if
	 * the hash-table has CFS_HASH_NO_BKTLOCK
	 */
	union cfs_hash_lock		hs_lock;
	/** hash operations */
	struct cfs_hash_ops		*hs_ops;
	/** hash lock operations */
	struct cfs_hash_lock_ops	*hs_lops;
	/** hash list operations */
	struct cfs_hash_hlist_ops	*hs_hops;
	/** hash buckets-table */
	struct cfs_hash_bucket		**hs_buckets;
	/** total number of items on this hash-table */
	atomic_t			hs_count;
	/** hash flags, see cfs_hash_tag for detail */
	__u16				hs_flags;
	/** # of extra-bytes for bucket, for user saving extended attributes */
	__u16				hs_extra_bytes;
	/** wants to iterate */
	__u8				hs_iterating;
	/** hash-table is dying */
	__u8				hs_exiting;
	/** current hash bits */
	__u8				hs_cur_bits;
	/** min hash bits */
	__u8				hs_min_bits;
	/** max hash bits */
	__u8				hs_max_bits;
	/** bits for rehash */
	__u8				hs_rehash_bits;
	/** bits for each bucket */
	__u8				hs_bkt_bits;
	/** resize min threshold */
	__u16				hs_min_theta;
	/** resize max threshold */
	__u16				hs_max_theta;
	/** resize count */
	__u32				hs_rehash_count;
	/** # of iterators (callers of cfs_hash_for_each_*) */
	__u32				hs_iterators;
	/** rehash workitem */
	struct work_struct		hs_rehash_work;
	/** refcount on this hash table */
	atomic_t			hs_refcount;
	/** rehash buckets-table */
	struct cfs_hash_bucket		**hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
	/** serialize debug members */
	spinlock_t			hs_dep_lock;
	/** max depth */
	unsigned int			hs_dep_max;
	/** id of the deepest bucket */
	unsigned int			hs_dep_bkt;
	/** offset in the deepest bucket */
	unsigned int			hs_dep_off;
	/** bits when we found the max depth */
	unsigned int			hs_dep_bits;
	/** workitem to output max depth */
	struct work_struct		hs_dep_work;
#endif
	/** name of htable */
	char				hs_name[0];
};
struct cfs_hash_lock_ops {
	/** lock the hash table */
	void	(*hs_lock)(union cfs_hash_lock *lock, int exclusive);
	/** unlock the hash table */
	void	(*hs_unlock)(union cfs_hash_lock *lock, int exclusive);
	/** lock the hash bucket */
	void	(*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive);
	/** unlock the hash bucket */
	void	(*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive);
};
struct cfs_hash_hlist_ops {
	/** return hlist_head of hash-head of @bd */
	struct hlist_head *(*hop_hhead)(struct cfs_hash *hs,
					struct cfs_hash_bd *bd);
	/** return hash-head size */
	int (*hop_hhead_size)(struct cfs_hash *hs);
	/** add @hnode to hash-head of @bd */
	int (*hop_hnode_add)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			     struct hlist_node *hnode);
	/** remove @hnode from hash-head of @bd */
	int (*hop_hnode_del)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			     struct hlist_node *hnode);
};
struct cfs_hash_ops {
	/** return hashed value from @key */
	unsigned (*hs_hash)(struct cfs_hash *hs, const void *key,
			    unsigned mask);
	/** return key address of @hnode */
	void *   (*hs_key)(struct hlist_node *hnode);
	/** copy key from @hnode to @key */
	void     (*hs_keycpy)(struct hlist_node *hnode, void *key);
	/**
	 * compare @key with key of @hnode;
	 * returns 1 on a match
	 */
	int      (*hs_keycmp)(const void *key, struct hlist_node *hnode);
	/** return object address of @hnode, i.e.: container_of(...hnode) */
	void *   (*hs_object)(struct hlist_node *hnode);
	/** get refcount of item; always called with the bucket-lock held */
	void     (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode);
	/** release refcount of item */
	void     (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode);
	/** release refcount of item; always called with the bucket-lock held */
	void     (*hs_put_locked)(struct cfs_hash *hs,
				  struct hlist_node *hnode);
	/** called before @hnode is removed */
	void     (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode);
};
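
/*
 * Example ops vector (a sketch; struct foo and the foo_* helpers are
 * hypothetical, matching the usage sketch above struct cfs_hash):
 *
 *	static unsigned
 *	foo_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 *	{
 *		return cfs_hash_u64_hash(*(__u64 *)key, mask);
 *	}
 *
 *	static void *foo_key(struct hlist_node *hnode)
 *	{
 *		return &container_of(hnode, struct foo, foo_hnode)->foo_key;
 *	}
 *
 *	static int foo_keycmp(const void *key, struct hlist_node *hnode)
 *	{
 *		return *(__u64 *)key ==
 *		       container_of(hnode, struct foo, foo_hnode)->foo_key;
 *	}
 *
 *	static void *foo_object(struct hlist_node *hnode)
 *	{
 *		return container_of(hnode, struct foo, foo_hnode);
 *	}
 *
 *	static struct cfs_hash_ops foo_hash_ops = {
 *		.hs_hash	= foo_hash,
 *		.hs_key		= foo_key,
 *		.hs_keycmp	= foo_keycmp,
 *		.hs_object	= foo_object,
 *		.hs_get		= foo_get,	// take a ref on the object
 *		.hs_put		= foo_put,	// drop a ref on the object
 *	};
 */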
/** total number of buckets in @hs */
#define CFS_HASH_NBKT(hs)	\
	(1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))

/** total number of buckets in @hs while rehashing */
#define CFS_HASH_RH_NBKT(hs)	\
	(1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))

/** number of hlists in a bucket */
#define CFS_HASH_BKT_NHLIST(hs)	(1U << (hs)->hs_bkt_bits)

/** total number of hlists in @hs */
#define CFS_HASH_NHLIST(hs)	(1U << (hs)->hs_cur_bits)

/** total number of hlists in @hs while rehashing */
#define CFS_HASH_RH_NHLIST(hs)	(1U << (hs)->hs_rehash_bits)
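
/*
 * Worked example: with hs_cur_bits = 10 and hs_bkt_bits = 3 the table
 * has CFS_HASH_NHLIST() = 1024 hash lists, grouped into
 * CFS_HASH_NBKT() = 128 buckets of CFS_HASH_BKT_NHLIST() = 8 lists
 * each; each bucket carries its own lock, count and version.
 */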
static inline int
cfs_hash_with_no_lock(struct cfs_hash *hs)
{
	/* caller will serialize all operations for this hash-table */
	return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
}

static inline int
cfs_hash_with_no_bktlock(struct cfs_hash *hs)
{
	/* no bucket lock, one single lock to protect the hash-table */
	return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
{
	/* rwlock to protect hash bucket */
	return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
{
	/* spinlock to protect hash bucket */
	return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_rw_sem_bktlock(struct cfs_hash *hs)
{
	/* rw semaphore to protect hash bucket */
	return (hs->hs_flags & CFS_HASH_RW_SEM_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_add_tail(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
}
static inline int
cfs_hash_with_no_itemref(struct cfs_hash *hs)
{
	/* hash-table doesn't keep refcount on item,
	 * item can't be removed from hash unless it's
	 * ZERO refcount
	 */
	return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
}
static inline int
cfs_hash_with_bigname(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
}

static inline int
cfs_hash_with_counter(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
}

static inline int
cfs_hash_with_rehash(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_REHASH) != 0;
}

static inline int
cfs_hash_with_rehash_key(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
}

static inline int
cfs_hash_with_shrink(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
}

static inline int
cfs_hash_with_assert_empty(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
}

static inline int
cfs_hash_with_depth(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
}

static inline int
cfs_hash_with_nblk_change(struct cfs_hash *hs)
{
	return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
}
static inline int
cfs_hash_is_exiting(struct cfs_hash *hs)
{	/* cfs_hash_destroy is called */
	return hs->hs_exiting;
}

static inline int
cfs_hash_is_rehashing(struct cfs_hash *hs)
{	/* rehash is launched */
	return hs->hs_rehash_bits != 0;
}

static inline int
cfs_hash_is_iterating(struct cfs_hash *hs)
{	/* someone is calling cfs_hash_for_each_* */
	return hs->hs_iterating || hs->hs_iterators != 0;
}
static inline int
cfs_hash_bkt_size(struct cfs_hash *hs)
{
	return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
	       hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
	       hs->hs_extra_bytes;
}
static inline unsigned
cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
{
	return hs->hs_ops->hs_hash(hs, key, mask);
}

static inline void *
cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode)
{
	return hs->hs_ops->hs_key(hnode);
}

static inline void
cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
{
	if (hs->hs_ops->hs_keycpy != NULL)
		hs->hs_ops->hs_keycpy(hnode, key);
}
/**
 * Returns 1 on a match,
 * 0 otherwise
 */
static inline int
cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	return hs->hs_ops->hs_keycmp(key, hnode);
}
static inline void *
cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode)
{
	return hs->hs_ops->hs_object(hnode);
}

static inline void
cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
	hs->hs_ops->hs_get(hs, hnode);
}

static inline void
cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
	hs->hs_ops->hs_put_locked(hs, hnode);
}

static inline void
cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
	hs->hs_ops->hs_put(hs, hnode);
}

static inline void
cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
	if (hs->hs_ops->hs_exit)
		hs->hs_ops->hs_exit(hs, hnode);
}
static inline void cfs_hash_lock(struct cfs_hash *hs, int excl)
{
	hs->hs_lops->hs_lock(&hs->hs_lock, excl);
}

static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl)
{
	hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
}
static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
					atomic_t *condition)
{
	LASSERT(cfs_hash_with_no_bktlock(hs));
	return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
}
static inline void cfs_hash_bd_lock(struct cfs_hash *hs,
				    struct cfs_hash_bd *bd, int excl)
{
	hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
}

static inline void cfs_hash_bd_unlock(struct cfs_hash *hs,
				      struct cfs_hash_bd *bd, int excl)
{
	hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
}
/**
 * operations on cfs_hash bucket (bd: bucket descriptor),
 * they are normally for hash-table without rehash
 */
void cfs_hash_bd_get(struct cfs_hash *hs, const void *key,
		     struct cfs_hash_bd *bd);

static inline void
cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key,
			 struct cfs_hash_bd *bd, int excl)
{
	cfs_hash_bd_get(hs, key, bd);
	cfs_hash_bd_lock(hs, bd, excl);
}
static inline unsigned
cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
}

static inline void
cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index,
		      struct cfs_hash_bd *bd)
{
	bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
	bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
}
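
/*
 * Worked example: with hs_bkt_bits = 3, global hlist index 21 maps to
 * bucket 21 >> 3 = 2 and offset 21 & 7 = 5; cfs_hash_bd_index_get()
 * recombines them as (2 << 3) | 5 = 21.
 */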
static inline void *
cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	return (void *)bd->bd_bucket +
	       cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
}
static inline __u32
cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
{
	/* need to hold cfs_hash_bd_lock */
	return bd->bd_bucket->hsb_version;
}

static inline __u32
cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
{
	/* need to hold cfs_hash_bd_lock */
	return bd->bd_bucket->hsb_count;
}

static inline int
cfs_hash_bd_depmax_get(struct cfs_hash_bd *bd)
{
	return bd->bd_bucket->hsb_depmax;
}
static inline int
cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
	if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
		return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;

	if (bd1->bd_offset != bd2->bd_offset)
		return bd1->bd_offset - bd2->bd_offset;

	return 0;
}
void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			    struct hlist_node *hnode);
void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			    struct hlist_node *hnode);
void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
			     struct cfs_hash_bd *bd_new,
			     struct hlist_node *hnode);

static inline int
cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			 atomic_t *condition)
{
	LASSERT(cfs_hash_with_spin_bktlock(hs));
	return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
}
static inline struct hlist_head *
cfs_hash_bd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	return hs->hs_hops->hop_hhead(hs, bd);
}
struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			  const void *key);
struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			const void *key);
struct hlist_node *
cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   const void *key, struct hlist_node *hnode,
			   int insist_add);
struct hlist_node *
cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   const void *key, struct hlist_node *hnode);
/**
 * operations on cfs_hash bucket (bd: bucket descriptor),
 * they are safe for hash-table with rehash
 */
void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
			  struct cfs_hash_bd *bds);
void cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			   int excl);
void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			     int excl);

static inline void
cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key,
			      struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_dual_bd_get(hs, key, bds);
	cfs_hash_dual_bd_lock(hs, bds, excl);
}

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			       const void *key);
struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode,
				int noref);
struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode);
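
/*
 * Example (a sketch): rehash-safe lookup under bucket locks. "bds" must
 * hold two descriptors because an object may live in either the old or
 * the new table while a rehash is in flight (see the Rehash notes above):
 *
 *	struct cfs_hash_bd bds[2];
 *	struct hlist_node *hnode;
 *
 *	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
 *	hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
 *	if (hnode != NULL) {
 *		// ... use cfs_hash_object(hs, hnode)
 *	}
 *	cfs_hash_dual_bd_unlock(hs, bds, 0);
 */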
/* Hash init/cleanup functions */
struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
		unsigned bkt_bits, unsigned extra_bytes,
		unsigned min_theta, unsigned max_theta,
		struct cfs_hash_ops *ops, unsigned flags);

struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
void cfs_hash_putref(struct cfs_hash *hs);
/* Hash addition functions */
void cfs_hash_add(struct cfs_hash *hs, const void *key,
		  struct hlist_node *hnode);
int cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
			struct hlist_node *hnode);
void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
			      struct hlist_node *hnode);

/* Hash deletion functions */
void *cfs_hash_del(struct cfs_hash *hs, const void *key,
		   struct hlist_node *hnode);
void *cfs_hash_del_key(struct cfs_hash *hs, const void *key);
/* Hash lookup/for_each functions */
#define CFS_HASH_LOOP_HOG	1024

typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs,
				      struct cfs_hash_bd *bd,
				      struct hlist_node *node,
				      void *data);
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key);
void
cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
void
cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
		       void *data);
int
cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
			 void *data, int start);
int
cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
			void *data);
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
		      cfs_hash_for_each_cb_t, void *data);
typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);

void
cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
			cfs_hash_for_each_cb_t, void *data);
int cfs_hash_is_empty(struct cfs_hash *hs);
__u64 cfs_hash_size_get(struct cfs_hash *hs);
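
/*
 * Example iterator (a sketch; foo_show_cb and its use are hypothetical).
 * The callback is invoked once per item, with the bucket descriptor of
 * the item; some iteration variants use a non-zero return value to stop
 * early:
 *
 *	static int foo_show_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			       struct hlist_node *hnode, void *data)
 *	{
 *		struct foo *obj = cfs_hash_object(hs, hnode);
 *
 *		// ... inspect obj
 *		return 0;
 *	}
 *
 *	cfs_hash_for_each(hs, foo_show_cb, NULL);
 */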
/*
 * Rehash - Theta is calculated to be the average chained
 * hash depth assuming a perfectly uniform hash function.
 */
void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
void cfs_hash_rehash_cancel(struct cfs_hash *hs);
void cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
			 void *new_key, struct hlist_node *hnode);
#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
/* Validate hnode references the correct key */
static inline void
cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
		      struct hlist_node *hnode)
{
	LASSERT(cfs_hash_keycmp(hs, key, hnode));
}

/* Validate hnode is in the correct bucket */
static inline void
cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			 struct hlist_node *hnode)
{
	struct cfs_hash_bd bds[2];

	cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
	LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
		bds[1].bd_bucket == bd->bd_bucket);
}

#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */

static inline void
cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
		      struct hlist_node *hnode) {}

static inline void
cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			 struct hlist_node *hnode) {}

#endif /* CFS_HASH_DEBUG_LEVEL */
#define CFS_HASH_THETA_BITS	10
#define CFS_HASH_MIN_THETA	(1U << (CFS_HASH_THETA_BITS - 1))
#define CFS_HASH_MAX_THETA	(1U << (CFS_HASH_THETA_BITS + 1))
/* Return integer component of theta */
static inline int __cfs_hash_theta_int(int theta)
{
	return (theta >> CFS_HASH_THETA_BITS);
}

/* Return a fractional value between 0 and 999 */
static inline int __cfs_hash_theta_frac(int theta)
{
	return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
	       (__cfs_hash_theta_int(theta) * 1000);
}

static inline int __cfs_hash_theta(struct cfs_hash *hs)
{
	return (atomic_read(&hs->hs_count) <<
		CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
}
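
/*
 * Worked example: theta is a fixed-point value with CFS_HASH_THETA_BITS
 * (10) fractional bits. With 2000 items and hs_cur_bits = 10,
 * __cfs_hash_theta() = (2000 << 10) >> 10 = 2000, which prints as
 * 1.953 via __cfs_hash_theta_int()/__cfs_hash_theta_frac(), i.e. an
 * average chain depth of about 2. Crossing hs_max_theta (the resize
 * max threshold; CFS_HASH_MAX_THETA is 2048 = 2.0) makes the table a
 * candidate for a grow-rehash when CFS_HASH_REHASH is set.
 */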
static inline void
__cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
{
	LASSERT(min < max);
	hs->hs_min_theta = (__u16)min;
	hs->hs_max_theta = (__u16)max;
}
/* Generic debug formatting routines mainly for proc handler */
struct seq_file;
void cfs_hash_debug_header(struct seq_file *m);
void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
/**
 * Generic djb2 hash algorithm for character arrays.
 */
static inline unsigned
cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
{
	unsigned i, hash = 5381;

	LASSERT(key != NULL);

	for (i = 0; i < size; i++)
		hash = hash * 33 + ((char *)key)[i];

	return (hash & mask);
}
/**
 * Generic u32 hash algorithm.
 */
static inline unsigned
cfs_hash_u32_hash(const __u32 key, unsigned mask)
{
	return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
}

/**
 * Generic u64 hash algorithm.
 */
static inline unsigned
cfs_hash_u64_hash(const __u64 key, unsigned mask)
{
	return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}
/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
#define cfs_hash_for_each_bd(bds, n, i)	\
	for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)

/** iterate over all buckets of @hs */
#define cfs_hash_for_each_bucket(hs, bd, pos)		\
	for (pos = 0;					\
	     pos < CFS_HASH_NBKT(hs) &&			\
	     ((bd)->bd_bucket = (hs)->hs_buckets[pos]) != NULL; pos++)
/** iterate over all hlists of bucket @bd */
#define cfs_hash_bd_for_each_hlist(hs, bd, hlist)	\
	for ((bd)->bd_offset = 0;			\
	     (bd)->bd_offset < CFS_HASH_BKT_NHLIST(hs) && \
	     (hlist = cfs_hash_bd_hhead(hs, bd)) != NULL; \
	     (bd)->bd_offset++)
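
/*
 * Example (a sketch): walk every hash list in the table, typically with
 * hs_lock held by the caller so the bucket table stays stable:
 *
 *	struct cfs_hash_bd bd;
 *	struct hlist_head *hlist;
 *	unsigned int pos;
 *
 *	cfs_hash_for_each_bucket(hs, &bd, pos) {
 *		cfs_hash_bd_lock(hs, &bd, 0);
 *		cfs_hash_bd_for_each_hlist(hs, &bd, hlist) {
 *			// ... scan hlist with hlist_for_each()
 *		}
 *		cfs_hash_bd_unlock(hs, &bd, 0);
 *	}
 */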
#endif /* __LIBCFS_HASH_H__ */