Whamcloud - gitweb
b=19557 actually make lustre_hash_for_each_empty() more efficient
[fs/lustre-release.git] / libcfs / include / libcfs / libcfs_hash.h
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * libcfs/include/libcfs/libcfs_hash.h
37  *
38  * Hashing routines
39  *
40  */
41
42 #ifndef __LIBCFS_HASH_H__
43 #define __LIBCFS_HASH_H__
44 /*
45  * Knuth recommends primes in approximately golden ratio to the maximum
46  * integer representable by a machine word for multiplicative hashing.
47  * Chuck Lever verified the effectiveness of this technique:
48  * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
49  *
50  * These primes are chosen to be bit-sparse, that is operations on
51  * them can use shifts and additions instead of multiplications for
52  * machines where multiplications are slow.
53  */
54 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
55 #define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
56 /*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
57 #define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
58
/*
 * Ideally we would use HAVE_HASH_LONG for this, but on linux we configure
 * the linux kernel and user space at the same time, so we need to differentiate
 * between them explicitly. If this is not needed on other architectures, then
 * we'll need to move the functions to arch-specific headers.
 */
65
66 #if (defined __linux__ && defined __KERNEL__)
67 #include <linux/hash.h>
68
69 #define cfs_hash_long(val, bits)    hash_long(val, bits)
70 #else
71 /* Fast hashing routine for a long.
72    (C) 2002 William Lee Irwin III, IBM */
73
74 #if BITS_PER_LONG == 32
75 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
76 #define CFS_GOLDEN_RATIO_PRIME          CFS_GOLDEN_RATIO_PRIME_32
77 #elif BITS_PER_LONG == 64
78 /*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
79 #define CFS_GOLDEN_RATIO_PRIME          CFS_GOLDEN_RATIO_PRIME_64
80 #else
81 #error Define CFS_GOLDEN_RATIO_PRIME for your wordsize.
82 #endif
83
/* Multiplicative hash of an unsigned long @val, returning its top @bits bits.
 * User-space / non-Linux fallback mirroring the kernel's hash_long(). */
static inline unsigned long cfs_hash_long(unsigned long val, unsigned int bits)
{
        unsigned long hash = val;

#if BITS_PER_LONG == 64
        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        /* Shift/add sequence computing hash * CFS_GOLDEN_RATIO_PRIME_64
         * (mod 2^64) without a multiply; each step folds in one
         * power-of-two term of the prime (2^63+2^61-2^57+2^54-2^51-2^18+1). */
        unsigned long n = hash;
        n <<= 18;
        hash -= n;      /* - 2^18 */
        n <<= 33;
        hash -= n;      /* - 2^51 */
        n <<= 3;
        hash += n;      /* + 2^54 */
        n <<= 3;
        hash -= n;      /* - 2^57 */
        n <<= 4;
        hash += n;      /* + 2^61 */
        n <<= 2;
        hash += n;      /* + 2^63 */
#else
        /* On some cpus multiply is faster, on others gcc will do shifts */
        hash *= CFS_GOLDEN_RATIO_PRIME;
#endif

        /* High bits are more random, so use them. */
        return hash >> (BITS_PER_LONG - bits);
}
#if 0
/* Hash a pointer value down to @bits bits.  Currently compiled out —
 * presumably to avoid clashing with the kernel's hash_ptr(); confirm
 * before re-enabling. */
static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
{
        return cfs_hash_long((unsigned long)ptr, bits);
}
#endif
117
118 /* !(__linux__ && __KERNEL__) */
119 #endif
120
121 struct cfs_hash_ops;
122
/* One hash chain: its entry list, entry count, and the rwlock that
 * serialises access to this bucket. */
typedef struct cfs_hash_bucket {
        cfs_hlist_head_t            hsb_head;       /* entries list */
        cfs_atomic_t                hsb_count;      /* current entries */
        cfs_rwlock_t                hsb_rwlock;     /* cfs_hash_bucket */
} cfs_hash_bucket_t;
128
129 #define CFS_MAX_HASH_NAME 16
130
/* A resizable hash table: the bucket array plus the bits/mask describing
 * its current size, theta-based resize thresholds, the caller-supplied
 * operations table, and a table-wide rwlock. */
typedef struct cfs_hash {
        int                         hs_cur_bits;    /* current hash bits */
        int                         hs_cur_mask;    /* current hash mask */
        int                         hs_min_bits;    /* min hash bits */
        int                         hs_max_bits;    /* max hash bits */
        int                         hs_min_theta;   /* resize min threshold */
        int                         hs_max_theta;   /* resize max threshold */
        int                         hs_flags;       /* hash flags */
        cfs_atomic_t                hs_count;       /* current entries */
        cfs_atomic_t                hs_rehash_count;/* resize count */
        struct cfs_hash_bucket    **hs_buckets;     /* hash buckets */
        struct cfs_hash_ops        *hs_ops;         /* hash operations */
        cfs_rwlock_t                hs_rwlock;      /* cfs_hash */
        char                        hs_name[CFS_MAX_HASH_NAME];
} cfs_hash_t;
146
/* Caller-provided operations.  hs_hash is mandatory (asserted in
 * cfs_hash_id()); the remaining callbacks are optional and are
 * NULL-checked by the cfs_hash_* wrappers before use. */
typedef struct cfs_hash_ops {
        /* map @key to a bucket index in [0, mask] */
        unsigned (*hs_hash)(cfs_hash_t *hs, void *key, unsigned mask);
        /* return the key stored in @hnode */
        void *   (*hs_key)(cfs_hlist_node_t *hnode);
        /* return > 0 when @key matches @hnode (see cfs_hash_compare) */
        int      (*hs_compare)(void *key, cfs_hlist_node_t *hnode);
        /* presumably take a reference on @hnode's object (called on add) */
        void *   (*hs_get)(cfs_hlist_node_t *hnode);
        /* presumably drop a reference on @hnode's object (called on del) */
        void *   (*hs_put)(cfs_hlist_node_t *hnode);
        /* final cleanup hook for @hnode */
        void     (*hs_exit)(cfs_hlist_node_t *hnode);
} cfs_hash_ops_t;
155
156 #define CFS_HASH_DEBUG          0x0001  /* Enable expensive debug checks */
157 #define CFS_HASH_REHASH         0x0002  /* Enable dynamic hash resizing */
158
159 #define CFS_HO(hs)             (hs)->hs_ops
160 #define CFS_HOP(hs, op)        (hs)->hs_ops->hs_ ## op
161
162 static inline unsigned
163 cfs_hash_id(cfs_hash_t *hs, void *key, unsigned mask)
164 {
165         LASSERT(hs);
166         LASSERT(CFS_HO(hs));
167         LASSERT(CFS_HOP(hs, hash));
168
169         return CFS_HOP(hs, hash)(hs, key, mask);
170 }
171
172 static inline void *
173 cfs_hash_key(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
174 {
175         LASSERT(hs);
176         LASSERT(hnode);
177         LASSERT(CFS_HO(hs));
178
179         if (CFS_HOP(hs, key))
180                 return CFS_HOP(hs, key)(hnode);
181
182         return NULL;
183 }
184
185 /* Returns 1 on a match,
186  * XXX: This would be better if it returned, -1, 0, or 1 for
187  *      <, =, > respectivly.  It could then be used to implement
188  *      a CFS_HASH_SORT feature flags which could keep each hash
189  *      bucket in order.  This would increase insertion times
190  *      but could reduce lookup times for deep chains.  Ideally,
191  *      the rehash should keep chain depth short but if that
192  *      ends up not being the case this would be a nice feature.
193  */
194 static inline int
195 cfs_hash_compare(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
196 {
197         LASSERT(hs);
198         LASSERT(hnode);
199         LASSERT(CFS_HO(hs));
200
201         if (CFS_HOP(hs, compare))
202                 return CFS_HOP(hs, compare)(key, hnode);
203
204         return -EOPNOTSUPP;
205 }
206
207 static inline void *
208 cfs_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
209 {
210         LASSERT(hs);
211         LASSERT(hnode);
212         LASSERT(CFS_HO(hs));
213
214         if (CFS_HOP(hs, get))
215                 return CFS_HOP(hs, get)(hnode);
216
217         return NULL;
218 }
219
220 static inline void *
221 cfs_hash_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
222 {
223         LASSERT(hs);
224         LASSERT(hnode);
225         LASSERT(CFS_HO(hs));
226
227         if (CFS_HOP(hs, put))
228                 return CFS_HOP(hs, put)(hnode);
229
230         return NULL;
231 }
232
233 static inline void
234 cfs_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
235 {
236         LASSERT(hs);
237         LASSERT(hnode);
238         LASSERT(CFS_HO(hs));
239
240         if (CFS_HOP(hs, exit))
241                 return CFS_HOP(hs, exit)(hnode);
242 }
243
244 /* Validate hnode references the correct key */
245 static inline void
246 __cfs_hash_key_validate(cfs_hash_t *hs, void *key,
247                         cfs_hlist_node_t *hnode)
248 {
249         if (unlikely(hs->hs_flags & CFS_HASH_DEBUG))
250                 LASSERT(cfs_hash_compare(hs, key, hnode) > 0);
251 }
252
253 /* Validate hnode is in the correct bucket */
254 static inline void
255 __cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bucket_t *hsb,
256                            cfs_hlist_node_t *hnode)
257 {
258         unsigned i;
259
260         if (unlikely(hs->hs_flags & CFS_HASH_DEBUG)) {
261                 i = cfs_hash_id(hs, cfs_hash_key(hs, hnode), hs->hs_cur_mask);
262                 LASSERT(hs->hs_buckets[i] == hsb);
263         }
264 }
265
266 static inline cfs_hlist_node_t *
267 __cfs_hash_bucket_lookup(cfs_hash_t *hs,
268                          cfs_hash_bucket_t *hsb, void *key)
269 {
270         cfs_hlist_node_t *hnode;
271
272         cfs_hlist_for_each(hnode, &hsb->hsb_head)
273                 if (cfs_hash_compare(hs, key, hnode) > 0)
274                         return hnode;
275
276         return NULL;
277 }
278
279 static inline void *
280 __cfs_hash_bucket_add(cfs_hash_t *hs,
281                       cfs_hash_bucket_t *hsb,
282                       cfs_hlist_node_t *hnode)
283 {
284         cfs_hlist_add_head(hnode, &(hsb->hsb_head));
285         cfs_atomic_inc(&hsb->hsb_count);
286         cfs_atomic_inc(&hs->hs_count);
287
288         return cfs_hash_get(hs, hnode);
289 }
290
291 static inline void *
292 __cfs_hash_bucket_del(cfs_hash_t *hs,
293                       cfs_hash_bucket_t *hsb,
294                       cfs_hlist_node_t *hnode)
295 {
296         cfs_hlist_del_init(hnode);
297         LASSERT(cfs_atomic_read(&hsb->hsb_count) > 0);
298         cfs_atomic_dec(&hsb->hsb_count);
299         LASSERT(cfs_atomic_read(&hs->hs_count) > 0);
300         cfs_atomic_dec(&hs->hs_count);
301
302         return cfs_hash_put(hs, hnode);
303 }
304
/* Hash init/cleanup functions */
cfs_hash_t *cfs_hash_create(char *name, unsigned int cur_bits,
                            unsigned int max_bits,
                            cfs_hash_ops_t *ops, int flags);
void cfs_hash_destroy(cfs_hash_t *hs);

/* Hash addition functions */
void cfs_hash_add(cfs_hash_t *hs, void *key,
                  cfs_hlist_node_t *hnode);
/* NOTE(review): exact return conventions of add_unique/findadd/del are
 * defined in the implementation file — confirm there before relying. */
int cfs_hash_add_unique(cfs_hash_t *hs, void *key,
                        cfs_hlist_node_t *hnode);
void *cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
                              cfs_hlist_node_t *hnode);

/* Hash deletion functions */
void *cfs_hash_del(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode);
void *cfs_hash_del_key(cfs_hash_t *hs, void *key);

/* Hash lookup/for_each functions */
void *cfs_hash_lookup(cfs_hash_t *hs, void *key);
/* iteration callback: invoked with each object and the caller's @data */
typedef void (*cfs_hash_for_each_cb_t)(void *obj, void *data);
void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_empty(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
                           cfs_hash_for_each_cb_t, void *data);

/*
 * Rehash - Theta is calculated to be the average chained
 * hash depth assuming a perfectly uniform hash function.
 */
int cfs_hash_rehash(cfs_hash_t *hs, int bits);
void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key,
                         void *new_key, cfs_hlist_node_t *hnode);
339
340
/* Theta values are fixed point with CFS_HASH_THETA_BITS fractional bits. */
#define CFS_HASH_THETA_BITS  10

/* Whole-number part of a fixed-point theta value. */
static inline int __cfs_hash_theta_int(int theta)
{
        return theta >> CFS_HASH_THETA_BITS;
}

/* Fractional part of a theta value, scaled to the range 0..999. */
static inline int __cfs_hash_theta_frac(int theta)
{
        int whole = __cfs_hash_theta_int(theta);

        return ((theta * 1000) >> CFS_HASH_THETA_BITS) - whole * 1000;
}
355
356 static inline int __cfs_hash_theta(cfs_hash_t *hs)
357 {
358         return (cfs_atomic_read(&hs->hs_count) <<
359                 CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
360 }
361
362 static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
363 {
364         LASSERT(min < max);
365         hs->hs_min_theta = min;
366         hs->hs_max_theta = max;
367 }
368
369 /* Generic debug formatting routines mainly for proc handler */
370 int cfs_hash_debug_header(char *str, int size);
371 int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size);
372
/*
 * Generic djb2 hash algorithm for character arrays: hash = hash * 33 + c,
 * seeded with 5381, masked down to the bucket range.
 */
static inline unsigned
cfs_hash_djb2_hash(void *key, size_t size, unsigned mask)
{
        const char *bytes = (const char *)key;
        unsigned    hash = 5381;
        unsigned    i;

        LASSERT(key != NULL);

        for (i = 0; i < size; i++)
                hash = hash * 33 + bytes[i];

        return hash & mask;
}
388
389 /*
390  * Generic u32 hash algorithm.
391  */
392 static inline unsigned
393 cfs_hash_u32_hash(__u32 key, unsigned mask)
394 {
395         return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
396 }
397
398 /*
399  * Generic u64 hash algorithm.
400  */
401 static inline unsigned
402 cfs_hash_u64_hash(__u64 key, unsigned mask)
403 {
404         return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
405 }
406
/* Walk every bucket: @pos runs 0..hs_cur_mask and @hsb is set to each
 * bucket in turn; terminates early if a bucket pointer is NULL. */
#define cfs_hash_for_each_bucket(hs, hsb, pos)   \
        for (pos = 0;                            \
             pos <= hs->hs_cur_mask &&           \
             (hsb = hs->hs_buckets[pos]);       \
             pos++)

/* As above, but continues from the caller's current @pos instead of
 * resetting to 0 — presumably to resume an interrupted walk; confirm
 * against callers. */
#define cfs_hash_for_each_bucket_restart(hs, hsb, pos)  \
        for (/* pos=0 done once by caller */;           \
             pos <= hs->hs_cur_mask &&                  \
             (hsb = hs->hs_buckets[pos]);              \
             pos++)
418 /* !__LIBCFS__HASH_H__ */
419 #endif