/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_hash.h
 *
 * Hashing routines
 *
 */

#ifndef __LIBCFS_HASH_H__
#define __LIBCFS_HASH_H__
/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse; that is, operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
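
/*
 * Sanity check (an illustrative note, not part of the original header):
 * expanding the 32-bit form above gives
 * 0x80000000 + 0x20000000 - 0x02000000 + 0x00400000 - 0x00080000
 * - 0x00010000 + 1 = 0x9e370001, matching the constant.
 */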

/*
 * Ideally we would use HAVE_HASH_LONG for this, but on Linux we configure
 * the Linux kernel and user space at the same time, so we need to
 * differentiate between them explicitly. If this is not needed on other
 * architectures, then we'll need to move the functions to arch-specific
 * headers.
 */

#if (defined __linux__ && defined __KERNEL__)
#include <linux/hash.h>

#define cfs_hash_long(val, bits)    hash_long(val, bits)
#else
/* Fast hashing routine for a long.
   (C) 2002 William Lee Irwin III, IBM */

#if BITS_PER_LONG == 32
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define CFS_GOLDEN_RATIO_PRIME          CFS_GOLDEN_RATIO_PRIME_32
#elif BITS_PER_LONG == 64
/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define CFS_GOLDEN_RATIO_PRIME          CFS_GOLDEN_RATIO_PRIME_64
#else
#error Define CFS_GOLDEN_RATIO_PRIME for your wordsize.
#endif

static inline unsigned long cfs_hash_long(unsigned long val, unsigned int bits)
{
        unsigned long hash = val;

#if BITS_PER_LONG == 64
        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        unsigned long n = hash;
        n <<= 18;
        hash -= n;
        n <<= 33;
        hash -= n;
        n <<= 3;
        hash += n;
        n <<= 3;
        hash -= n;
        n <<= 4;
        hash += n;
        n <<= 2;
        hash += n;
#else
        /* On some cpus multiply is faster, on others gcc will do shifts */
        hash *= CFS_GOLDEN_RATIO_PRIME;
#endif

        /* High bits are more random, so use them. */
        return hash >> (BITS_PER_LONG - bits);
}
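
/*
 * Worked example (an illustrative note, not part of the original
 * header): cfs_hash_long(val, 7) reduces an unsigned long to a bucket
 * index in [0, 127], suitable for a table of 1 << 7 entries.  The
 * multiply by the golden-ratio prime mixes key bits toward the high
 * word, which is why the top "bits" bits are kept rather than the low
 * ones:
 *
 *      unsigned long cookie = 0x12345678UL;
 *      unsigned long bucket = cfs_hash_long(cookie, 7);
 *
 *      LASSERT(bucket < (1UL << 7));
 */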
#if 0
static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
{
        return cfs_hash_long((unsigned long)ptr, bits);
}
#endif

/* !(__linux__ && __KERNEL__) */
#endif

struct cfs_hash_ops;

typedef struct cfs_hash_bucket {
        cfs_hlist_head_t            hsb_head;       /* entries list */
        cfs_atomic_t                hsb_count;      /* current entries */
        cfs_rwlock_t                hsb_rwlock;     /* cfs_hash_bucket */
} cfs_hash_bucket_t;

#define CFS_MAX_HASH_NAME 16

typedef struct cfs_hash {
        int                         hs_cur_bits;    /* current hash bits */
        int                         hs_cur_mask;    /* current hash mask */
        int                         hs_min_bits;    /* min hash bits */
        int                         hs_max_bits;    /* max hash bits */
        int                         hs_min_theta;   /* resize min threshold */
        int                         hs_max_theta;   /* resize max threshold */
        int                         hs_flags;       /* hash flags */
        cfs_atomic_t                hs_count;       /* current entries */
        cfs_atomic_t                hs_rehash_count;/* resize count */
        struct cfs_hash_bucket    **hs_buckets;     /* hash buckets */
        struct cfs_hash_ops        *hs_ops;         /* hash operations */
        cfs_rwlock_t                hs_rwlock;      /* cfs_hash */
        cfs_atomic_t                hs_refcount;    /* hash table refcount */
        char                        hs_name[CFS_MAX_HASH_NAME]; /* hash name */
} cfs_hash_t;

typedef struct cfs_hash_ops {
        unsigned (*hs_hash)(cfs_hash_t *hs, void *key, unsigned mask);
        void *   (*hs_key)(cfs_hlist_node_t *hnode);
        int      (*hs_compare)(void *key, cfs_hlist_node_t *hnode);
        void *   (*hs_get)(cfs_hlist_node_t *hnode);
        void *   (*hs_put)(cfs_hlist_node_t *hnode);
        void     (*hs_exit)(cfs_hlist_node_t *hnode);
} cfs_hash_ops_t;
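
/*
 * Example cfs_hash_ops_t table (an illustrative sketch, not part of the
 * original header): hashing hypothetical "struct my_obj" items by a
 * __u64 id.  All my_* names are assumptions made for the example.
 * hs_compare returns 1 on a match, as cfs_hash_compare() below expects;
 * hs_get/hs_put drive a per-object reference count so an entry stays
 * pinned while the hash (or a lookup caller) references it.
 *
 *      struct my_obj {
 *              __u64                   mo_id;
 *              cfs_atomic_t            mo_refs;
 *              cfs_hlist_node_t        mo_hnode;
 *      };
 *
 *      static unsigned my_hash(cfs_hash_t *hs, void *key, unsigned mask)
 *      {
 *              return cfs_hash_u64_hash(*(__u64 *)key, mask);
 *      }
 *
 *      static void *my_key(cfs_hlist_node_t *hnode)
 *      {
 *              return &cfs_hlist_entry(hnode, struct my_obj,
 *                                      mo_hnode)->mo_id;
 *      }
 *
 *      static int my_compare(void *key, cfs_hlist_node_t *hnode)
 *      {
 *              struct my_obj *obj = cfs_hlist_entry(hnode, struct my_obj,
 *                                                   mo_hnode);
 *              return obj->mo_id == *(__u64 *)key;
 *      }
 *
 *      static void *my_get(cfs_hlist_node_t *hnode)
 *      {
 *              struct my_obj *obj = cfs_hlist_entry(hnode, struct my_obj,
 *                                                   mo_hnode);
 *              cfs_atomic_inc(&obj->mo_refs);
 *              return obj;
 *      }
 *
 *      static void *my_put(cfs_hlist_node_t *hnode)
 *      {
 *              struct my_obj *obj = cfs_hlist_entry(hnode, struct my_obj,
 *                                                   mo_hnode);
 *              cfs_atomic_dec(&obj->mo_refs);
 *              return obj;
 *      }
 *
 *      static cfs_hash_ops_t my_obj_hash_ops = {
 *              .hs_hash    = my_hash,
 *              .hs_key     = my_key,
 *              .hs_compare = my_compare,
 *              .hs_get     = my_get,
 *              .hs_put     = my_put,
 *      };
 *
 * cfs_hash_u64_hash() is defined later in this file; cfs_hlist_entry()
 * is assumed from the libcfs list API.
 */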

#define CFS_HASH_DEBUG          0x0001  /* Enable expensive debug checks */
#define CFS_HASH_REHASH         0x0002  /* Enable dynamic hash resizing */

#define CFS_HO(hs)             (hs)->hs_ops
#define CFS_HOP(hs, op)        (hs)->hs_ops->hs_ ## op

static inline unsigned
cfs_hash_id(cfs_hash_t *hs, void *key, unsigned mask)
{
        LASSERT(hs);
        LASSERT(CFS_HO(hs));
        LASSERT(CFS_HOP(hs, hash));

        return CFS_HOP(hs, hash)(hs, key, mask);
}

static inline void *
cfs_hash_key(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LASSERT(hs);
        LASSERT(hnode);
        LASSERT(CFS_HO(hs));
        LASSERT(CFS_HOP(hs, key));

        return CFS_HOP(hs, key)(hnode);
}

/* Returns 1 on a match.
 * XXX: This would be better if it returned -1, 0, or 1 for
 *      <, =, > respectively.  It could then be used to implement
 *      a CFS_HASH_SORT feature flag which could keep each hash
 *      bucket in order.  This would increase insertion times
 *      but could reduce lookup times for deep chains.  Ideally,
 *      the rehash should keep chain depth short, but if that
 *      ends up not being the case this would be a nice feature.
 */
static inline int
cfs_hash_compare(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)
{
        LASSERT(hs);
        LASSERT(hnode);
        LASSERT(CFS_HO(hs));

        if (CFS_HOP(hs, compare))
                return CFS_HOP(hs, compare)(key, hnode);

        return -EOPNOTSUPP;
}

static inline void *
cfs_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LASSERT(hs);
        LASSERT(hnode);
        LASSERT(CFS_HO(hs));

        if (CFS_HOP(hs, get))
                return CFS_HOP(hs, get)(hnode);

        return NULL;
}

static inline void *
cfs_hash_put(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LASSERT(hs);
        LASSERT(hnode);
        LASSERT(CFS_HO(hs));

        if (CFS_HOP(hs, put))
                return CFS_HOP(hs, put)(hnode);

        return NULL;
}

static inline void
cfs_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LASSERT(hs);
        LASSERT(hnode);
        LASSERT(CFS_HO(hs));

        /* Void function; don't return the (void) call expression. */
        if (CFS_HOP(hs, exit))
                CFS_HOP(hs, exit)(hnode);
}

/* Validate hnode references the correct key */
static inline void
__cfs_hash_key_validate(cfs_hash_t *hs, void *key,
                        cfs_hlist_node_t *hnode)
{
        if (unlikely(hs->hs_flags & CFS_HASH_DEBUG))
                LASSERT(cfs_hash_compare(hs, key, hnode) > 0);
}

/* Validate hnode is in the correct bucket */
static inline void
__cfs_hash_bucket_validate(cfs_hash_t *hs, cfs_hash_bucket_t *hsb,
                           cfs_hlist_node_t *hnode)
{
        unsigned i;

        if (unlikely(hs->hs_flags & CFS_HASH_DEBUG)) {
                i = cfs_hash_id(hs, cfs_hash_key(hs, hnode), hs->hs_cur_mask);
                LASSERT(hs->hs_buckets[i] == hsb);
        }
}

static inline cfs_hlist_node_t *
__cfs_hash_bucket_lookup(cfs_hash_t *hs,
                         cfs_hash_bucket_t *hsb, void *key)
{
        cfs_hlist_node_t *hnode;

        cfs_hlist_for_each(hnode, &hsb->hsb_head)
                if (cfs_hash_compare(hs, key, hnode) > 0)
                        return hnode;

        return NULL;
}

static inline void *
__cfs_hash_bucket_add(cfs_hash_t *hs,
                      cfs_hash_bucket_t *hsb,
                      cfs_hlist_node_t *hnode)
{
        cfs_hlist_add_head(hnode, &(hsb->hsb_head));
        cfs_atomic_inc(&hsb->hsb_count);
        cfs_atomic_inc(&hs->hs_count);

        return cfs_hash_get(hs, hnode);
}

static inline void *
__cfs_hash_bucket_del(cfs_hash_t *hs,
                      cfs_hash_bucket_t *hsb,
                      cfs_hlist_node_t *hnode)
{
        cfs_hlist_del_init(hnode);
        LASSERT(cfs_atomic_read(&hsb->hsb_count) > 0);
        cfs_atomic_dec(&hsb->hsb_count);
        LASSERT(cfs_atomic_read(&hs->hs_count) > 0);
        cfs_atomic_dec(&hs->hs_count);

        return cfs_hash_put(hs, hnode);
}

/* Hash init/cleanup functions */
cfs_hash_t *cfs_hash_create(char *name, unsigned int cur_bits,
                            unsigned int max_bits,
                            cfs_hash_ops_t *ops, int flags);
cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs);
void cfs_hash_putref(cfs_hash_t *hs);

/* Hash addition functions */
void cfs_hash_add(cfs_hash_t *hs, void *key,
                  cfs_hlist_node_t *hnode);
int cfs_hash_add_unique(cfs_hash_t *hs, void *key,
                        cfs_hlist_node_t *hnode);
void *cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
                              cfs_hlist_node_t *hnode);

/* Hash deletion functions */
void *cfs_hash_del(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode);
void *cfs_hash_del_key(cfs_hash_t *hs, void *key);

/* Hash lookup/for_each functions */
void *cfs_hash_lookup(cfs_hash_t *hs, void *key);
typedef void (*cfs_hash_for_each_cb_t)(void *obj, void *data);
void cfs_hash_for_each(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_safe(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_empty(cfs_hash_t *hs, cfs_hash_for_each_cb_t, void *data);
void cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
                           cfs_hash_for_each_cb_t, void *data);
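
/*
 * Example usage of the API above (an illustrative sketch; my_obj and
 * my_obj_hash_ops are the hypothetical names from the cfs_hash_ops_t
 * example earlier in this file, and obj is an already-initialized
 * object):
 *
 *      cfs_hash_t      *hs;
 *      struct my_obj   *obj, *found;
 *
 *      hs = cfs_hash_create("my_objs", 5, 10, &my_obj_hash_ops,
 *                           CFS_HASH_REHASH);
 *      if (hs == NULL)
 *              return -ENOMEM;
 *
 *      cfs_hash_add(hs, &obj->mo_id, &obj->mo_hnode);
 *
 *      found = cfs_hash_lookup(hs, &obj->mo_id);
 *      if (found != NULL) {
 *              ... use found, then release the reference taken
 *                  through hs_get by the lookup ...
 *      }
 *
 *      cfs_hash_del(hs, &obj->mo_id, &obj->mo_hnode);
 *      cfs_hash_putref(hs);
 *
 * Creating the table with 2^5 buckets and CFS_HASH_REHASH set lets it
 * be resized up to 2^10 buckets as the load (theta, below) crosses the
 * configured thresholds.
 */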

/*
 * Rehash - Theta is calculated to be the average chained
 * hash depth assuming a perfectly uniform hash function.
 */
int cfs_hash_rehash(cfs_hash_t *hs, int bits);
void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key,
                         void *new_key, cfs_hlist_node_t *hnode);


#define CFS_HASH_THETA_BITS  10

/* Return integer component of theta */
static inline int __cfs_hash_theta_int(int theta)
{
        return (theta >> CFS_HASH_THETA_BITS);
}

/* Return a fractional value between 0 and 999 */
static inline int __cfs_hash_theta_frac(int theta)
{
        return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
               (__cfs_hash_theta_int(theta) * 1000);
}

static inline int __cfs_hash_theta(cfs_hash_t *hs)
{
        return (cfs_atomic_read(&hs->hs_count) <<
                CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
}
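
/*
 * Worked example (illustrative): with hs_count = 2000 entries and
 * hs_cur_bits = 10 (1024 buckets), __cfs_hash_theta() returns
 * (2000 << 10) >> 10 = 2000.  Then __cfs_hash_theta_int() gives
 * 2000 >> 10 = 1 and __cfs_hash_theta_frac() gives
 * ((2000 * 1000) >> 10) - 1000 = 953, i.e. theta ~= 1.953: the average
 * chain depth 2000 / 1024 in fixed point with CFS_HASH_THETA_BITS of
 * fraction.
 */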

static inline void __cfs_hash_set_theta(cfs_hash_t *hs, int min, int max)
{
        LASSERT(min < max);
        hs->hs_min_theta = min;
        hs->hs_max_theta = max;
}

/* Generic debug formatting routines mainly for proc handler */
int cfs_hash_debug_header(char *str, int size);
int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size);

/*
 * Generic djb2 hash algorithm for character arrays.
 */
static inline unsigned
cfs_hash_djb2_hash(void *key, size_t size, unsigned mask)
{
        unsigned i, hash = 5381;

        LASSERT(key != NULL);

        for (i = 0; i < size; i++)
                hash = hash * 33 + ((char *)key)[i];

        return (hash & mask);
}
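
/*
 * Example (illustrative): hashing a NUL-terminated name into the
 * current table.  The mask must have the 2^n - 1 form, as hs_cur_mask
 * does:
 *
 *      const char *name = "some_object";
 *      unsigned    bucket;
 *
 *      bucket = cfs_hash_djb2_hash((void *)name, strlen(name),
 *                                  hs->hs_cur_mask);
 */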

/*
 * Generic u32 hash algorithm.
 */
static inline unsigned
cfs_hash_u32_hash(__u32 key, unsigned mask)
{
        return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
}

/*
 * Generic u64 hash algorithm.
 */
static inline unsigned
cfs_hash_u64_hash(__u64 key, unsigned mask)
{
        return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}

#define cfs_hash_for_each_bucket(hs, hsb, pos)   \
        for (pos = 0;                            \
             pos <= hs->hs_cur_mask &&           \
             (hsb = hs->hs_buckets[pos]);        \
             pos++)

#define cfs_hash_for_each_bucket_restart(hs, hsb, pos)  \
        for (/* pos=0 done once by caller */;           \
             pos <= hs->hs_cur_mask &&                  \
             (hsb = hs->hs_buckets[pos]);               \
             pos++)
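
/*
 * Example (an illustrative sketch): walking every bucket to total the
 * per-bucket entry counts, presumably with hs_rwlock held so that a
 * concurrent rehash cannot replace hs_buckets mid-walk:
 *
 *      cfs_hash_bucket_t *hsb;
 *      int                pos;
 *      int                total = 0;
 *
 *      cfs_hash_for_each_bucket(hs, hsb, pos)
 *              total += cfs_atomic_read(&hsb->hsb_count);
 */
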
/* !__LIBCFS_HASH_H__ */
#endif