1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 #ifndef __CLASS_HASH_H
38 #define __CLASS_HASH_H
40 #include <lustre_lib.h>
42 struct lustre_hash_ops;
/* A single hash bucket: the head of an hlist chain of entries.
 * NOTE(review): a per-bucket atomic counter lhb_count is referenced by
 * __lustre_hash_bucket_add()/del() below and is presumably declared in
 * this struct as well -- confirm against the full header. */
typedef struct lustre_hash_bucket {
        struct hlist_head lhb_head;             /* chain of hash entries */
} lustre_hash_bucket_t;
/* Hash table descriptor.  Sizes are counted in buckets; "theta" is the
 * average chain depth scaled by 1000 (see __lustre_hash_theta()) and
 * the min/max theta values bound when a rehash is triggered. */
typedef struct lustre_hash {
        unsigned int lh_name_size;      /* size of the name buffer -- TODO confirm */
        unsigned int lh_cur_size;       /* current number of buckets; kept a
                                         * power of two (masking with
                                         * lh_cur_size - 1 below relies on it) */
        unsigned int lh_min_size;       /* lower bound for dynamic resizing */
        unsigned int lh_max_size;       /* upper bound for dynamic resizing */
        /*
         * Resize min threshold (theta, x1000).
         */
        unsigned int lh_min_theta;
        /*
         * Resize max threshold (theta, x1000).
         */
        unsigned int lh_max_theta;
        atomic_t lh_rehash_count;       /* number of rehashes performed */
        struct lustre_hash_bucket *lh_buckets;  /* bucket array */
        struct lustre_hash_ops *lh_ops;         /* per-hash-type callbacks */
        /*
         * Protects lustre_hash (lock member is outside this excerpt).
         */
/* Per-hash-type callback table.  Every entry is optional: the lh_*()
 * inline wrappers below check for a NULL table and NULL members before
 * dispatching. */
typedef struct lustre_hash_ops {
        /* map @key to a bucket index; @mask is lh_cur_size - 1 */
        unsigned (*lh_hash)(lustre_hash_t *lh, void *key, unsigned mask);
        /* return the key embedded in @hnode */
        void * (*lh_key)(struct hlist_node *hnode);
        /* nonzero when @key matches the key of @hnode (see XXX below) */
        int (*lh_compare)(void *key, struct hlist_node *hnode);
        /* take a reference on the object containing @hnode */
        void * (*lh_get)(struct hlist_node *hnode);
        /* drop a reference on the object containing @hnode */
        void * (*lh_put)(struct hlist_node *hnode);
        /* final teardown hook for @hnode */
        void (*lh_exit)(struct hlist_node *hnode);
/*
 * Enable expensive debug checks (key/bucket validation below).
 */
#define LH_DEBUG 0x0001
/*
 * Enable dynamic hash resizing.
 */
#define LH_REHASH 0x0002

/* Accessors for the optional ops table: LHO() yields the table,
 * LHP(lh, op) yields the lh_<op> member via token pasting. */
#define LHO(lh) (lh)->lh_ops
#define LHP(lh, op) (lh)->lh_ops->lh_ ## op
/* Compute the bucket index for @key through the registered lh_hash
 * callback, if any.  NOTE(review): the fall-through return value when
 * no callback is registered is outside this excerpt. */
static inline unsigned
lh_hash(lustre_hash_t *lh, void *key, unsigned mask)

        if (LHO(lh) && LHP(lh, hash))
                return LHP(lh, hash)(lh, key, mask);
/* Return the key embedded in @hnode via the lh_key callback, if one
 * is registered (fall-through return is outside this excerpt). */
lh_key(lustre_hash_t *lh, struct hlist_node *hnode)

        if (LHO(lh) && LHP(lh, key))
                return LHP(lh, key)(hnode);
/* Returns 1 on a match,
 * XXX: This would be better if it returned -1, 0, or 1 for
 * <, =, > respectively.  It could then be used to implement
 * a LH_SORT feature flag which could keep each lustre hash
 * bucket in order.  This would increase insertion times
 * but could reduce lookup times for deep chains.  Ideally,
 * the rehash should keep chain depth short but if that
 * ends up not being the case this would be a nice feature.
 */
lh_compare(lustre_hash_t *lh, void *key, struct hlist_node *hnode)

        if (LHO(lh) && LHP(lh, compare))
                return LHP(lh, compare)(key, hnode);
/* Take a reference on the object containing @hnode via the lh_get
 * callback, if one is registered. */
lh_get(lustre_hash_t *lh, struct hlist_node *hnode)

        if (LHO(lh) && LHP(lh, get))
                return LHP(lh, get)(hnode);
/* Drop a reference on the object containing @hnode via the lh_put
 * callback, if one is registered. */
lh_put(lustre_hash_t *lh, struct hlist_node *hnode)

        if (LHO(lh) && LHP(lh, put))
                return LHP(lh, put)(hnode);
/* Invoke the final-teardown lh_exit callback for @hnode, if one is
 * registered. */
lh_exit(lustre_hash_t *lh, struct hlist_node *hnode)

        if (LHO(lh) && LHP(lh, exit))
                return LHP(lh, exit)(hnode);
/*
 * Validate hnode references the correct key.  Compiled in always, but
 * active only when the hash was created with LH_DEBUG set.
 */
__lustre_hash_key_validate(lustre_hash_t *lh, void *key,
                           struct hlist_node *hnode)

        if (unlikely(lh->lh_flags & LH_DEBUG))
                LASSERT(lh_compare(lh, key, hnode));
/*
 * Validate hnode is in the correct bucket: rehash the node's own key
 * and assert it lands on @lhb.  The "lh_cur_size - 1" mask relies on
 * the bucket count being a power of two.  LH_DEBUG-gated.
 */
__lustre_hash_bucket_validate(lustre_hash_t *lh, lustre_hash_bucket_t *lhb,
                              struct hlist_node *hnode)

        if (unlikely(lh->lh_flags & LH_DEBUG)) {
                i = lh_hash(lh, lh_key(lh, hnode), lh->lh_cur_size - 1);
                LASSERT(&lh->lh_buckets[i] == lhb);
/* Walk bucket @lhb looking for an entry whose key matches @key.
 * NOTE(review): the match/miss return statements are outside this
 * excerpt; presumably returns the matching hnode or NULL. */
static inline struct hlist_node *
__lustre_hash_bucket_lookup(lustre_hash_t *lh,
                            lustre_hash_bucket_t *lhb, void *key)

        struct hlist_node *hnode;

        hlist_for_each(hnode, &lhb->lhb_head)
                if (lh_compare(lh, key, hnode))
/* Link @hnode at the head of bucket @lhb, bump the per-bucket and
 * per-hash entry counts, and return a new reference on the object
 * (lh_get).  Caller is responsible for any locking. */
__lustre_hash_bucket_add(lustre_hash_t *lh,
                         lustre_hash_bucket_t *lhb,
                         struct hlist_node *hnode)

        hlist_add_head(hnode, &(lhb->lhb_head));
        atomic_inc(&lhb->lhb_count);
        atomic_inc(&lh->lh_count);

        return lh_get(lh, hnode);
/* Unlink @hnode from its bucket, decrement the per-bucket and
 * per-hash entry counts, and drop the hash's reference (lh_put).
 * Mirrors __lustre_hash_bucket_add(); caller handles locking. */
__lustre_hash_bucket_del(lustre_hash_t *lh,
                         lustre_hash_bucket_t *lhb,
                         struct hlist_node *hnode)

        hlist_del_init(hnode);
        atomic_dec(&lhb->lhb_count);
        atomic_dec(&lh->lh_count);

        return lh_put(lh, hnode);
/*
 * Hash init/cleanup functions.
 */
lustre_hash_t *lustre_hash_init(char *name, unsigned int cur_size,
                                unsigned int max_size,
                                lustre_hash_ops_t *ops, int flags);
void lustre_hash_exit(lustre_hash_t *lh);

/*
 * Hash addition functions.
 */
void lustre_hash_add(lustre_hash_t *lh, void *key,
                     struct hlist_node *hnode);
int lustre_hash_add_unique(lustre_hash_t *lh, void *key,
                           struct hlist_node *hnode);
void *lustre_hash_findadd_unique(lustre_hash_t *lh, void *key,
                                 struct hlist_node *hnode);

/*
 * Hash deletion functions.
 */
void *lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode);
void *lustre_hash_del_key(lustre_hash_t *lh, void *key);

/*
 * Hash lookup/for_each functions.
 */
void *lustre_hash_lookup(lustre_hash_t *lh, void *key);
typedef void (*lh_for_each_cb)(void *obj, void *data);
void lustre_hash_for_each(lustre_hash_t *lh, lh_for_each_cb, void *data);
void lustre_hash_for_each_safe(lustre_hash_t *lh, lh_for_each_cb, void *data);
void lustre_hash_for_each_empty(lustre_hash_t *lh, lh_for_each_cb, void *data);
void lustre_hash_for_each_key(lustre_hash_t *lh, void *key,
                              lh_for_each_cb, void *data);

/*
 * Rehash - theta is calculated to be the average chained
 * hash depth assuming a perfectly uniform hash function.
 */
int lustre_hash_rehash(lustre_hash_t *lh, int size);
void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key,
                            void *new_key, struct hlist_node *hnode);
/* Average chain depth scaled by 1000: (entries * 1000) / buckets.
 * NOTE(review): assumes lh_cur_size != 0 (division). */
__lustre_hash_theta(lustre_hash_t *lh)

        return ((atomic_read(&lh->lh_count) * 1000) / lh->lh_cur_size);
328 __lustre_hash_set_theta(lustre_hash_t *lh, int min, int max)
331 lh->lh_min_theta = min;
332 lh->lh_min_theta = max;
/*
 * Generic debug formatting routines mainly for proc handler.
 * Both write into @str (at most @size bytes).
 */
int lustre_hash_debug_header(char *str, int size);
int lustre_hash_debug_str(lustre_hash_t *lh, char *str, int size);

/* Multiplicative-hash constant for 32-bit keys:
 * 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1
 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* Multiplicative-hash constant for 64-bit keys:
 * 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1
 */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
/*
 * Generic djb2 hash algorithm for character arrays:
 * hash = hash * 33 + byte, seeded with 5381.
 * NOTE(review): the final masked return is outside this excerpt.
 * NOTE(review): ((char *)key)[i] is signed on most ABIs, so bytes
 * >= 0x80 mix in as negative values -- consider unsigned char.
 */
static inline unsigned
lh_djb2_hash(void *key, size_t size, unsigned mask)

        unsigned i, hash = 5381;

        LASSERT(key != NULL);

        for (i = 0; i < size; i++)
                hash = hash * 33 + ((char *)key)[i];
/*
 * Generic u32 hash algorithm: multiplicative hash with the 32-bit
 * golden-ratio prime, masked to the bucket range.
 */
static inline unsigned
lh_u32_hash(__u32 key, unsigned mask)

        RETURN((key * GOLDEN_RATIO_PRIME_32) & mask);
/*
 * Generic u64 hash algorithm: multiplicative hash with the 64-bit
 * golden-ratio prime, truncated to unsigned and masked.
 */
static inline unsigned
lh_u64_hash(__u64 key, unsigned mask)

        RETURN((unsigned)(key * GOLDEN_RATIO_PRIME_64) & mask);
/* Iterate over every bucket: @pos runs over [0, lh_cur_size) and @lhb
 * points at the bucket under scan.
 * BUG FIX: the bucket index used the free identifier "i" instead of
 * the macro parameter "pos", silently capturing whatever "i" happened
 * to be in scope at the expansion site (or failing to compile). */
#define lh_for_each_bucket(lh, lhb, pos) \
        pos < lh->lh_cur_size && \
        ({ lhb = &lh->lh_buckets[pos]; 1; }); \
392 #endif /* __CLASS_HASH_H */