/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for hashing in the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - moved everything to libcfs
 * - don't allow cur_bits != max_bits without CFS_HASH_REHASH set
 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
 * - buckets are allocated one by one (instead of contiguous memory),
 *   to avoid unnecessary cacheline conflicts
 */
#include <libcfs/libcfs.h>

static void cfs_hash_destroy(cfs_hash_t *hs);

cfs_hash_rlock(cfs_hash_t *hs)
        if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
                cfs_read_lock(&hs->hs_rwlock);

cfs_hash_runlock(cfs_hash_t *hs)
        if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
                cfs_read_unlock(&hs->hs_rwlock);

cfs_hash_wlock(cfs_hash_t *hs)
        if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
                cfs_write_lock(&hs->hs_rwlock);

cfs_hash_wunlock(cfs_hash_t *hs)
        if ((hs->hs_flags & CFS_HASH_REHASH) != 0)
                cfs_write_unlock(&hs->hs_rwlock);
/**
 * Initialize a new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
 *           - CFS_HASH_SORT enable chained hash sort
 */
cfs_hash_create(char *name, unsigned int cur_bits,
                unsigned int max_bits, cfs_hash_ops_t *ops, int flags)

        LASSERT(name != NULL);
        LASSERT(ops != NULL);

        LASSERT(cur_bits > 0);
        LASSERT(max_bits >= cur_bits);
        LASSERT(max_bits < 31);
        LASSERT(cur_bits == max_bits || (flags & CFS_HASH_REHASH) != 0);

        strncpy(hs->hs_name, name, sizeof(hs->hs_name));
        hs->hs_name[sizeof(hs->hs_name) - 1] = '\0';
        cfs_atomic_set(&hs->hs_rehash_count, 0);
        cfs_atomic_set(&hs->hs_refcount, 1);
        cfs_atomic_set(&hs->hs_count, 0);
        cfs_rwlock_init(&hs->hs_rwlock);
        hs->hs_cur_bits = cur_bits;
        hs->hs_cur_mask = (1 << cur_bits) - 1;
        hs->hs_min_bits = cur_bits;
        hs->hs_max_bits = max_bits;
        /* XXX: need to fixup cfs_hash_rehash_bits() before this can be
         *      anything other than 0.5 and 2.0 */
        hs->hs_min_theta = 1 << (CFS_HASH_THETA_BITS - 1);
        hs->hs_max_theta = 1 << (CFS_HASH_THETA_BITS + 1);

        hs->hs_flags = flags;

        __cfs_hash_set_theta(hs, 500, 2000);

        LIBCFS_ALLOC(hs->hs_buckets,
                     sizeof(*hs->hs_buckets) << hs->hs_cur_bits);
        if (hs->hs_buckets == NULL) {

        for (i = 0; i <= hs->hs_cur_mask; i++) {
                CFS_ALLOC_PTR(hs->hs_buckets[i]);
                if (hs->hs_buckets[i] == NULL) {
                        cfs_hash_destroy(hs);

                CFS_INIT_HLIST_HEAD(&hs->hs_buckets[i]->hsb_head);
                cfs_rwlock_init(&hs->hs_buckets[i]->hsb_rwlock);
                cfs_atomic_set(&hs->hs_buckets[i]->hsb_count, 0);

CFS_EXPORT_SYMBOL(cfs_hash_create);
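
/*
 * Illustrative sketch (hypothetical caller-side code, not part of this
 * module): a user embeds a cfs_hlist_node_t in its own object and registers
 * a cfs_hash_ops_t whose callbacks hash the key, compare keys, and take or
 * drop object references; my_obj_ops and the 7/12 bit sizes below are
 * made-up caller-side values:
 *
 *      cfs_hash_t *h;
 *
 *      h = cfs_hash_create("my_objects", 7, 12, &my_obj_ops,
 *                          CFS_HASH_REHASH);
 *      if (h == NULL)
 *              return -ENOMEM;
 */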
/**
 * Cleanup libcfs hash @hs.
 */
cfs_hash_destroy(cfs_hash_t *hs)

        cfs_hash_bucket_t *hsb;
        cfs_hlist_node_t *hnode;
        cfs_hlist_node_t *pos;

        cfs_hash_for_each_bucket(hs, hsb, i) {

                cfs_write_lock(&hsb->hsb_rwlock);
                cfs_hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
                        __cfs_hash_bucket_validate(hs, hsb, hnode);
                        __cfs_hash_bucket_del(hs, hsb, hnode);
                        cfs_hash_exit(hs, hnode);

                LASSERT(cfs_hlist_empty(&(hsb->hsb_head)));
                LASSERT(cfs_atomic_read(&hsb->hsb_count) == 0);
                cfs_write_unlock(&hsb->hsb_rwlock);

        LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
        cfs_hash_wunlock(hs);

        LIBCFS_FREE(hs->hs_buckets,
                    sizeof(*hs->hs_buckets) << hs->hs_cur_bits);

cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)

        if (cfs_atomic_inc_not_zero(&hs->hs_refcount))

CFS_EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(cfs_hash_t *hs)

        if (cfs_atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);

CFS_EXPORT_SYMBOL(cfs_hash_putref);
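
/*
 * Illustrative sketch (hypothetical caller code): a secondary user pins the
 * hash with its own reference; the final cfs_hash_putref() is what ends up
 * calling cfs_hash_destroy():
 *
 *      cfs_hash_t *h = cfs_hash_getref(shared_hash);
 *
 *      if (h != NULL) {
 *              ... use h ...
 *              cfs_hash_putref(h);
 *      }
 */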
static inline unsigned int
cfs_hash_rehash_bits(cfs_hash_t *hs)

        if (!(hs->hs_flags & CFS_HASH_REHASH))

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;
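
/*
 * Worked example (this assumes __cfs_hash_theta() reports the average
 * items-per-bucket in CFS_HASH_THETA_BITS fixed point, which is what the
 * 0.5/2.0 defaults set up in cfs_hash_create() suggest): with cur_bits = 10
 * (1024 buckets) and 5000 hashed items, theta is roughly 4.9 > 2.0, so
 * cfs_hash_rehash_bits() suggests growing to cur_bits + 1 (2048 buckets);
 * the same table holding 400 items has theta roughly 0.39 < 0.5, so it
 * suggests shrinking to cur_bits - 1, provided cur_bits > hs_min_bits.
 */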
/**
 * Add item @hnode to libcfs hash @hs using @key. The registered
 * ops->hs_get function will be called when the item is added.
 */
cfs_hash_add(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)

        cfs_hash_bucket_t *hsb;

        __cfs_hash_key_validate(hs, key, hnode);

        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
        hsb = hs->hs_buckets[i];
        LASSERT(i <= hs->hs_cur_mask);
        LASSERT(cfs_hlist_unhashed(hnode));

        cfs_write_lock(&hsb->hsb_rwlock);
        __cfs_hash_bucket_add(hs, hsb, hnode);
        cfs_write_unlock(&hsb->hsb_rwlock);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_runlock(hs);

        cfs_hash_rehash(hs, bits);

CFS_EXPORT_SYMBOL(cfs_hash_add);
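
/*
 * Illustrative sketch (hypothetical caller-side structure and field names):
 * the caller owns both the key and the cfs_hlist_node_t that gets linked
 * into the hash; the node must be initialized (unhashed) before the call:
 *
 *      struct my_obj {
 *              __u64            mo_key;
 *              cfs_hlist_node_t mo_hnode;
 *              cfs_atomic_t     mo_refcount;
 *      };
 *
 *      cfs_hash_add(h, &obj->mo_key, &obj->mo_hnode);
 */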
static cfs_hlist_node_t *
cfs_hash_findadd_unique_hnode(cfs_hash_t *hs, void *key,
                              cfs_hlist_node_t *hnode)

        cfs_hlist_node_t *ehnode;
        cfs_hash_bucket_t *hsb;

        __cfs_hash_key_validate(hs, key, hnode);

        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
        hsb = hs->hs_buckets[i];
        LASSERT(i <= hs->hs_cur_mask);
        LASSERT(cfs_hlist_unhashed(hnode));

        cfs_write_lock(&hsb->hsb_rwlock);
        ehnode = __cfs_hash_bucket_lookup(hs, hsb, key);
                cfs_hash_get(hs, ehnode);
                __cfs_hash_bucket_add(hs, hsb, hnode);
                bits = cfs_hash_rehash_bits(hs);
        cfs_write_unlock(&hsb->hsb_rwlock);
        cfs_hash_runlock(hs);

        cfs_hash_rehash(hs, bits);

/**
 * Add item @hnode to libcfs hash @hs using @key. The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
cfs_hash_add_unique(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)

        cfs_hlist_node_t *ehnode;

        ehnode = cfs_hash_findadd_unique_hnode(hs, key, hnode);
        if (ehnode != hnode) {
                cfs_hash_put(hs, ehnode);

CFS_EXPORT_SYMBOL(cfs_hash_add_unique);
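
/*
 * Illustrative sketch (hypothetical caller code): insertion that must not
 * create duplicate keys:
 *
 *      rc = cfs_hash_add_unique(h, &obj->mo_key, &obj->mo_hnode);
 *      if (rc == -EALREADY)
 *              ... another object with this key is already hashed ...
 */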
/**
 * Add item @hnode to libcfs hash @hs using @key. If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
cfs_hash_findadd_unique(cfs_hash_t *hs, void *key,
                        cfs_hlist_node_t *hnode)

        cfs_hlist_node_t *ehnode;

        ehnode = cfs_hash_findadd_unique_hnode(hs, key, hnode);
        obj = cfs_hash_get(hs, ehnode);
        cfs_hash_put(hs, ehnode);

CFS_EXPORT_SYMBOL(cfs_hash_findadd_unique);
/**
 * Delete item @hnode from the libcfs hash @hs using @key. The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket. The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
cfs_hash_del(cfs_hash_t *hs, void *key, cfs_hlist_node_t *hnode)

        cfs_hash_bucket_t *hsb;

        __cfs_hash_key_validate(hs, key, hnode);

        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
        hsb = hs->hs_buckets[i];
        LASSERT(i <= hs->hs_cur_mask);
        LASSERT(!cfs_hlist_unhashed(hnode));

        cfs_write_lock(&hsb->hsb_rwlock);
        obj = __cfs_hash_bucket_del(hs, hsb, hnode);
        cfs_write_unlock(&hsb->hsb_rwlock);
        cfs_hash_runlock(hs);

CFS_EXPORT_SYMBOL(cfs_hash_del);
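
/*
 * Illustrative sketch (hypothetical caller code): removing an object whose
 * node is known to be hashed; the returned pointer has already had the
 * registered ops->hs_put applied to it:
 *
 *      cfs_hash_del(h, &obj->mo_key, &obj->mo_hnode);
 */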
/**
 * Delete item given @key in libcfs hash @hs. The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs this function must be called once per key. The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
cfs_hash_del_key(cfs_hash_t *hs, void *key)

        cfs_hlist_node_t *hnode;
        cfs_hash_bucket_t *hsb;

        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
        hsb = hs->hs_buckets[i];
        LASSERT(i <= hs->hs_cur_mask);

        cfs_write_lock(&hsb->hsb_rwlock);
        hnode = __cfs_hash_bucket_lookup(hs, hsb, key);
                obj = __cfs_hash_bucket_del(hs, hsb, hnode);

        cfs_write_unlock(&hsb->hsb_rwlock);
        cfs_hash_runlock(hs);

CFS_EXPORT_SYMBOL(cfs_hash_del_key);
/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash hs->hs_get() is called and the
 * matching object is returned. It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object. If the @key was not found
 * in the hash @hs NULL is returned.
 */
cfs_hash_lookup(cfs_hash_t *hs, void *key)

        cfs_hlist_node_t *hnode;
        cfs_hash_bucket_t *hsb;

        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
        hsb = hs->hs_buckets[i];
        LASSERT(i <= hs->hs_cur_mask);

        cfs_read_lock(&hsb->hsb_rwlock);
        hnode = __cfs_hash_bucket_lookup(hs, hsb, key);
                obj = cfs_hash_get(hs, hnode);

        cfs_read_unlock(&hsb->hsb_rwlock);
        cfs_hash_runlock(hs);

CFS_EXPORT_SYMBOL(cfs_hash_lookup);
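
/*
 * Illustrative sketch (hypothetical caller code): look an object up by key
 * and drop the reference taken by ops->hs_get once done with it:
 *
 *      struct my_obj *obj;
 *
 *      obj = cfs_hash_lookup(h, &key);
 *      if (obj != NULL) {
 *              ... use obj ...
 *              cfs_hash_put(h, &obj->mo_hnode);
 *      }
 */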
/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 * Before each callback ops->hs_get will be called, and after each
 * callback ops->hs_put will be called. Finally, during the callback
 * the bucket lock is held so the callback must never sleep.
 */
cfs_hash_for_each(cfs_hash_t *hs,
                  cfs_hash_for_each_cb_t func, void *data)

        cfs_hlist_node_t *hnode;
        cfs_hash_bucket_t *hsb;

        cfs_hash_for_each_bucket(hs, hsb, i) {
                cfs_read_lock(&hsb->hsb_rwlock);
                cfs_hlist_for_each(hnode, &(hsb->hsb_head)) {
                        __cfs_hash_bucket_validate(hs, hsb, hnode);
                        obj = cfs_hash_get(hs, hnode);
                        (void)cfs_hash_put(hs, hnode);

                cfs_read_unlock(&hsb->hsb_rwlock);

        cfs_hash_runlock(hs);

CFS_EXPORT_SYMBOL(cfs_hash_for_each);
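
/*
 * Illustrative sketch (hypothetical caller code; it assumes the
 * cfs_hash_for_each_cb_t callback has the shape void (*)(void *obj,
 * void *data), matching how @func is invoked in cfs_hash_for_each_key()
 * below): count the hashed objects with a callback that never sleeps,
 * since the bucket read lock is held around each call:
 *
 *      static void my_count_cb(void *obj, void *data)
 *      {
 *              (*(int *)data)++;
 *      }
 *
 *      int count = 0;
 *
 *      cfs_hash_for_each(h, my_count_cb, &count);
 */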
/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 * Before each callback ops->hs_get will be called, and after each
 * callback ops->hs_put will be called. During the callback the
 * bucket lock will not be held, which allows the current item
 * to be removed from the hash during the callback. However, care
 * should be taken to prevent other callers from operating on the
 * hash concurrently or list corruption may occur.
 */
cfs_hash_for_each_safe(cfs_hash_t *hs,
                       cfs_hash_for_each_cb_t func, void *data)

        cfs_hlist_node_t *hnode;
        cfs_hlist_node_t *pos;
        cfs_hash_bucket_t *hsb;

        cfs_hash_for_each_bucket(hs, hsb, i) {
                cfs_read_lock(&hsb->hsb_rwlock);
                cfs_hlist_for_each_safe(hnode, pos, &(hsb->hsb_head)) {
                        __cfs_hash_bucket_validate(hs, hsb, hnode);
                        obj = cfs_hash_get(hs, hnode);
                        cfs_read_unlock(&hsb->hsb_rwlock);

                        cfs_read_lock(&hsb->hsb_rwlock);
                        (void)cfs_hash_put(hs, hnode);

                cfs_read_unlock(&hsb->hsb_rwlock);

        cfs_hash_runlock(hs);

CFS_EXPORT_SYMBOL(cfs_hash_for_each_safe);
/**
 * For each hash bucket in the libcfs hash @hs call the passed callback
 * @func until all the hash buckets are empty. The passed callback @func
 * or the previously registered callback hs->hs_put must remove the item
 * from the hash. You may either use the cfs_hash_del() or hlist_del()
 * functions. No rwlocks will be held during the callback @func, so it is
 * safe to sleep if needed. This function will not terminate until the
 * hash is empty. Note it is still possible to concurrently add new
 * items into the hash. It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
cfs_hash_for_each_empty(cfs_hash_t *hs,
                        cfs_hash_for_each_cb_t func, void *data)

        cfs_hlist_node_t *hnode;
        cfs_hash_bucket_t *hsb;
        cfs_hash_bucket_t **hsb_last = NULL;

        /* If the hash table has changed since we last held hs_rwlock,
         * we need to start traversing the list from the start. */
        if (hs->hs_buckets != hsb_last) {

                hsb_last = hs->hs_buckets;

        cfs_hash_for_each_bucket_restart(hs, hsb, i) {
                cfs_write_lock(&hsb->hsb_rwlock);
                while (!cfs_hlist_empty(&hsb->hsb_head)) {
                        hnode = hsb->hsb_head.first;
                        __cfs_hash_bucket_validate(hs, hsb, hnode);
                        obj = cfs_hash_get(hs, hnode);
                        cfs_write_unlock(&hsb->hsb_rwlock);
                        cfs_hash_runlock(hs);

                        (void)cfs_hash_put(hs, hnode);

                cfs_write_unlock(&hsb->hsb_rwlock);

        cfs_hash_runlock(hs);

CFS_EXPORT_SYMBOL(cfs_hash_for_each_empty);
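
/*
 * Illustrative sketch (hypothetical caller code): draining a hash at
 * shutdown. The callback only unhashes each object; the object itself is
 * released when its last reference is dropped through the registered
 * ops->hs_put, which this function invokes after every callback:
 *
 *      static void my_drain_cb(void *obj, void *data)
 *      {
 *              struct my_obj *mo = obj;
 *
 *              cfs_hash_del(data, &mo->mo_key, &mo->mo_hnode);
 *      }
 *
 *      cfs_hash_for_each_empty(h, my_drain_cb, h);
 *      cfs_hash_putref(h);
 */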
/**
 * For each item in the libcfs hash @hs which matches the @key call
 * the passed callback @func and pass to it as an argument each hash
 * item and the private @data. Before each callback ops->hs_get will
 * be called, and after each callback ops->hs_put will be called.
 * Finally, during the callback the bucket lock is held so the
 * callback must never sleep.
 */
cfs_hash_for_each_key(cfs_hash_t *hs, void *key,
                      cfs_hash_for_each_cb_t func, void *data)

        cfs_hlist_node_t *hnode;
        cfs_hash_bucket_t *hsb;

        i = cfs_hash_id(hs, key, hs->hs_cur_mask);
        hsb = hs->hs_buckets[i];
        LASSERT(i <= hs->hs_cur_mask);

        cfs_read_lock(&hsb->hsb_rwlock);
        cfs_hlist_for_each(hnode, &(hsb->hsb_head)) {
                __cfs_hash_bucket_validate(hs, hsb, hnode);

                if (!cfs_hash_compare(hs, key, hnode))

                func(cfs_hash_get(hs, hnode), data);
                (void)cfs_hash_put(hs, hnode);

        cfs_read_unlock(&hsb->hsb_rwlock);
        cfs_hash_runlock(hs);

CFS_EXPORT_SYMBOL(cfs_hash_for_each_key);
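
/*
 * Illustrative sketch (hypothetical caller code): visit every object hashed
 * under a single key with a callback that never sleeps, e.g. the
 * my_count_cb sketch shown after cfs_hash_for_each() above:
 *
 *      int count = 0;
 *
 *      cfs_hash_for_each_key(h, &key, my_count_cb, &count);
 */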
/**
 * Rehash the libcfs hash @hs to the given @bits. This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
 * flag is set in @hs the libcfs hash may be dynamically rehashed
 * during addition or removal if the hash's theta value exceeds
 * either the hs->hs_min_theta or hs->hs_max_theta values. By default
 * these values are tuned to keep the chained hash depth small, and
 * this approach assumes a reasonably uniform hashing function. The
 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
 */
cfs_hash_rehash(cfs_hash_t *hs, int bits)

        cfs_hlist_node_t *hnode;
        cfs_hlist_node_t *pos;
        cfs_hash_bucket_t **old_buckets;
        cfs_hash_bucket_t **rehash_buckets;
        cfs_hash_bucket_t *hs_hsb;
        cfs_hash_bucket_t *rehash_hsb;

        int new_mask = (1 << bits) - 1;

        LASSERT(!cfs_in_interrupt());
        LASSERT(new_mask > 0);
        LASSERT((hs->hs_flags & CFS_HASH_REHASH) != 0);

        LIBCFS_ALLOC(rehash_buckets, sizeof(*rehash_buckets) << bits);

        for (i = 0; i <= new_mask; i++) {
                CFS_ALLOC_PTR(rehash_buckets[i]);
                if (rehash_buckets[i] == NULL)
                        GOTO(free, rc = -ENOMEM);

                CFS_INIT_HLIST_HEAD(&rehash_buckets[i]->hsb_head);
                cfs_rwlock_init(&rehash_buckets[i]->hsb_rwlock);
                cfs_atomic_set(&rehash_buckets[i]->hsb_count, 0);

        /*
         * Early return for multiple concurrent racing callers,
         * ensure we only trigger the rehash if it is still needed.
         */
        theta = __cfs_hash_theta(hs);
        if ((theta >= hs->hs_min_theta) && (theta <= hs->hs_max_theta)) {
                cfs_hash_wunlock(hs);
                GOTO(free, rc = -EALREADY);

        old_bits = hs->hs_cur_bits;
        old_buckets = hs->hs_buckets;
        old_mask = (1 << old_bits) - 1;

        hs->hs_cur_bits = bits;
        hs->hs_cur_mask = (1 << bits) - 1;
        hs->hs_buckets = rehash_buckets;
        cfs_atomic_inc(&hs->hs_rehash_count);

        for (i = 0; i <= old_mask; i++) {
                hs_hsb = old_buckets[i];

                cfs_write_lock(&hs_hsb->hsb_rwlock);
                cfs_hlist_for_each_safe(hnode, pos, &(hs_hsb->hsb_head)) {
                        key = cfs_hash_key(hs, hnode);

                        /*
                         * Validate hnode is in the correct bucket.
                         */
                        if (unlikely(hs->hs_flags & CFS_HASH_DEBUG))
                                LASSERT(cfs_hash_id(hs, key, old_mask) == i);

                        /*
                         * Delete from old hash bucket.
                         */
                        cfs_hlist_del(hnode);
                        LASSERT(cfs_atomic_read(&hs_hsb->hsb_count) > 0);
                        cfs_atomic_dec(&hs_hsb->hsb_count);

                        /*
                         * Add to rehash bucket, ops->hs_key must be defined.
                         */
                        rehash_hsb = rehash_buckets[cfs_hash_id(hs, key,
                        cfs_hlist_add_head(hnode, &(rehash_hsb->hsb_head));
                        cfs_atomic_inc(&rehash_hsb->hsb_count);

                LASSERT(cfs_hlist_empty(&(hs_hsb->hsb_head)));
                LASSERT(cfs_atomic_read(&hs_hsb->hsb_count) == 0);
                cfs_write_unlock(&hs_hsb->hsb_rwlock);

        cfs_hash_wunlock(hs);
        rehash_buckets = old_buckets;

        CFS_FREE_PTR(rehash_buckets[i]);
        LIBCFS_FREE(rehash_buckets, sizeof(*rehash_buckets) << bits);

CFS_EXPORT_SYMBOL(cfs_hash_rehash);
/**
 * Rehash the object referenced by @hnode in the libcfs hash @hs. The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash. When an object is being rehashed
 * the registered cfs_hash_get() and cfs_hash_put() functions will
 * not be called.
 */
void cfs_hash_rehash_key(cfs_hash_t *hs, void *old_key, void *new_key,
                         cfs_hlist_node_t *hnode)

        cfs_hash_bucket_t *old_hsb;
        cfs_hash_bucket_t *new_hsb;

        __cfs_hash_key_validate(hs, new_key, hnode);
        LASSERT(!cfs_hlist_unhashed(hnode));

        i = cfs_hash_id(hs, old_key, hs->hs_cur_mask);
        old_hsb = hs->hs_buckets[i];
        LASSERT(i <= hs->hs_cur_mask);

        j = cfs_hash_id(hs, new_key, hs->hs_cur_mask);
        new_hsb = hs->hs_buckets[j];
        LASSERT(j <= hs->hs_cur_mask);

        if (i < j) { /* write_lock ordering */
                cfs_write_lock(&old_hsb->hsb_rwlock);
                cfs_write_lock(&new_hsb->hsb_rwlock);

                cfs_write_lock(&new_hsb->hsb_rwlock);
                cfs_write_lock(&old_hsb->hsb_rwlock);
        } else { /* do nothing */
                cfs_read_unlock(&hs->hs_rwlock);

        /*
         * Migrate item between hash buckets without calling
         * the cfs_hash_get() and cfs_hash_put() callback functions.
         */
        cfs_hlist_del(hnode);
        LASSERT(cfs_atomic_read(&old_hsb->hsb_count) > 0);
        cfs_atomic_dec(&old_hsb->hsb_count);
        cfs_hlist_add_head(hnode, &(new_hsb->hsb_head));
        cfs_atomic_inc(&new_hsb->hsb_count);

        cfs_write_unlock(&new_hsb->hsb_rwlock);
        cfs_write_unlock(&old_hsb->hsb_rwlock);
        cfs_hash_runlock(hs);

CFS_EXPORT_SYMBOL(cfs_hash_rehash_key);
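
/*
 * Illustrative sketch (hypothetical caller code): change an object's key
 * without it ever being absent from the hash; the object's own copy of the
 * key should already hold the new value so that it matches @new_key:
 *
 *      __u64 old_key = obj->mo_key;
 *
 *      obj->mo_key = new_key;
 *      cfs_hash_rehash_key(h, &old_key, &obj->mo_key, &obj->mo_hnode);
 */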
int cfs_hash_debug_header(char *str, int size)

        return snprintf(str, size,
                 "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%6s%s\n", CFS_MAX_HASH_NAME,
                 "name", "cur", "min", "max", "theta", "t-min", "t-max",
                 "flags", "rehash", "count", " distribution");

CFS_EXPORT_SYMBOL(cfs_hash_debug_header);

int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)

        cfs_hash_bucket_t *hsb;

        int dist[8] = { 0, };

        if (str == NULL || size == 0)

        theta = __cfs_hash_theta(hs);

        c += snprintf(str + c, size - c, "%-*s ",
                      CFS_MAX_HASH_NAME, hs->hs_name);
        c += snprintf(str + c, size - c, "%5d ", 1 << hs->hs_cur_bits);
        c += snprintf(str + c, size - c, "%5d ", 1 << hs->hs_min_bits);
        c += snprintf(str + c, size - c, "%5d ", 1 << hs->hs_max_bits);
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __cfs_hash_theta_int(theta),
                      __cfs_hash_theta_frac(theta));
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __cfs_hash_theta_int(hs->hs_min_theta),
                      __cfs_hash_theta_frac(hs->hs_min_theta));
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __cfs_hash_theta_int(hs->hs_max_theta),
                      __cfs_hash_theta_frac(hs->hs_max_theta));
        c += snprintf(str + c, size - c, " 0x%02x ", hs->hs_flags);
        c += snprintf(str + c, size - c, "%6d ",
                      cfs_atomic_read(&hs->hs_rehash_count));
        c += snprintf(str + c, size - c, "%5d ",
                      cfs_atomic_read(&hs->hs_count));

        /*
         * The distribution is a summary of the chained hash depth in
         * each of the libcfs hash buckets. Each bucket's hsb_count is
         * divided by the hash theta value and used to generate a
         * histogram of the hash distribution. A uniform hash will
         * result in all hash buckets being close to the average, thus
         * only the first few entries in the histogram will be non-zero.
         * If your hash function results in a non-uniform hash, this will
         * be observable as outlier buckets in the distribution histogram.
         *
         * Uniform hash distribution:     128/128/0/0/0/0/0/0
         * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
         */
        cfs_hash_for_each_bucket(hs, hsb, i)
                dist[min(__cfs_fls(cfs_atomic_read(&hsb->hsb_count)/max(theta,1)),7)]++;

        for (i = 0; i < 8; i++)
                c += snprintf(str + c, size - c, "%d%c", dist[i],
                              (i == 7) ? '\n' : '/');

        cfs_hash_runlock(hs);

CFS_EXPORT_SYMBOL(cfs_hash_debug_str);
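
/*
 * Illustrative sketch (hypothetical caller code): dumping the statistics
 * line for a hash, e.g. into the debug log:
 *
 *      char buf[256];
 *
 *      cfs_hash_debug_header(buf, sizeof(buf));
 *      CDEBUG(D_INFO, "%s", buf);
 *      cfs_hash_debug_str(h, buf, sizeof(buf));
 *      CDEBUG(D_INFO, "%s", buf);
 */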