/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/class_hash.c
 *
 * Implement a general-purpose hash table class for the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 *   - Simplified API and improved documentation
 *   - Added per-hash feature flags:
 *     * LH_DEBUG additional validation
 *     * LH_REHASH dynamic rehashing
 *   - Added per-hash statistics
 *   - General performance enhancements
 */

#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <class_hash.h>

/**
 * Initialize new lustre hash, where:
 * @name     - Descriptive hash name
 * @cur_size - Initial hash table size
 * @max_size - Maximum allowed hash table resize
 * @ops      - Registered hash table operations
 * @flags    - LH_REHASH enable dynamic hash resizing
 *           - LH_SORT enable chained hash sort
 */
lustre_hash_t *
lustre_hash_init(char *name, unsigned int cur_size, unsigned int max_size,
                 lustre_hash_ops_t *ops, int flags)
{
        lustre_hash_t *lh;
        int            i;
        ENTRY;

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        /*
         * Ensure hash is a power of two to allow the use of a bitmask
         * in the hash function instead of a more expensive modulus.
         */
        LASSERTF(cur_size && (cur_size & (cur_size - 1)) == 0,
                 "Size (%u) is not power of 2\n", cur_size);
        LASSERTF(max_size && (max_size & (max_size - 1)) == 0,
                 "Size (%u) is not power of 2\n", max_size);

        OBD_ALLOC_PTR(lh);
        if (!lh)
                RETURN(NULL);

        lh->lh_name_size = strlen(name) + 1;
        rwlock_init(&lh->lh_rwlock);
        OBD_ALLOC(lh->lh_name, lh->lh_name_size);
        if (!lh->lh_name) {
                OBD_FREE_PTR(lh);
                RETURN(NULL);
        }
        strncpy(lh->lh_name, name, lh->lh_name_size);

        atomic_set(&lh->lh_count, 0);
        atomic_set(&lh->lh_rehash_count, 0);
        lh->lh_cur_size = cur_size;
        lh->lh_min_size = cur_size;
        lh->lh_max_size = max_size;
        lh->lh_min_theta = 500;  /* theta * 1000 */
        lh->lh_max_theta = 2000; /* theta * 1000 */
        lh->lh_ops = ops;
        lh->lh_flags = flags;

        OBD_VMALLOC(lh->lh_buckets, sizeof(*lh->lh_buckets) * lh->lh_cur_size);
        if (!lh->lh_buckets) {
                OBD_FREE(lh->lh_name, lh->lh_name_size);
                OBD_FREE_PTR(lh);
                RETURN(NULL);
        }

        for (i = 0; i < lh->lh_cur_size; i++) {
                INIT_HLIST_HEAD(&lh->lh_buckets[i].lhb_head);
                rwlock_init(&lh->lh_buckets[i].lhb_rwlock);
                atomic_set(&lh->lh_buckets[i].lhb_count, 0);
        }

        RETURN(lh);
}
EXPORT_SYMBOL(lustre_hash_init);
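
/*
 * Usage sketch (kept out of the build with #if 0): registering a
 * lustre_hash_ops_t and creating a table.  The example object and all
 * example_* names are hypothetical; the ops field names and signatures
 * assume the lustre_hash_ops_t declared in class_hash.h.
 */
#if 0
struct example_obj {
        struct hlist_node eo_hnode;     /* linkage into a hash bucket */
        __u64             eo_key;
        atomic_t          eo_ref;
};

static unsigned example_hash(lustre_hash_t *lh, void *key, unsigned mask)
{
        return (unsigned)(*(__u64 *)key) & mask;    /* mask is size - 1 */
}

static void *example_key(struct hlist_node *hnode)
{
        return &hlist_entry(hnode, struct example_obj, eo_hnode)->eo_key;
}

static int example_compare(void *key, struct hlist_node *hnode)
{
        return *(__u64 *)key ==
               hlist_entry(hnode, struct example_obj, eo_hnode)->eo_key;
}

static void *example_get(struct hlist_node *hnode)
{
        struct example_obj *obj;

        obj = hlist_entry(hnode, struct example_obj, eo_hnode);
        atomic_inc(&obj->eo_ref);                   /* take a reference */
        return obj;
}

static void *example_put(struct hlist_node *hnode)
{
        struct example_obj *obj;

        obj = hlist_entry(hnode, struct example_obj, eo_hnode);
        atomic_dec(&obj->eo_ref);                   /* drop the reference */
        return obj;
}

static lustre_hash_ops_t example_hash_ops = {
        .lh_hash    = example_hash,
        .lh_key     = example_key,
        .lh_compare = example_compare,
        .lh_get     = example_get,
        .lh_put     = example_put,
};

static lustre_hash_t *example_table;

static int example_setup(void)
{
        /* 128 initial buckets growing to at most 32768; powers of two */
        example_table = lustre_hash_init("EXAMPLE_HASH", 128, 32768,
                                         &example_hash_ops, LH_REHASH);
        return example_table == NULL ? -ENOMEM : 0;
}
#endif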

/**
 * Cleanup lustre hash @lh.
 */
void
lustre_hash_exit(lustre_hash_t *lh)
{
        lustre_hash_bucket_t *lhb;
        struct hlist_node    *hnode;
        struct hlist_node    *pos;
        int                   i;
        ENTRY;

        if (!lh)
                return;

        write_lock(&lh->lh_rwlock);
        lh_for_each_bucket(lh, lhb, i) {
                write_lock(&lhb->lhb_rwlock);
                hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        __lustre_hash_bucket_del(lh, lhb, hnode);
                }
                LASSERT(hlist_empty(&(lhb->lhb_head)));
                LASSERT(atomic_read(&lhb->lhb_count) == 0);
                write_unlock(&lhb->lhb_rwlock);
        }

        OBD_VFREE(lh->lh_buckets, sizeof(*lh->lh_buckets) * lh->lh_cur_size);
        OBD_FREE(lh->lh_name, lh->lh_name_size);
        LASSERT(atomic_read(&lh->lh_count) == 0);
        write_unlock(&lh->lh_rwlock);

        OBD_FREE_PTR(lh);
        EXIT;
}
EXPORT_SYMBOL(lustre_hash_exit);

static inline unsigned int lustre_hash_rehash_size(lustre_hash_t *lh)
{
        if (!(lh->lh_flags & LH_REHASH))
                return 0;

        if ((lh->lh_cur_size < lh->lh_max_size) &&
            (__lustre_hash_theta(lh) > lh->lh_max_theta))
                return MIN(lh->lh_cur_size * 2, lh->lh_max_size);

        if ((lh->lh_cur_size > lh->lh_min_size) &&
            (__lustre_hash_theta(lh) < lh->lh_min_theta))
                return MAX(lh->lh_cur_size / 2, lh->lh_min_size);

        return 0;
}
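
/*
 * Worked example of the policy above, assuming __lustre_hash_theta()
 * returns the average chain depth scaled by 1000 (i.e. lh_count * 1000
 * / lh_cur_size): 1024 items in 256 buckets gives theta 4000, which
 * exceeds the default lh_max_theta of 2000, so the table doubles to
 * 512 buckets (never past lh_max_size).  If deletions later drop theta
 * below lh_min_theta (500), the table halves, never below lh_min_size.
 */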

/**
 * Add item @hnode to lustre hash @lh using @key.  The registered
 * ops->lh_get function will be called when the item is added.
 */
void
lustre_hash_add(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
{
        lustre_hash_bucket_t *lhb;
        int                   size;
        unsigned              i;
        ENTRY;

        __lustre_hash_key_validate(lh, key, hnode);

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_size - 1);
        lhb = &lh->lh_buckets[i];
        LASSERT(i < lh->lh_cur_size);
        LASSERT(hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        __lustre_hash_bucket_add(lh, lhb, hnode);
        write_unlock(&lhb->lhb_rwlock);

        size = lustre_hash_rehash_size(lh);
        read_unlock(&lh->lh_rwlock);
        if (size)
                lustre_hash_rehash(lh, size);

        EXIT;
}
EXPORT_SYMBOL(lustre_hash_add);

/**
 * Add item @hnode to lustre hash @lh using @key.  The registered
 * ops->lh_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
lustre_hash_add_unique(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
{
        lustre_hash_bucket_t *lhb;
        int                   size;
        int                   rc = -EALREADY;
        unsigned              i;
        ENTRY;

        __lustre_hash_key_validate(lh, key, hnode);

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_size - 1);
        lhb = &lh->lh_buckets[i];
        LASSERT(i < lh->lh_cur_size);
        LASSERT(hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        if (!__lustre_hash_bucket_lookup(lh, lhb, key)) {
                __lustre_hash_bucket_add(lh, lhb, hnode);
                rc = 0;
        }
        write_unlock(&lhb->lhb_rwlock);

        size = lustre_hash_rehash_size(lh);
        read_unlock(&lh->lh_rwlock);
        if (size)
                lustre_hash_rehash(lh, size);

        RETURN(rc);
}
EXPORT_SYMBOL(lustre_hash_add_unique);
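
/*
 * Usage sketch (kept out of the build with #if 0): detecting a key
 * collision with lustre_hash_add_unique().  struct example_obj and its
 * fields are the hypothetical type from the sketch above.
 */
#if 0
static int example_insert(lustre_hash_t *hash, struct example_obj *obj)
{
        int rc;

        /* obj->eo_hnode must be unhashed on entry */
        rc = lustre_hash_add_unique(hash, &obj->eo_key, &obj->eo_hnode);
        if (rc == -EALREADY)
                CERROR("key already present, object not inserted\n");

        return rc;
}
#endif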

/**
 * Add item @hnode to lustre hash @lh using @key.  If this @key
 * already exists in the hash then ops->lh_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->lh_get is called on the item which was added.
 */
void *
lustre_hash_findadd_unique(lustre_hash_t *lh, void *key,
                           struct hlist_node *hnode)
{
        struct hlist_node    *existing_hnode;
        lustre_hash_bucket_t *lhb;
        int                   size;
        unsigned              i;
        void                 *obj;
        ENTRY;

        __lustre_hash_key_validate(lh, key, hnode);

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_size - 1);
        lhb = &lh->lh_buckets[i];
        LASSERT(i < lh->lh_cur_size);
        LASSERT(hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        existing_hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
        if (existing_hnode)
                obj = lh_get(lh, existing_hnode);
        else
                obj = __lustre_hash_bucket_add(lh, lhb, hnode);
        write_unlock(&lhb->lhb_rwlock);

        size = lustre_hash_rehash_size(lh);
        read_unlock(&lh->lh_rwlock);
        if (size)
                lustre_hash_rehash(lh, size);

        RETURN(obj);
}
EXPORT_SYMBOL(lustre_hash_findadd_unique);

/**
 * Delete item @hnode from the lustre hash @lh using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->lh_put is called
 * on the removed object.
 */
void *
lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
{
        lustre_hash_bucket_t *lhb;
        int                   size;
        unsigned              i;
        void                 *obj;
        ENTRY;

        __lustre_hash_key_validate(lh, key, hnode);

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_size - 1);
        lhb = &lh->lh_buckets[i];
        LASSERT(i < lh->lh_cur_size);
        LASSERT(!hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        obj = __lustre_hash_bucket_del(lh, lhb, hnode);
        write_unlock(&lhb->lhb_rwlock);

        size = lustre_hash_rehash_size(lh);
        read_unlock(&lh->lh_rwlock);
        if (size)
                lustre_hash_rehash(lh, size);

        RETURN(obj);
}
EXPORT_SYMBOL(lustre_hash_del);

/**
 * Delete item given @key in lustre hash @lh.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the
 * hash @lh this function must be called once per key.  The removed
 * object will be returned and ops->lh_put is called on the removed
 * object.
 */
void *
lustre_hash_del_key(lustre_hash_t *lh, void *key)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        int                   size;
        unsigned              i;
        void                 *obj = NULL;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_size - 1);
        lhb = &lh->lh_buckets[i];
        LASSERT(i < lh->lh_cur_size);

        write_lock(&lhb->lhb_rwlock);
        hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
        if (hnode)
                obj = __lustre_hash_bucket_del(lh, lhb, hnode);
        write_unlock(&lhb->lhb_rwlock);

        size = lustre_hash_rehash_size(lh);
        read_unlock(&lh->lh_rwlock);
        if (size)
                lustre_hash_rehash(lh, size);

        RETURN(obj);
}
EXPORT_SYMBOL(lustre_hash_del_key);

/**
 * Lookup an item using @key in the lustre hash @lh and return it.
 * If the @key is found in the hash lh->lh_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->lh_put using the lh_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @lh NULL is returned.
 */
void *
lustre_hash_lookup(lustre_hash_t *lh, void *key)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        unsigned              i;
        void                 *obj = NULL;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_size - 1);
        lhb = &lh->lh_buckets[i];
        LASSERT(i < lh->lh_cur_size);

        read_lock(&lhb->lhb_rwlock);
        hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
        if (hnode)
                obj = lh_get(lh, hnode);
        read_unlock(&lhb->lhb_rwlock);
        read_unlock(&lh->lh_rwlock);

        RETURN(obj);
}
EXPORT_SYMBOL(lustre_hash_lookup);
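
/*
 * Usage sketch (kept out of the build with #if 0): every successful
 * lustre_hash_lookup() holds a reference via ops->lh_get and must be
 * balanced with the lh_put() macro.  struct example_obj is the
 * hypothetical type from the sketch above.
 */
#if 0
static void example_use(lustre_hash_t *hash, __u64 key)
{
        struct example_obj *obj;

        obj = lustre_hash_lookup(hash, &key);
        if (obj == NULL)
                return;

        /* ... use obj; the lookup reference keeps it alive ... */

        (void)lh_put(hash, &obj->eo_hnode);
}
#endif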

/**
 * For each item in the lustre hash @lh call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 * Before each callback ops->lh_get will be called, and after each
 * callback ops->lh_put will be called.  Finally, during the callback
 * the bucket lock is held so the callback must never sleep.
 */
void
lustre_hash_for_each(lustre_hash_t *lh, lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        int                   i;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        lh_for_each_bucket(lh, lhb, i) {
                read_lock(&lhb->lhb_rwlock);
                hlist_for_each(hnode, &(lhb->lhb_head)) {
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        obj = lh_get(lh, hnode);
                        func(obj, data);
                        (void)lh_put(lh, hnode);
                }
                read_unlock(&lhb->lhb_rwlock);
        }
        read_unlock(&lh->lh_rwlock);

        EXIT;
}
EXPORT_SYMBOL(lustre_hash_for_each);
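
/*
 * Usage sketch (kept out of the build with #if 0): a counting callback
 * for lustre_hash_for_each().  Because the bucket read lock is held
 * around the callback, it must not sleep or re-enter the hash.
 */
#if 0
static void example_count_cb(void *obj, void *data)
{
        atomic_inc((atomic_t *)data);   /* non-blocking work only */
}

/* ... lustre_hash_for_each(hash, example_count_cb, &count); ... */
#endif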

/**
 * For each item in the lustre hash @lh call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 * Before each callback ops->lh_get will be called, and after each
 * callback ops->lh_put will be called.  During the callback the
 * bucket lock will not be held, which allows the current item to be
 * removed from the hash during the callback.  However, care should
 * be taken to prevent other callers from operating on the hash
 * concurrently or list corruption may occur.
 */
void
lustre_hash_for_each_safe(lustre_hash_t *lh, lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        struct hlist_node    *pos;
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        int                   i;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        lh_for_each_bucket(lh, lhb, i) {
                read_lock(&lhb->lhb_rwlock);
                hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        obj = lh_get(lh, hnode);
                        read_unlock(&lhb->lhb_rwlock);
                        func(obj, data);
                        read_lock(&lhb->lhb_rwlock);
                        (void)lh_put(lh, hnode);
                }
                read_unlock(&lhb->lhb_rwlock);
        }
        read_unlock(&lh->lh_rwlock);
        EXIT;
}
EXPORT_SYMBOL(lustre_hash_for_each_safe);

/**
 * For each hash bucket in the lustre hash @lh call the passed callback
 * @func until all the hash buckets are empty.  The passed callback @func
 * or the previously registered callback lh->lh_put must remove the item
 * from the hash.  You may either use the lustre_hash_del() or hlist_del()
 * functions.  No rwlocks will be held during the callback @func, so it is
 * safe to sleep if needed.  This function will not terminate until the
 * hash is empty.  Note it is still possible to concurrently add new
 * items into the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
void
lustre_hash_for_each_empty(lustre_hash_t *lh, lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        int                   i;
        ENTRY;

restart:
        read_lock(&lh->lh_rwlock);
        lh_for_each_bucket(lh, lhb, i) {
                write_lock(&lhb->lhb_rwlock);
                while (!hlist_empty(&lhb->lhb_head)) {
                        hnode = lhb->lhb_head.first;
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        obj = lh_get(lh, hnode);
                        write_unlock(&lhb->lhb_rwlock);
                        read_unlock(&lh->lh_rwlock);
                        func(obj, data);
                        (void)lh_put(lh, hnode);
                        goto restart;
                }
                write_unlock(&lhb->lhb_rwlock);
        }
        read_unlock(&lh->lh_rwlock);
        EXIT;
}
EXPORT_SYMBOL(lustre_hash_for_each_empty);
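
/*
 * Usage sketch (kept out of the build with #if 0): draining a hash at
 * teardown time.  The callback may sleep here, but it (or the registered
 * lh_put) must remove the item or the loop never terminates.  Passing
 * the hash itself through @data is a hypothetical convention.
 */
#if 0
static void example_drain_cb(void *obj, void *data)
{
        struct example_obj *eo = obj;
        lustre_hash_t      *hash = data;

        /* no rwlocks are held here, so lustre_hash_del() is safe */
        (void)lustre_hash_del(hash, &eo->eo_key, &eo->eo_hnode);
}

/* ... lustre_hash_for_each_empty(hash, example_drain_cb, hash); ... */
#endif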

/**
 * For each item in the lustre hash @lh which matches the @key call
 * the passed callback @func and pass to it as an argument each hash
 * item and the private @data.  Before each callback ops->lh_get will
 * be called, and after each callback ops->lh_put will be called.
 * Finally, during the callback the bucket lock is held so the
 * callback must never sleep.
 */
void
lustre_hash_for_each_key(lustre_hash_t *lh, void *key,
                         lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        unsigned              i;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_size - 1);
        lhb = &lh->lh_buckets[i];
        LASSERT(i < lh->lh_cur_size);

        read_lock(&lhb->lhb_rwlock);
        hlist_for_each(hnode, &(lhb->lhb_head)) {
                __lustre_hash_bucket_validate(lh, lhb, hnode);

                if (!lh_compare(lh, key, hnode))
                        continue;

                func(lh_get(lh, hnode), data);
                (void)lh_put(lh, hnode);
        }
        read_unlock(&lhb->lhb_rwlock);
        read_unlock(&lh->lh_rwlock);

        EXIT;
}
EXPORT_SYMBOL(lustre_hash_for_each_key);

/**
 * Rehash the lustre hash @lh to the given @size.  This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed.  When the LH_REHASH
 * flag is set in @lh the lustre hash may be dynamically rehashed
 * during addition or removal if the hash's theta value falls outside
 * the lh->lh_min_theta and lh->lh_max_theta bounds.  By default
 * these values are tuned to keep the chained hash depth small, and
 * this approach assumes a reasonably uniform hashing function.  The
 * theta thresholds for @lh are tunable via lustre_hash_set_theta().
 */
void
lustre_hash_rehash(lustre_hash_t *lh, int size)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        lustre_hash_bucket_t  *lh_buckets;
        lustre_hash_bucket_t  *rehash_buckets;
        lustre_hash_bucket_t  *lh_lhb;
        lustre_hash_bucket_t  *rehash_lhb;
        int                    i;
        int                    theta;
        int                    lh_size;
        void                  *key;
        ENTRY;

        OBD_VMALLOC(rehash_buckets, sizeof(*rehash_buckets) * size);
        if (!rehash_buckets)
                return;

        for (i = 0; i < size; i++) {
                INIT_HLIST_HEAD(&rehash_buckets[i].lhb_head);
                rwlock_init(&rehash_buckets[i].lhb_rwlock);
                atomic_set(&rehash_buckets[i].lhb_count, 0);
        }

        write_lock(&lh->lh_rwlock);

        /*
         * Early return for multiple concurrent racing callers,
         * ensure we only trigger the rehash if it is still needed.
         */
        theta = __lustre_hash_theta(lh);
        if ((theta >= lh->lh_min_theta) && (theta <= lh->lh_max_theta)) {
                OBD_VFREE(rehash_buckets, sizeof(*rehash_buckets) * size);
                write_unlock(&lh->lh_rwlock);
                return;
        }

        lh_size = lh->lh_cur_size;
        lh_buckets = lh->lh_buckets;

        lh->lh_cur_size = size;
        lh->lh_buckets = rehash_buckets;
        atomic_inc(&lh->lh_rehash_count);

        for (i = 0; i < lh_size; i++) {
                lh_lhb = &lh_buckets[i];

                write_lock(&lh_lhb->lhb_rwlock);
                hlist_for_each_safe(hnode, pos, &(lh_lhb->lhb_head)) {
                        key = lh_key(lh, hnode);
                        LASSERT(key);

                        /*
                         * Validate hnode is in the correct bucket.
                         */
                        if (unlikely(lh->lh_flags & LH_DEBUG))
                                LASSERT(lh_hash(lh, key, lh_size - 1) == i);

                        /*
                         * Delete from old hash bucket.
                         */
                        hlist_del(hnode);
                        LASSERT(atomic_read(&lh_lhb->lhb_count) > 0);
                        atomic_dec(&lh_lhb->lhb_count);

                        /*
                         * Add to rehash bucket, ops->lh_key must be defined.
                         */
                        rehash_lhb = &rehash_buckets[lh_hash(lh, key, size-1)];
                        hlist_add_head(hnode, &(rehash_lhb->lhb_head));
                        atomic_inc(&rehash_lhb->lhb_count);
                }

                LASSERT(hlist_empty(&(lh_lhb->lhb_head)));
                LASSERT(atomic_read(&lh_lhb->lhb_count) == 0);
                write_unlock(&lh_lhb->lhb_rwlock);
        }

        OBD_VFREE(lh_buckets, sizeof(*lh_buckets) * lh_size);
        write_unlock(&lh->lh_rwlock);
        EXIT;
}
EXPORT_SYMBOL(lustre_hash_rehash);

/**
 * Rehash the object referenced by @hnode in the lustre hash @lh.  The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a lustre_hash_add() + lustre_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered lh_get() and lh_put() functions will not be called.
 */
void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key, void *new_key,
                            struct hlist_node *hnode)
{
        lustre_hash_bucket_t *old_lhb;
        lustre_hash_bucket_t *new_lhb;
        unsigned              i;
        unsigned              j;
        ENTRY;

        __lustre_hash_key_validate(lh, new_key, hnode);
        LASSERT(!hlist_unhashed(hnode));

        read_lock(&lh->lh_rwlock);

        i = lh_hash(lh, old_key, lh->lh_cur_size - 1);
        old_lhb = &lh->lh_buckets[i];
        LASSERT(i < lh->lh_cur_size);

        j = lh_hash(lh, new_key, lh->lh_cur_size - 1);
        new_lhb = &lh->lh_buckets[j];
        LASSERT(j < lh->lh_cur_size);

        /*
         * Lock the two buckets in index order, and only once when both
         * keys hash to the same bucket; taking the same write lock twice,
         * or taking the two locks in opposite orders in racing callers,
         * would deadlock.
         */
        if (i < j) {
                write_lock(&old_lhb->lhb_rwlock);
                write_lock(&new_lhb->lhb_rwlock);
        } else if (i > j) {
                write_lock(&new_lhb->lhb_rwlock);
                write_lock(&old_lhb->lhb_rwlock);
        } else {
                write_lock(&old_lhb->lhb_rwlock);
        }

        /*
         * Migrate item between hash buckets without calling
         * the lh_get() and lh_put() callback functions.
         */
        hlist_del(hnode);
        LASSERT(atomic_read(&old_lhb->lhb_count) > 0);
        atomic_dec(&old_lhb->lhb_count);
        hlist_add_head(hnode, &(new_lhb->lhb_head));
        atomic_inc(&new_lhb->lhb_count);

        if (i != j)
                write_unlock(&new_lhb->lhb_rwlock);
        write_unlock(&old_lhb->lhb_rwlock);
        read_unlock(&lh->lh_rwlock);
        EXIT;
}
EXPORT_SYMBOL(lustre_hash_rehash_key);
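
/*
 * Usage sketch (kept out of the build with #if 0): changing an object's
 * key atomically.  The embedded key is updated before reinsertion so
 * that ops->lh_key and the LH_DEBUG validation see the new value; this
 * ordering, like struct example_obj itself, is an assumption of the
 * sketch.
 */
#if 0
static void example_change_key(lustre_hash_t *hash, struct example_obj *obj,
                               __u64 new_key)
{
        __u64 old_key = obj->eo_key;

        obj->eo_key = new_key;
        lustre_hash_rehash_key(hash, &old_key, &obj->eo_key, &obj->eo_hnode);
}
#endif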

int lustre_hash_debug_header(char *str, int size)
{
        return snprintf(str, size,
                        "%-36s%6s%6s%6s%6s%6s%6s%6s%7s%6s%s\n",
                        "name", "cur", "min", "max", "theta", "t-min", "t-max",
                        "flags", "rehash", "count", " distribution");
}
EXPORT_SYMBOL(lustre_hash_debug_header);

int lustre_hash_debug_str(lustre_hash_t *lh, char *str, int size)
{
        lustre_hash_bucket_t *lhb;
        int                   theta;
        int                   i;
        int                   c = 0;
        int                   dist[8] = { 0, };

        if (str == NULL || size == 0)
                return 0;

        read_lock(&lh->lh_rwlock);
        theta = __lustre_hash_theta(lh);

        c += snprintf(str + c, size - c, "%-36s ", lh->lh_name);
        c += snprintf(str + c, size - c, "%5d ", lh->lh_cur_size);
        c += snprintf(str + c, size - c, "%5d ", lh->lh_min_size);
        c += snprintf(str + c, size - c, "%5d ", lh->lh_max_size);
        c += snprintf(str + c, size - c, "%d.%03d ",
                      theta / 1000, theta % 1000);
        c += snprintf(str + c, size - c, "%d.%03d ",
                      lh->lh_min_theta / 1000, lh->lh_min_theta % 1000);
        c += snprintf(str + c, size - c, "%d.%03d ",
                      lh->lh_max_theta / 1000, lh->lh_max_theta % 1000);
        c += snprintf(str + c, size - c, " 0x%02x ", lh->lh_flags);
        c += snprintf(str + c, size - c, "%6d ",
                      atomic_read(&lh->lh_rehash_count));
        c += snprintf(str + c, size - c, "%5d ",
                      atomic_read(&lh->lh_count));

        /*
         * The distribution is a summary of the chained hash depth in
         * each of the lustre hash buckets.  Each bucket's lhb_count is
         * divided by the hash theta value and used to generate a
         * histogram of the hash distribution.  A uniform hash will
         * result in all hash buckets being close to the average, thus
         * only the first few entries in the histogram will be non-zero.
         * If your hash function results in a non-uniform hash, it will
         * be observable as outlier buckets in the distribution histogram.
         *
         * Uniform hash distribution:      128/128/0/0/0/0/0/0
         * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
         */
        lh_for_each_bucket(lh, lhb, i)
                dist[MIN(fls(atomic_read(&lhb->lhb_count) / MAX(theta, 1)), 7)]++;

        for (i = 0; i < 8; i++)
                c += snprintf(str + c, size - c, "%d%c", dist[i],
                              (i == 7) ? '\n' : '/');

        read_unlock(&lh->lh_rwlock);

        return c;
}
EXPORT_SYMBOL(lustre_hash_debug_str);
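
/*
 * Usage sketch (kept out of the build with #if 0): emitting the header
 * line followed by one statistics line per hash, e.g. from a /proc
 * handler.  The buffer size and debug mask are arbitrary choices.
 */
#if 0
static void example_dump_stats(lustre_hash_t *hash)
{
        char buf[1024];
        int  c = 0;

        c += lustre_hash_debug_header(buf + c, sizeof(buf) - c);
        c += lustre_hash_debug_str(hash, buf + c, sizeof(buf) - c);
        CDEBUG(D_INFO, "%s", buf);
}
#endif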