/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/class_hash.c
 *
 * Implements a generic hash table class for use throughout the Lustre code.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * LH_DEBUG additional validation
 *   * LH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 */

#include <liblustre.h>
#include <class_hash.h>

static void
lh_read_lock(lustre_hash_t *lh)
{
        if ((lh->lh_flags & LH_REHASH) != 0)
                read_lock(&lh->lh_rwlock);
}

static void
lh_read_unlock(lustre_hash_t *lh)
{
        if ((lh->lh_flags & LH_REHASH) != 0)
                read_unlock(&lh->lh_rwlock);
}

static void
lh_write_lock(lustre_hash_t *lh)
{
        if ((lh->lh_flags & LH_REHASH) != 0)
                write_lock(&lh->lh_rwlock);
}

static void
lh_write_unlock(lustre_hash_t *lh)
{
        if ((lh->lh_flags & LH_REHASH) != 0)
                write_unlock(&lh->lh_rwlock);
}

/**
 * Initialize new lustre hash, where:
 * \param name     Descriptive hash name
 * \param cur_bits Initial hash table size, in bits
 * \param max_bits Maximum allowed hash table resize, in bits
 * \param ops      Registered hash table operations
 * \param flags    LH_REHASH enables dynamic hash resizing
 *                 LH_SORT enables chained hash sort
 */
lustre_hash_t *
lustre_hash_init(char *name, unsigned int cur_bits, unsigned int max_bits,
                 lustre_hash_ops_t *ops, int flags)
{
        lustre_hash_t *lh;
        int            i;

        LASSERT(name != NULL);
        LASSERT(ops != NULL);

        LASSERT(cur_bits > 0);
        LASSERT(max_bits >= cur_bits);
        LASSERT(max_bits < 31);

        LIBCFS_ALLOC(lh, sizeof(*lh));
        if (lh == NULL)
                return NULL;

        strncpy(lh->lh_name, name, sizeof(lh->lh_name));
        lh->lh_name[sizeof(lh->lh_name) - 1] = '\0';
        atomic_set(&lh->lh_rehash_count, 0);
        atomic_set(&lh->lh_count, 0);
        rwlock_init(&lh->lh_rwlock);
        lh->lh_cur_bits = cur_bits;
        lh->lh_cur_mask = (1 << cur_bits) - 1;
        lh->lh_min_bits = cur_bits;
        lh->lh_max_bits = max_bits;
        /* XXX: need to fixup lustre_hash_rehash_bits() before this can be
         *      anything other than 0.5 and 2.0 */
        lh->lh_min_theta = 1 << (LH_THETA_BITS - 1);
        lh->lh_max_theta = 1 << (LH_THETA_BITS + 1);
        lh->lh_ops = ops;
        lh->lh_flags = flags;
        if (cur_bits != max_bits && (lh->lh_flags & LH_REHASH) == 0)
                CWARN("Rehash is disabled, ignoring max_bits %d\n", max_bits);

        __lustre_hash_set_theta(lh, 500, 2000);

        LIBCFS_ALLOC(lh->lh_buckets, sizeof(*lh->lh_buckets) << lh->lh_cur_bits);
        if (lh->lh_buckets == NULL) {
                LIBCFS_FREE(lh, sizeof(*lh));
                return NULL;
        }

        for (i = 0; i <= lh->lh_cur_mask; i++) {
                LIBCFS_ALLOC(lh->lh_buckets[i], sizeof(lustre_hash_bucket_t));
                if (lh->lh_buckets[i] == NULL) {
                        lustre_hash_exit(lh);
                        return NULL;
                }

                INIT_HLIST_HEAD(&lh->lh_buckets[i]->lhb_head);
                rwlock_init(&lh->lh_buckets[i]->lhb_rwlock);
                atomic_set(&lh->lh_buckets[i]->lhb_count, 0);
        }

        return lh;
}
EXPORT_SYMBOL(lustre_hash_init);

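/*
 * Usage sketch (illustrative, not part of the original source): creating a
 * hash of reference-counted objects keyed by a 64-bit id.  "struct my_obj"
 * and the my_obj_* callbacks are hypothetical; their signatures are inferred
 * from how lh_hash(), lh_key(), lh_compare(), lh_get(), and lh_put() are
 * invoked in this file -- see class_hash.h for the authoritative
 * lustre_hash_ops_t definition.
 *
 *   struct my_obj {
 *           __u64             mo_id;
 *           atomic_t          mo_ref;
 *           struct hlist_node mo_hnode;
 *   };
 *
 *   static unsigned my_obj_hash(lustre_hash_t *lh, void *key, unsigned mask)
 *   {
 *           return (unsigned)(*(__u64 *)key) & mask;
 *   }
 *
 *   static void *my_obj_key(struct hlist_node *hnode)
 *   {
 *           return &hlist_entry(hnode, struct my_obj, mo_hnode)->mo_id;
 *   }
 *
 *   static int my_obj_compare(void *key, struct hlist_node *hnode)
 *   {
 *           return hlist_entry(hnode, struct my_obj, mo_hnode)->mo_id ==
 *                  *(__u64 *)key;
 *   }
 *
 *   static void *my_obj_get(struct hlist_node *hnode)
 *   {
 *           struct my_obj *obj = hlist_entry(hnode, struct my_obj, mo_hnode);
 *           atomic_inc(&obj->mo_ref);
 *           return obj;
 *   }
 *
 *   static void *my_obj_put(struct hlist_node *hnode)
 *   {
 *           struct my_obj *obj = hlist_entry(hnode, struct my_obj, mo_hnode);
 *           atomic_dec(&obj->mo_ref);
 *           return obj;
 *   }
 *
 *   static lustre_hash_ops_t my_obj_hash_ops = {
 *           .lh_hash    = my_obj_hash,
 *           .lh_key     = my_obj_key,
 *           .lh_compare = my_obj_compare,
 *           .lh_get     = my_obj_get,
 *           .lh_put     = my_obj_put,
 *   };
 *
 *   lustre_hash_t *my_hash;
 *
 *   my_hash = lustre_hash_init("my_hash", 7, 12, &my_obj_hash_ops,
 *                              LH_REHASH);    (2^7 buckets, grows to 2^12)
 */
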
/**
 * Clean up the lustre hash \a lh.
 */
void
lustre_hash_exit(lustre_hash_t *lh)
{
        lustre_hash_bucket_t *lhb;
        struct hlist_node    *hnode;
        struct hlist_node    *pos;
        int                   i;

        LASSERT(lh != NULL);

        lh_write_lock(lh);
        lh_for_each_bucket(lh, lhb, i) {
                if (lhb == NULL)
                        continue;

                write_lock(&lhb->lhb_rwlock);
                hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        __lustre_hash_bucket_del(lh, lhb, hnode);
                }

                LASSERT(hlist_empty(&(lhb->lhb_head)));
                LASSERT(atomic_read(&lhb->lhb_count) == 0);
                write_unlock(&lhb->lhb_rwlock);
                LIBCFS_FREE(lhb, sizeof(*lhb));
        }

        LASSERT(atomic_read(&lh->lh_count) == 0);
        lh_write_unlock(lh);

        LIBCFS_FREE(lh->lh_buckets, sizeof(*lh->lh_buckets) << lh->lh_cur_bits);
        LIBCFS_FREE(lh, sizeof(*lh));
}
EXPORT_SYMBOL(lustre_hash_exit);

static inline unsigned int lustre_hash_rehash_bits(lustre_hash_t *lh)
{
        if (!(lh->lh_flags & LH_REHASH))
                return 0;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((lh->lh_cur_bits < lh->lh_max_bits) &&
            (__lustre_hash_theta(lh) > lh->lh_max_theta))
                return lh->lh_cur_bits + 1;

        if ((lh->lh_cur_bits > lh->lh_min_bits) &&
            (__lustre_hash_theta(lh) < lh->lh_min_theta))
                return lh->lh_cur_bits - 1;

        return 0;
}

/**
 * Add item \a hnode to lustre hash \a lh using \a key.  The registered
 * ops->lh_get function will be called when the item is added.
 */
void
lustre_hash_add(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
{
        lustre_hash_bucket_t *lhb;
        int                   bits;
        unsigned              i;

        __lustre_hash_key_validate(lh, key, hnode);

        lh_read_lock(lh);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);
        LASSERT(hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        __lustre_hash_bucket_add(lh, lhb, hnode);
        write_unlock(&lhb->lhb_rwlock);

        bits = lustre_hash_rehash_bits(lh);
        lh_read_unlock(lh);
        if (bits)
                lustre_hash_rehash(lh, bits);
}
EXPORT_SYMBOL(lustre_hash_add);

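/*
 * Usage sketch (hypothetical, continuing the my_obj example above): the
 * caller embeds the hlist_node in its object and passes the key separately,
 * since the hash itself never dereferences the object.
 *
 *   struct my_obj *obj;    allocated and initialized by the caller
 *
 *   lustre_hash_add(my_hash, &obj->mo_id, &obj->mo_hnode);
 */
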
static struct hlist_node *
lustre_hash_findadd_unique_hnode(lustre_hash_t *lh, void *key,
                                 struct hlist_node *hnode)
{
        int                   bits = 0;
        struct hlist_node    *ehnode;
        lustre_hash_bucket_t *lhb;
        unsigned              i;

        __lustre_hash_key_validate(lh, key, hnode);

        lh_read_lock(lh);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);
        LASSERT(hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        ehnode = __lustre_hash_bucket_lookup(lh, lhb, key);
        if (ehnode) {
                lh_get(lh, ehnode);
        } else {
                __lustre_hash_bucket_add(lh, lhb, hnode);
                ehnode = hnode;
                bits = lustre_hash_rehash_bits(lh);
        }
        write_unlock(&lhb->lhb_rwlock);
        lh_read_unlock(lh);
        if (bits)
                lustre_hash_rehash(lh, bits);

        return ehnode;
}

/**
 * Add item \a hnode to lustre hash \a lh using \a key.  The registered
 * ops->lh_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
lustre_hash_add_unique(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
{
        struct hlist_node *ehnode;

        ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
        if (ehnode != hnode) {
                lh_put(lh, ehnode);
                return -EALREADY;
        }

        return 0;
}
EXPORT_SYMBOL(lustre_hash_add_unique);

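/*
 * Usage sketch (hypothetical): -EALREADY signals that another thread won the
 * race to insert this key, a common pattern for lookup caches.  The
 * my_obj_free() helper is assumed, not part of this API.
 *
 *   rc = lustre_hash_add_unique(my_hash, &obj->mo_id, &obj->mo_hnode);
 *   if (rc == -EALREADY)
 *           my_obj_free(obj);    duplicate key, discard our copy
 */
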
/**
 * Add item \a hnode to lustre hash \a lh using \a key.  If this \a key
 * already exists in the hash then ops->lh_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->lh_get is called on the item which was added.
 */
void *
lustre_hash_findadd_unique(lustre_hash_t *lh, void *key,
                           struct hlist_node *hnode)
{
        struct hlist_node *ehnode;
        void              *obj;

        ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
        obj = lh_get(lh, ehnode);
        lh_put(lh, ehnode);

        return obj;
}
EXPORT_SYMBOL(lustre_hash_findadd_unique);

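/*
 * Usage sketch (hypothetical): atomic insert-or-lookup, so two racing
 * threads converge on a single instance of a given key.  "my_obj_free" is
 * an assumed helper.
 *
 *   struct my_obj *winner;
 *
 *   winner = lustre_hash_findadd_unique(my_hash, &obj->mo_id,
 *                                       &obj->mo_hnode);
 *   if (winner != obj)
 *           my_obj_free(obj);    someone else inserted this key first
 */
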
/**
 * Delete item \a hnode from the lustre hash \a lh using \a key.  The \a key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->lh_put is called
 * on the removed object.
 */
void *
lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
{
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        unsigned              i;

        __lustre_hash_key_validate(lh, key, hnode);

        lh_read_lock(lh);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);
        LASSERT(!hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        obj = __lustre_hash_bucket_del(lh, lhb, hnode);
        write_unlock(&lhb->lhb_rwlock);
        lh_read_unlock(lh);

        return obj;
}
EXPORT_SYMBOL(lustre_hash_del);

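/*
 * Usage sketch (hypothetical): the key must match the node being removed,
 * since it selects which bucket lock protects the deletion.
 *
 *   void *removed;
 *
 *   removed = lustre_hash_del(my_hash, &obj->mo_id, &obj->mo_hnode);
 */
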
/**
 * Delete item given \a key in lustre hash \a lh.  The first matching \a key
 * found in the hash will be removed; if the key exists multiple times in the
 * hash \a lh this function must be called once per instance.  The removed
 * object will be returned and ops->lh_put is called on the removed object.
 */
void *
lustre_hash_del_key(lustre_hash_t *lh, void *key)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj = NULL;
        unsigned              i;

        lh_read_lock(lh);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);

        write_lock(&lhb->lhb_rwlock);
        hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
        if (hnode)
                obj = __lustre_hash_bucket_del(lh, lhb, hnode);
        write_unlock(&lhb->lhb_rwlock);
        lh_read_unlock(lh);

        return obj;
}
EXPORT_SYMBOL(lustre_hash_del_key);

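/*
 * Usage sketch (hypothetical): remove by key alone when no pointer to the
 * item is at hand; NULL means the key was not present.
 *
 *   struct my_obj *obj;
 *   __u64          id = 42;
 *
 *   obj = lustre_hash_del_key(my_hash, &id);
 *   if (obj != NULL)
 *           my_obj_free(obj);    assumed helper
 */
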
/**
 * Lookup an item using \a key in the lustre hash \a lh and return it.
 * If the \a key is found in the hash, ops->lh_get is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->lh_put using the lh_put() macro
 * when finished with the object.  If the \a key was not found
 * in the hash \a lh, NULL is returned.
 */
void *
lustre_hash_lookup(lustre_hash_t *lh, void *key)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj = NULL;
        unsigned              i;

        lh_read_lock(lh);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);

        read_lock(&lhb->lhb_rwlock);
        hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
        if (hnode)
                obj = lh_get(lh, hnode);
        read_unlock(&lhb->lhb_rwlock);
        lh_read_unlock(lh);

        return obj;
}
EXPORT_SYMBOL(lustre_hash_lookup);

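/*
 * Usage sketch (hypothetical): every successful lookup returns a reference
 * which must be dropped with lh_put() once the caller is done.
 *
 *   __u64          id = 42;
 *   struct my_obj *obj;
 *
 *   obj = lustre_hash_lookup(my_hash, &id);
 *   if (obj != NULL) {
 *           ... use obj ...
 *           lh_put(my_hash, &obj->mo_hnode);
 *   }
 */
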
/**
 * For each item in the lustre hash \a lh call the passed callback \a func,
 * passing it each hash item along with the private \a data.
 * Before each callback ops->lh_get will be called, and after each
 * callback ops->lh_put will be called.  Finally, during the callback
 * the bucket lock is held so the callback must never sleep.
 */
void
lustre_hash_for_each(lustre_hash_t *lh, lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        int                   i;

        lh_read_lock(lh);
        lh_for_each_bucket(lh, lhb, i) {
                read_lock(&lhb->lhb_rwlock);
                hlist_for_each(hnode, &(lhb->lhb_head)) {
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        obj = lh_get(lh, hnode);
                        func(obj, data);
                        (void)lh_put(lh, hnode);
                }
                read_unlock(&lhb->lhb_rwlock);
        }
        lh_read_unlock(lh);
}
EXPORT_SYMBOL(lustre_hash_for_each);

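/*
 * Usage sketch (hypothetical): because the bucket lock is held across the
 * callback, the callback must be non-blocking; simple accounting is a
 * typical use.
 *
 *   static void my_obj_count_cb(void *obj, void *data)
 *   {
 *           (*(int *)data)++;    must not sleep here
 *   }
 *
 *   int count = 0;
 *
 *   lustre_hash_for_each(my_hash, my_obj_count_cb, &count);
 */
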
/**
 * For each item in the lustre hash \a lh call the passed callback \a func,
 * passing it each hash item along with the private \a data.
 * Before each callback ops->lh_get will be called, and after each
 * callback ops->lh_put will be called.  During the callback the
 * bucket lock will not be held, which allows the current item
 * to be removed from the hash during the callback.  However, care
 * should be taken to prevent other callers from operating on the
 * hash concurrently or list corruption may occur.
 */
void
lustre_hash_for_each_safe(lustre_hash_t *lh, lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        struct hlist_node    *pos;
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        int                   i;

        lh_read_lock(lh);
        lh_for_each_bucket(lh, lhb, i) {
                read_lock(&lhb->lhb_rwlock);
                hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        obj = lh_get(lh, hnode);
                        read_unlock(&lhb->lhb_rwlock);
                        func(obj, data);
                        read_lock(&lhb->lhb_rwlock);
                        (void)lh_put(lh, hnode);
                }
                read_unlock(&lhb->lhb_rwlock);
        }
        lh_read_unlock(lh);
}
EXPORT_SYMBOL(lustre_hash_for_each_safe);

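/*
 * Usage sketch (hypothetical): since the bucket lock is dropped around the
 * callback, the callback may delete the current item, e.g. to purge stale
 * entries.  "my_obj_is_stale" is an assumed predicate.  Note the object is
 * not freed here; the iterator still calls lh_put() on it afterwards.
 *
 *   static void my_obj_purge_cb(void *obj, void *data)
 *   {
 *           struct my_obj *o = obj;
 *
 *           if (my_obj_is_stale(o))
 *                   lustre_hash_del(my_hash, &o->mo_id, &o->mo_hnode);
 *   }
 *
 *   lustre_hash_for_each_safe(my_hash, my_obj_purge_cb, NULL);
 */
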
/**
 * For each hash bucket in the lustre hash \a lh call the passed callback
 * \a func until all the hash buckets are empty.  The passed callback \a func
 * or the previously registered callback ops->lh_put must remove the item
 * from the hash.  You may either use the lustre_hash_del() or hlist_del()
 * functions.  No rwlocks will be held during the callback \a func, so it is
 * safe to sleep if needed.  This function will not terminate until the
 * hash is empty.  Note it is still possible to concurrently add new
 * items into the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
void
lustre_hash_for_each_empty(lustre_hash_t *lh, lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        int                   i;

restart:
        lh_read_lock(lh);
        lh_for_each_bucket(lh, lhb, i) {
                write_lock(&lhb->lhb_rwlock);
                while (!hlist_empty(&lhb->lhb_head)) {
                        hnode = lhb->lhb_head.first;
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        obj = lh_get(lh, hnode);
                        write_unlock(&lhb->lhb_rwlock);
                        lh_read_unlock(lh);
                        func(obj, data);
                        (void)lh_put(lh, hnode);
                        goto restart;
                }
                write_unlock(&lhb->lhb_rwlock);
        }
        lh_read_unlock(lh);
}
EXPORT_SYMBOL(lustre_hash_for_each_empty);

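/*
 * Usage sketch (hypothetical): a common teardown pattern, draining the hash
 * before lustre_hash_exit().  The callback must remove the item or this
 * function will never terminate.  A complete implementation would free the
 * object once its refcount drops to zero, e.g. from within my_obj_put().
 *
 *   static void my_obj_drain_cb(void *obj, void *data)
 *   {
 *           struct my_obj *o = obj;
 *
 *           lustre_hash_del(my_hash, &o->mo_id, &o->mo_hnode);
 *   }
 *
 *   lustre_hash_for_each_empty(my_hash, my_obj_drain_cb, NULL);
 *   lustre_hash_exit(my_hash);
 */
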
/**
 * For each item in the lustre hash \a lh which matches the \a key call
 * the passed callback \a func and pass to it as an argument each hash
 * item and the private \a data.  Before each callback ops->lh_get will
 * be called, and after each callback ops->lh_put will be called.
 * Finally, during the callback the bucket lock is held so the
 * callback must never sleep.
 */
void
lustre_hash_for_each_key(lustre_hash_t *lh, void *key,
                         lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        unsigned              i;

        lh_read_lock(lh);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);

        read_lock(&lhb->lhb_rwlock);
        hlist_for_each(hnode, &(lhb->lhb_head)) {
                __lustre_hash_bucket_validate(lh, lhb, hnode);

                if (!lh_compare(lh, key, hnode))
                        continue;

                func(lh_get(lh, hnode), data);
                (void)lh_put(lh, hnode);
        }
        read_unlock(&lhb->lhb_rwlock);
        lh_read_unlock(lh);
}
EXPORT_SYMBOL(lustre_hash_for_each_key);

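/*
 * Usage sketch (hypothetical): visit every entry sharing one key, useful
 * when the hash intentionally stores duplicate keys via lustre_hash_add().
 * Reuses the non-sleeping my_obj_count_cb from the sketch above.
 *
 *   __u64 id = 42;
 *   int   count = 0;
 *
 *   lustre_hash_for_each_key(my_hash, &id, my_obj_count_cb, &count);
 */
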
/**
 * Rehash the lustre hash \a lh to the given \a bits.  This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed.  When the LH_REHASH
 * flag is set in \a lh the lustre hash may be dynamically rehashed
 * during addition or removal if the hash's theta value falls below
 * lh->lh_min_theta or exceeds lh->lh_max_theta.  By default
 * these values are tuned to keep the chained hash depth small, and
 * this approach assumes a reasonably uniform hashing function.  The
 * theta thresholds for \a lh are tunable via lustre_hash_set_theta().
 */
int
lustre_hash_rehash(lustre_hash_t *lh, int bits)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        lustre_hash_bucket_t **lh_buckets;
        lustre_hash_bucket_t **rehash_buckets;
        lustre_hash_bucket_t  *lh_lhb;
        lustre_hash_bucket_t  *rehash_lhb;
        int                    i;
        int                    theta;
        int                    lh_mask;
        int                    lh_bits;
        int                    mask = (1 << bits) - 1;
        int                    rc = 0;
        void                  *key;

        LASSERT(!in_interrupt());
        LASSERT(mask > 0);
        LASSERT((lh->lh_flags & LH_REHASH) != 0);

        LIBCFS_ALLOC(rehash_buckets, sizeof(*rehash_buckets) << bits);
        if (rehash_buckets == NULL)
                return -ENOMEM;

        for (i = 0; i <= mask; i++) {
                LIBCFS_ALLOC(rehash_buckets[i], sizeof(*rehash_buckets[i]));
                if (rehash_buckets[i] == NULL)
                        GOTO(free, rc = -ENOMEM);

                INIT_HLIST_HEAD(&rehash_buckets[i]->lhb_head);
                rwlock_init(&rehash_buckets[i]->lhb_rwlock);
                atomic_set(&rehash_buckets[i]->lhb_count, 0);
        }

        lh_write_lock(lh);

        /*
         * Early return for multiple concurrent racing callers,
         * ensure we only trigger the rehash if it is still needed.
         */
        theta = __lustre_hash_theta(lh);
        if ((theta >= lh->lh_min_theta) && (theta <= lh->lh_max_theta)) {
                lh_write_unlock(lh);
                GOTO(free, rc = -EALREADY);
        }

        lh_bits = lh->lh_cur_bits;
        lh_buckets = lh->lh_buckets;
        lh_mask = (1 << lh_bits) - 1;

        lh->lh_cur_bits = bits;
        lh->lh_cur_mask = (1 << bits) - 1;
        lh->lh_buckets = rehash_buckets;
        atomic_inc(&lh->lh_rehash_count);

        for (i = 0; i <= lh_mask; i++) {
                lh_lhb = lh_buckets[i];

                write_lock(&lh_lhb->lhb_rwlock);
                hlist_for_each_safe(hnode, pos, &(lh_lhb->lhb_head)) {
                        key = lh_key(lh, hnode);

                        /*
                         * Validate hnode is in the correct bucket.
                         */
                        if (unlikely(lh->lh_flags & LH_DEBUG))
                                LASSERT(lh_hash(lh, key, lh_mask) == i);

                        /*
                         * Delete from old hash bucket.
                         */
                        hlist_del(hnode);
                        LASSERT(atomic_read(&lh_lhb->lhb_count) > 0);
                        atomic_dec(&lh_lhb->lhb_count);

                        /*
                         * Add to rehash bucket, ops->lh_key must be defined.
                         */
                        rehash_lhb = rehash_buckets[lh_hash(lh, key, mask)];
                        hlist_add_head(hnode, &(rehash_lhb->lhb_head));
                        atomic_inc(&rehash_lhb->lhb_count);
                }

                LASSERT(hlist_empty(&(lh_lhb->lhb_head)));
                LASSERT(atomic_read(&lh_lhb->lhb_count) == 0);
                write_unlock(&lh_lhb->lhb_rwlock);
        }

        lh_write_unlock(lh);
        rehash_buckets = lh_buckets;
        i = (1 << lh_bits) - 1;
        bits = lh_bits;
 free:
        for (; i >= 0; i--)
                if (rehash_buckets[i] != NULL)
                        LIBCFS_FREE(rehash_buckets[i],
                                    sizeof(*rehash_buckets[i]));
        LIBCFS_FREE(rehash_buckets, sizeof(*rehash_buckets) << bits);
        return rc;
}
EXPORT_SYMBOL(lustre_hash_rehash);

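/*
 * Usage sketch (hypothetical): an explicit grow when the caller anticipates
 * a burst of insertions.  Per the LASSERT above, the hash must have been
 * created with LH_REHASH.
 *
 *   rc = lustre_hash_rehash(my_hash, 10);    resize to 2^10 buckets;
 *                                            -EALREADY means theta was
 *                                            already within bounds
 */
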
/**
 * Rehash the object referenced by \a hnode in the lustre hash \a lh.  The
 * \a old_key must be provided to locate the object's previous location
 * in the hash, and the \a new_key will be used to reinsert the object.
 * Use this function instead of a lustre_hash_add() + lustre_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered lh_get() and lh_put() functions will not be called.
 */
void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key, void *new_key,
                            struct hlist_node *hnode)
{
        lustre_hash_bucket_t *old_lhb;
        lustre_hash_bucket_t *new_lhb;
        unsigned              i;
        unsigned              j;

        __lustre_hash_key_validate(lh, new_key, hnode);
        LASSERT(!hlist_unhashed(hnode));

        lh_read_lock(lh);
        i = lh_hash(lh, old_key, lh->lh_cur_mask);
        old_lhb = lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);

        j = lh_hash(lh, new_key, lh->lh_cur_mask);
        new_lhb = lh->lh_buckets[j];
        LASSERT(j <= lh->lh_cur_mask);

        if (i < j) { /* write_lock ordering */
                write_lock(&old_lhb->lhb_rwlock);
                write_lock(&new_lhb->lhb_rwlock);
        } else if (i > j) {
                write_lock(&new_lhb->lhb_rwlock);
                write_lock(&old_lhb->lhb_rwlock);
        } else { /* i == j: same bucket, no migration needed */
                lh_read_unlock(lh);
                return;
        }

        /*
         * Migrate item between hash buckets without calling
         * the lh_get() and lh_put() callback functions.
         */
        hlist_del(hnode);
        LASSERT(atomic_read(&old_lhb->lhb_count) > 0);
        atomic_dec(&old_lhb->lhb_count);
        hlist_add_head(hnode, &(new_lhb->lhb_head));
        atomic_inc(&new_lhb->lhb_count);

        write_unlock(&new_lhb->lhb_rwlock);
        write_unlock(&old_lhb->lhb_rwlock);
        lh_read_unlock(lh);
}
EXPORT_SYMBOL(lustre_hash_rehash_key);

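/*
 * Usage sketch (hypothetical): re-key an object in place, e.g. after its id
 * changes, without ever making it invisible to concurrent lookups.  The
 * object's stored key must be updated first so the new_key validation
 * matches; compute_new_id() is an assumed helper.
 *
 *   __u64 old_id = obj->mo_id;
 *   __u64 new_id = compute_new_id(obj);
 *
 *   obj->mo_id = new_id;
 *   lustre_hash_rehash_key(my_hash, &old_id, &new_id, &obj->mo_hnode);
 */
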
int lustre_hash_debug_header(char *str, int size)
{
        return snprintf(str, size,
                        "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%6s%s\n",
                        LUSTRE_MAX_HASH_NAME,
                        "name", "cur", "min", "max", "theta", "t-min", "t-max",
                        "flags", "rehash", "count", " distribution");
}
EXPORT_SYMBOL(lustre_hash_debug_header);

int lustre_hash_debug_str(lustre_hash_t *lh, char *str, int size)
{
        lustre_hash_bucket_t *lhb;
        int                   theta;
        int                   i;
        int                   c = 0;
        int                   dist[8] = { 0, };

        if (str == NULL || size == 0)
                return 0;

        lh_read_lock(lh);
        theta = __lustre_hash_theta(lh);

        c += snprintf(str + c, size - c, "%-*s ",
                      LUSTRE_MAX_HASH_NAME, lh->lh_name);
        c += snprintf(str + c, size - c, "%5d ", 1 << lh->lh_cur_bits);
        c += snprintf(str + c, size - c, "%5d ", 1 << lh->lh_min_bits);
        c += snprintf(str + c, size - c, "%5d ", 1 << lh->lh_max_bits);
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __lustre_hash_theta_int(theta),
                      __lustre_hash_theta_frac(theta));
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __lustre_hash_theta_int(lh->lh_min_theta),
                      __lustre_hash_theta_frac(lh->lh_min_theta));
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __lustre_hash_theta_int(lh->lh_max_theta),
                      __lustre_hash_theta_frac(lh->lh_max_theta));
        c += snprintf(str + c, size - c, " 0x%02x ", lh->lh_flags);
        c += snprintf(str + c, size - c, "%6d ",
                      atomic_read(&lh->lh_rehash_count));
        c += snprintf(str + c, size - c, "%5d ",
                      atomic_read(&lh->lh_count));

        /*
         * The distribution is a summary of the chained hash depth in
         * each of the lustre hash buckets.  Each bucket's lhb_count is
         * divided by the hash theta value and used to generate a
         * histogram of the hash distribution.  A uniform hash will
         * result in all hash buckets being close to the average, thus
         * only the first few entries in the histogram will be non-zero.
         * If your hash function results in a non-uniform hash, this will
         * be observable as outlier buckets in the distribution histogram.
         *
         * Uniform hash distribution:      128/128/0/0/0/0/0/0
         * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
         */
        lh_for_each_bucket(lh, lhb, i)
                dist[min(__fls(atomic_read(&lhb->lhb_count) /
                               max(theta, 1)), 7)]++;

        for (i = 0; i < 8; i++)
                c += snprintf(str + c, size - c, "%d%c", dist[i],
                              (i == 7) ? '\n' : '/');

        lh_read_unlock(lh);

        return c;
}
EXPORT_SYMBOL(lustre_hash_debug_str);
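
/*
 * Usage sketch (hypothetical): dumping hash statistics, e.g. from a /proc
 * read handler.  The buffer sizing is illustrative only.
 *
 *   char buf[1024];
 *   int  len;
 *
 *   len  = lustre_hash_debug_header(buf, sizeof(buf));
 *   len += lustre_hash_debug_str(my_hash, buf + len, sizeof(buf) - len);
 *   CDEBUG(D_INFO, "%s", buf);
 */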