/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/class_hash.c
 *
 * Implementation of a generic hash class used throughout the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * LH_DEBUG additional validation
 *   * LH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 */

#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <class_hash.h>

/*
 * Initialize a new lustre hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - LH_REHASH enable dynamic hash resizing
 *           - LH_SORT enable chained hash sort
 */
lustre_hash_t *
lustre_hash_init(char *name, unsigned int cur_bits, unsigned int max_bits,
                 lustre_hash_ops_t *ops, int flags)
{
        lustre_hash_t *lh;
        int            i;
        ENTRY;

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(cur_bits > 0);
        LASSERT(max_bits >= cur_bits);
        LASSERT(max_bits < 31);

        OBD_ALLOC_PTR(lh);
        if (!lh)
                RETURN(NULL);

        strncpy(lh->lh_name, name, sizeof(lh->lh_name));
        lh->lh_name[sizeof(lh->lh_name) - 1] = '\0';
        atomic_set(&lh->lh_rehash_count, 0);
        atomic_set(&lh->lh_count, 0);
        rwlock_init(&lh->lh_rwlock);
        lh->lh_cur_bits = cur_bits;
        lh->lh_cur_mask = (1 << cur_bits) - 1;
        lh->lh_min_bits = cur_bits;
        lh->lh_max_bits = max_bits;
        /* XXX: need to fixup lustre_hash_rehash_bits() before this can be
         *      anything other than 0.5 and 2.0 */
        lh->lh_min_theta = 1 << (LH_THETA_BITS - 1);
        lh->lh_max_theta = 1 << (LH_THETA_BITS + 1);
        lh->lh_ops = ops;
        lh->lh_flags = flags;
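
        /* __lustre_hash_set_theta() takes its bounds in thousandths; the
         * 500 and 2000 below encode the same 0.5 and 2.0 defaults noted
         * in the XXX comment above. */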
        __lustre_hash_set_theta(lh, 500, 2000);

        OBD_VMALLOC(lh->lh_buckets, sizeof(*lh->lh_buckets) << lh->lh_cur_bits);
        if (!lh->lh_buckets) {
                OBD_FREE_PTR(lh);
                RETURN(NULL);
        }

        for (i = 0; i <= lh->lh_cur_mask; i++) {
                INIT_HLIST_HEAD(&lh->lh_buckets[i].lhb_head);
                rwlock_init(&lh->lh_buckets[i].lhb_rwlock);
                atomic_set(&lh->lh_buckets[i].lhb_count, 0);
        }

        RETURN(lh);
}
EXPORT_SYMBOL(lustre_hash_init);
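
/*
 * A minimal usage sketch (illustrative only; "struct my_obj" and the
 * callback names are hypothetical, and the lustre_hash_ops_t fields are
 * assumed to mirror the lh_hash/lh_key/lh_compare/lh_get/lh_put macros
 * used throughout this file):
 *
 *   static lustre_hash_ops_t my_obj_hash_ops = {
 *           .lh_hash    = my_obj_hash,      (hash @key into [0, mask])
 *           .lh_key     = my_obj_key,       (return the key for an hnode)
 *           .lh_compare = my_obj_compare,   (match @key against an hnode)
 *           .lh_get     = my_obj_get,       (take a ref, return the object)
 *           .lh_put     = my_obj_put,       (drop a ref)
 *   };
 *
 *   lh = lustre_hash_init("MY_OBJ_HASH", 7, 10, &my_obj_hash_ops,
 *                         LH_REHASH);
 *   if (lh == NULL)
 *           RETURN(-ENOMEM);   (2^7 buckets initially, growable to 2^10)
 */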

/*
 * Cleanup the lustre hash @lh.
 */
void
lustre_hash_exit(lustre_hash_t *lh)
{
        lustre_hash_bucket_t *lhb;
        struct hlist_node    *hnode;
        struct hlist_node    *pos;
        int                   i;
        ENTRY;

        LASSERT(lh != NULL);

        write_lock(&lh->lh_rwlock);

        lh_for_each_bucket(lh, lhb, i) {
                write_lock(&lhb->lhb_rwlock);
                hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        __lustre_hash_bucket_del(lh, lhb, hnode);
                        lh_exit(lh, hnode);
                }

                LASSERT(hlist_empty(&(lhb->lhb_head)));
                LASSERT(atomic_read(&lhb->lhb_count) == 0);
                write_unlock(&lhb->lhb_rwlock);
        }

        OBD_VFREE(lh->lh_buckets, sizeof(*lh->lh_buckets) << lh->lh_cur_bits);
        LASSERT(atomic_read(&lh->lh_count) == 0);
        write_unlock(&lh->lh_rwlock);

        OBD_FREE_PTR(lh);
        EXIT;
}
EXPORT_SYMBOL(lustre_hash_exit);

static inline unsigned int lustre_hash_rehash_bits(lustre_hash_t *lh)
{
        if (!(lh->lh_flags & LH_REHASH))
                return 0;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((lh->lh_cur_bits < lh->lh_max_bits) &&
            (__lustre_hash_theta(lh) > lh->lh_max_theta))
                return lh->lh_cur_bits + 1;

        if ((lh->lh_cur_bits > lh->lh_min_bits) &&
            (__lustre_hash_theta(lh) < lh->lh_min_theta))
                return lh->lh_cur_bits - 1;

        return 0;
}

/*
 * Add item @hnode to lustre hash @lh using @key.  The registered
 * ops->lh_get function will be called when the item is added.
 */
void
lustre_hash_add(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
{
        lustre_hash_bucket_t *lhb;
        int                   bits;
        unsigned              i;
        ENTRY;

        __lustre_hash_key_validate(lh, key, hnode);

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = &lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);
        LASSERT(hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        __lustre_hash_bucket_add(lh, lhb, hnode);
        write_unlock(&lhb->lhb_rwlock);

        bits = lustre_hash_rehash_bits(lh);
        read_unlock(&lh->lh_rwlock);
        if (bits)
                lustre_hash_rehash(lh, bits);

        EXIT;
}
EXPORT_SYMBOL(lustre_hash_add);

static struct hlist_node *
lustre_hash_findadd_unique_hnode(lustre_hash_t *lh, void *key,
                                 struct hlist_node *hnode)
{
        int                   bits = 0;
        struct hlist_node    *ehnode;
        lustre_hash_bucket_t *lhb;
        unsigned              i;
        ENTRY;

        __lustre_hash_key_validate(lh, key, hnode);

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = &lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);
        LASSERT(hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        ehnode = __lustre_hash_bucket_lookup(lh, lhb, key);
        if (ehnode) {
                /* An existing entry matched @key; take a reference on it. */
                lh_get(lh, ehnode);
        } else {
                __lustre_hash_bucket_add(lh, lhb, hnode);
                ehnode = hnode;
                bits = lustre_hash_rehash_bits(lh);
        }
        write_unlock(&lhb->lhb_rwlock);
        read_unlock(&lh->lh_rwlock);
        if (bits)
                lustre_hash_rehash(lh, bits);

        RETURN(ehnode);
}

/*
 * Add item @hnode to lustre hash @lh using @key.  The registered
 * ops->lh_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
lustre_hash_add_unique(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
{
        struct hlist_node    *ehnode;
        ENTRY;

        ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
        if (ehnode != hnode) {
                lh_put(lh, ehnode);
                RETURN(-EALREADY);
        }

        RETURN(0);
}
EXPORT_SYMBOL(lustre_hash_add_unique);
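
/*
 * Usage sketch for the unique-insert path ("struct my_obj" and its
 * fields are hypothetical; the object embeds its struct hlist_node
 * hash linkage):
 *
 *   rc = lustre_hash_add_unique(lh, &obj->mo_key, &obj->mo_hash_node);
 *   if (rc == -EALREADY)
 *           (an entry with this key already exists in the hash)
 */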

/*
 * Add item @hnode to lustre hash @lh using @key.  If this @key
 * already exists in the hash then ops->lh_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->lh_get is called on the item which was added.
 */
void *
lustre_hash_findadd_unique(lustre_hash_t *lh, void *key,
                           struct hlist_node *hnode)
{
        struct hlist_node    *ehnode;
        void                 *obj;
        ENTRY;

        ehnode = lustre_hash_findadd_unique_hnode(lh, key, hnode);
        obj = lh_get(lh, ehnode);
        lh_put(lh, ehnode);
        RETURN(obj);
}
EXPORT_SYMBOL(lustre_hash_findadd_unique);

/*
 * Delete item @hnode from the lustre hash @lh using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->lh_put is called
 * on the removed object.
 */
void *
lustre_hash_del(lustre_hash_t *lh, void *key, struct hlist_node *hnode)
{
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        unsigned              i;
        ENTRY;

        __lustre_hash_key_validate(lh, key, hnode);

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = &lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);
        LASSERT(!hlist_unhashed(hnode));

        write_lock(&lhb->lhb_rwlock);
        obj = __lustre_hash_bucket_del(lh, lhb, hnode);
        write_unlock(&lhb->lhb_rwlock);
        read_unlock(&lh->lh_rwlock);

        RETURN(obj);
}
EXPORT_SYMBOL(lustre_hash_del);
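
/*
 * Deletion sketch (hypothetical object): the removed object is returned
 * after the hash's own reference has been dropped via ops->lh_put:
 *
 *   obj = lustre_hash_del(lh, &obj->mo_key, &obj->mo_hash_node);
 */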

/*
 * Delete item given @key in lustre hash @lh.  The first @key found in
 * the hash will be removed; if the @key exists multiple times in the
 * hash @lh this function must be called once per instance.  The removed
 * object will be returned and ops->lh_put is called on the removed
 * object.
 */
void *
lustre_hash_del_key(lustre_hash_t *lh, void *key)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj = NULL;
        unsigned              i;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = &lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);

        write_lock(&lhb->lhb_rwlock);
        hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
        if (hnode)
                obj = __lustre_hash_bucket_del(lh, lhb, hnode);

        write_unlock(&lhb->lhb_rwlock);
        read_unlock(&lh->lh_rwlock);

        RETURN(obj);
}
EXPORT_SYMBOL(lustre_hash_del_key);

/*
 * Lookup an item using @key in the lustre hash @lh and return it.
 * If the @key is found in the hash ops->lh_get is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->lh_put using the lh_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @lh NULL is returned.
 */
void *
lustre_hash_lookup(lustre_hash_t *lh, void *key)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj = NULL;
        unsigned              i;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = &lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);

        read_lock(&lhb->lhb_rwlock);
        hnode = __lustre_hash_bucket_lookup(lh, lhb, key);
        if (hnode)
                obj = lh_get(lh, hnode);

        read_unlock(&lhb->lhb_rwlock);
        read_unlock(&lh->lh_rwlock);

        RETURN(obj);
}
EXPORT_SYMBOL(lustre_hash_lookup);
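
/*
 * Lookup sketch, pairing the implicit ops->lh_get with the required
 * lh_put (hypothetical object and field names):
 *
 *   obj = lustre_hash_lookup(lh, &key);
 *   if (obj != NULL) {
 *           (use obj while the reference is held)
 *           lh_put(lh, &obj->mo_hash_node);
 *   }
 */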

/*
 * For each item in the lustre hash @lh call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 * Before each callback ops->lh_get will be called, and after each
 * callback ops->lh_put will be called.  Finally, the bucket lock is
 * held during the callback so the callback must never sleep.
 */
void
lustre_hash_for_each(lustre_hash_t *lh, lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        int                   i;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        lh_for_each_bucket(lh, lhb, i) {
                read_lock(&lhb->lhb_rwlock);
                hlist_for_each(hnode, &(lhb->lhb_head)) {
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        obj = lh_get(lh, hnode);
                        func(obj, data);
                        (void)lh_put(lh, hnode);
                }
                read_unlock(&lhb->lhb_rwlock);
        }
        read_unlock(&lh->lh_rwlock);

        EXIT;
}
EXPORT_SYMBOL(lustre_hash_for_each);
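
/*
 * Iteration sketch; the callback runs under the bucket lock and must
 * not sleep (the callback is hypothetical, assuming the lh_for_each_cb
 * signature matches how @func is invoked above):
 *
 *   static void my_obj_count_cb(void *obj, void *data)
 *   {
 *           atomic_inc((atomic_t *)data);
 *   }
 *
 *   lustre_hash_for_each(lh, my_obj_count_cb, &my_counter);
 */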

/*
 * For each item in the lustre hash @lh call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 * Before each callback ops->lh_get will be called, and after each
 * callback ops->lh_put will be called.  During the callback the
 * bucket lock will not be held, which allows the current item
 * to be removed from the hash during the callback.  However, care
 * should be taken to prevent other callers from operating on the
 * hash concurrently or list corruption may occur.
 */
void
lustre_hash_for_each_safe(lustre_hash_t *lh, lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        struct hlist_node    *pos;
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        int                   i;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        lh_for_each_bucket(lh, lhb, i) {
                read_lock(&lhb->lhb_rwlock);
                hlist_for_each_safe(hnode, pos, &(lhb->lhb_head)) {
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        obj = lh_get(lh, hnode);
                        read_unlock(&lhb->lhb_rwlock);
                        func(obj, data);
                        read_lock(&lhb->lhb_rwlock);
                        (void)lh_put(lh, hnode);
                }
                read_unlock(&lhb->lhb_rwlock);
        }
        read_unlock(&lh->lh_rwlock);
        EXIT;
}
EXPORT_SYMBOL(lustre_hash_for_each_safe);

/*
 * For each hash bucket in the lustre hash @lh call the passed callback
 * @func until all the hash buckets are empty.  The passed callback @func
 * or the previously registered ops->lh_put callback must remove the item
 * from the hash; either the lustre_hash_del() or hlist_del() functions
 * may be used.  No rwlocks will be held during the callback @func, so it
 * is safe to sleep if needed.  This function will not terminate until
 * the hash is empty.  Note it is still possible to concurrently add new
 * items into the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
void
lustre_hash_for_each_empty(lustre_hash_t *lh, lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        void                 *obj;
        int                   i;
        ENTRY;

restart:
        read_lock(&lh->lh_rwlock);
        lh_for_each_bucket(lh, lhb, i) {
                write_lock(&lhb->lhb_rwlock);
                while (!hlist_empty(&lhb->lhb_head)) {
                        hnode = lhb->lhb_head.first;
                        __lustre_hash_bucket_validate(lh, lhb, hnode);
                        obj = lh_get(lh, hnode);
                        write_unlock(&lhb->lhb_rwlock);
                        read_unlock(&lh->lh_rwlock);
                        func(obj, data);
                        (void)lh_put(lh, hnode);
                        goto restart;
                }
                write_unlock(&lhb->lhb_rwlock);
        }
        read_unlock(&lh->lh_rwlock);
        EXIT;
}
EXPORT_SYMBOL(lustre_hash_for_each_empty);
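
/*
 * Teardown sketch: each callback invocation must remove the item or
 * this loop will never terminate ("struct my_obj" and its fields are
 * hypothetical; no locks are held, so the callback may sleep):
 *
 *   static void my_obj_release_cb(void *obj, void *data)
 *   {
 *           struct my_obj *o = obj;
 *
 *           lustre_hash_del((lustre_hash_t *)data, &o->mo_key,
 *                           &o->mo_hash_node);
 *           my_obj_free(o);
 *   }
 *
 *   lustre_hash_for_each_empty(lh, my_obj_release_cb, lh);
 */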

/*
 * For each item in the lustre hash @lh which matches the @key call
 * the passed callback @func and pass to it as an argument each hash
 * item and the private @data.  Before each callback ops->lh_get will
 * be called, and after each callback ops->lh_put will be called.
 * Finally, the bucket lock is held during the callback so the
 * callback must never sleep.
 */
void
lustre_hash_for_each_key(lustre_hash_t *lh, void *key,
                         lh_for_each_cb func, void *data)
{
        struct hlist_node    *hnode;
        lustre_hash_bucket_t *lhb;
        unsigned              i;
        ENTRY;

        read_lock(&lh->lh_rwlock);
        i = lh_hash(lh, key, lh->lh_cur_mask);
        lhb = &lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);

        read_lock(&lhb->lhb_rwlock);
        hlist_for_each(hnode, &(lhb->lhb_head)) {
                __lustre_hash_bucket_validate(lh, lhb, hnode);

                if (!lh_compare(lh, key, hnode))
                        continue;

                func(lh_get(lh, hnode), data);
                (void)lh_put(lh, hnode);
        }

        read_unlock(&lhb->lhb_rwlock);
        read_unlock(&lh->lh_rwlock);

        EXIT;
}
EXPORT_SYMBOL(lustre_hash_for_each_key);

/*
 * Rehash the lustre hash @lh to the given @bits.  This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed.  When the LH_REHASH
 * flag is set in @lh the lustre hash may be dynamically rehashed
 * during addition or removal if the hash's theta value falls outside
 * the lh->lh_min_theta and lh->lh_max_theta bounds.  By default
 * these values are tuned to keep the chained hash depth small, and
 * this approach assumes a reasonably uniform hashing function.  The
 * theta thresholds for @lh are tunable via lustre_hash_set_theta().
 */
int
lustre_hash_rehash(lustre_hash_t *lh, int bits)
{
        struct hlist_node    *hnode;
        struct hlist_node    *pos;
        lustre_hash_bucket_t *lh_buckets;
        lustre_hash_bucket_t *rehash_buckets;
        lustre_hash_bucket_t *lh_lhb;
        lustre_hash_bucket_t *rehash_lhb;
        int                   i;
        int                   theta;
        int                   lh_mask;
        int                   lh_bits;
        int                   mask = (1 << bits) - 1;
        void                 *key;
        ENTRY;

        LASSERT(!in_interrupt());

        OBD_VMALLOC(rehash_buckets, sizeof(*rehash_buckets) << bits);
        if (!rehash_buckets)
                RETURN(-ENOMEM);

        for (i = 0; i <= mask; i++) {
                INIT_HLIST_HEAD(&rehash_buckets[i].lhb_head);
                rwlock_init(&rehash_buckets[i].lhb_rwlock);
                atomic_set(&rehash_buckets[i].lhb_count, 0);
        }

        write_lock(&lh->lh_rwlock);

        /*
         * Early return for multiple concurrent racing callers,
         * ensure we only trigger the rehash if it is still needed.
         */
        theta = __lustre_hash_theta(lh);
        if ((theta >= lh->lh_min_theta) && (theta <= lh->lh_max_theta)) {
                OBD_VFREE(rehash_buckets, sizeof(*rehash_buckets) << bits);
                write_unlock(&lh->lh_rwlock);
                RETURN(-EALREADY);
        }

        lh_bits = lh->lh_cur_bits;
        lh_buckets = lh->lh_buckets;
        lh_mask = (1 << lh_bits) - 1;

        lh->lh_cur_bits = bits;
        lh->lh_cur_mask = (1 << bits) - 1;
        lh->lh_buckets = rehash_buckets;
        atomic_inc(&lh->lh_rehash_count);

        for (i = 0; i <= lh_mask; i++) {
                lh_lhb = &lh_buckets[i];

                write_lock(&lh_lhb->lhb_rwlock);
                hlist_for_each_safe(hnode, pos, &(lh_lhb->lhb_head)) {
                        key = lh_key(lh, hnode);

                        /*
                         * Validate hnode is in the correct bucket.
                         */
                        if (unlikely(lh->lh_flags & LH_DEBUG))
                                LASSERT(lh_hash(lh, key, lh_mask) == i);

                        /*
                         * Delete from old hash bucket.
                         */
                        hlist_del(hnode);
                        LASSERT(atomic_read(&lh_lhb->lhb_count) > 0);
                        atomic_dec(&lh_lhb->lhb_count);

                        /*
                         * Add to rehash bucket, ops->lh_key must be defined.
                         */
                        rehash_lhb = &rehash_buckets[lh_hash(lh, key, mask)];
                        hlist_add_head(hnode, &(rehash_lhb->lhb_head));
                        atomic_inc(&rehash_lhb->lhb_count);
                }

                LASSERT(hlist_empty(&(lh_lhb->lhb_head)));
                LASSERT(atomic_read(&lh_lhb->lhb_count) == 0);
                write_unlock(&lh_lhb->lhb_rwlock);
        }

        OBD_VFREE(lh_buckets, sizeof(*lh_buckets) << lh_bits);
        write_unlock(&lh->lh_rwlock);

        RETURN(0);
}
EXPORT_SYMBOL(lustre_hash_rehash);
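
/*
 * Worked example of the rehash trigger, assuming theta tracks the
 * average chain depth (item count over bucket count) in LH_THETA_BITS
 * fixed point: a hash created with cur_bits = 4 (16 buckets) holding
 * 40 items has theta = 40/16 = 2.5.  With the default 0.5/2.0 bounds,
 * 2.5 > 2.0, so lustre_hash_rehash_bits() returns cur_bits + 1 and the
 * table is rehashed into 32 buckets, bringing theta down to 1.25.
 */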

/*
 * Rehash the object referenced by @hnode in the lustre hash @lh.  The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a lustre_hash_add() + lustre_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered lh_get() and lh_put() functions will not be called.
 */
void lustre_hash_rehash_key(lustre_hash_t *lh, void *old_key, void *new_key,
                            struct hlist_node *hnode)
{
        lustre_hash_bucket_t *old_lhb;
        lustre_hash_bucket_t *new_lhb;
        unsigned              i;
        unsigned              j;
        ENTRY;

        __lustre_hash_key_validate(lh, new_key, hnode);
        LASSERT(!hlist_unhashed(hnode));

        read_lock(&lh->lh_rwlock);

        i = lh_hash(lh, old_key, lh->lh_cur_mask);
        old_lhb = &lh->lh_buckets[i];
        LASSERT(i <= lh->lh_cur_mask);

        j = lh_hash(lh, new_key, lh->lh_cur_mask);
        new_lhb = &lh->lh_buckets[j];
        LASSERT(j <= lh->lh_cur_mask);

        write_lock(&old_lhb->lhb_rwlock);
        write_lock(&new_lhb->lhb_rwlock);

        /*
         * Migrate item between hash buckets without calling
         * the lh_get() and lh_put() callback functions.
         */
        hlist_del(hnode);
        LASSERT(atomic_read(&old_lhb->lhb_count) > 0);
        atomic_dec(&old_lhb->lhb_count);
        hlist_add_head(hnode, &(new_lhb->lhb_head));
        atomic_inc(&new_lhb->lhb_count);

        write_unlock(&new_lhb->lhb_rwlock);
        write_unlock(&old_lhb->lhb_rwlock);
        read_unlock(&lh->lh_rwlock);

        EXIT;
}
EXPORT_SYMBOL(lustre_hash_rehash_key);
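
/*
 * Key-change sketch (hypothetical object): update the object's key
 * first so @new_key validates against @hnode, then migrate it with no
 * window where a concurrent lookup can miss it:
 *
 *   old_key = obj->mo_key;
 *   obj->mo_key = new_key;
 *   lustre_hash_rehash_key(lh, &old_key, &obj->mo_key,
 *                          &obj->mo_hash_node);
 */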

int lustre_hash_debug_header(char *str, int size)
{
        return snprintf(str, size,
                        "%-36s%6s%6s%6s%6s%6s%6s%6s%7s%6s%s\n",
                        "name", "cur", "min", "max", "theta", "t-min", "t-max",
                        "flags", "rehash", "count", " distribution");
}
EXPORT_SYMBOL(lustre_hash_debug_header);

int lustre_hash_debug_str(lustre_hash_t *lh, char *str, int size)
{
        lustre_hash_bucket_t *lhb;
        int                   theta;
        int                   i;
        int                   c = 0;
        int                   dist[8] = { 0, };

        if (str == NULL || size == 0)
                return 0;

        read_lock(&lh->lh_rwlock);
        theta = __lustre_hash_theta(lh);

        c += snprintf(str + c, size - c, "%-36s ", lh->lh_name);
        c += snprintf(str + c, size - c, "%5d ", 1 << lh->lh_cur_bits);
        c += snprintf(str + c, size - c, "%5d ", 1 << lh->lh_min_bits);
        c += snprintf(str + c, size - c, "%5d ", 1 << lh->lh_max_bits);
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __lustre_hash_theta_int(theta),
                      __lustre_hash_theta_frac(theta));
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __lustre_hash_theta_int(lh->lh_min_theta),
                      __lustre_hash_theta_frac(lh->lh_min_theta));
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __lustre_hash_theta_int(lh->lh_max_theta),
                      __lustre_hash_theta_frac(lh->lh_max_theta));
        c += snprintf(str + c, size - c, " 0x%02x ", lh->lh_flags);
        c += snprintf(str + c, size - c, "%6d ",
                      atomic_read(&lh->lh_rehash_count));
        c += snprintf(str + c, size - c, "%5d ",
                      atomic_read(&lh->lh_count));

        /*
         * The distribution is a summary of the chained hash depth in
         * each of the lustre hash buckets.  Each bucket's lhb_count is
         * divided by the hash theta value and used to generate a
         * histogram of the hash distribution.  A uniform hash will
         * result in all hash buckets being close to the average, thus
         * only the first few entries in the histogram will be non-zero.
         * If your hash function results in a non-uniform hash this will
         * be observable as outlier buckets in the distribution histogram.
         *
         * Uniform hash distribution:      128/128/0/0/0/0/0/0
         * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
         */
        lh_for_each_bucket(lh, lhb, i)
                dist[min(__fls(atomic_read(&lhb->lhb_count) / max(theta, 1)), 7)]++;

        for (i = 0; i < 8; i++)
                c += snprintf(str + c, size - c, "%d%c", dist[i],
                              (i == 7) ? '\n' : '/');

        read_unlock(&lh->lh_rwlock);

        return c;
}
EXPORT_SYMBOL(lustre_hash_debug_str);