4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
27 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * libcfs/libcfs/hash.c
38 * Implement a hash class for hash operations in the Lustre system.
40 * Author: YuZhangyong <yzy@clusterfs.com>
42 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
43 * - Simplified API and improved documentation
44 * - Added per-hash feature flags:
45 * * CFS_HASH_DEBUG additional validation
46 * * CFS_HASH_REHASH dynamic rehashing
47 * - Added per-hash statistics
48 * - General performance enhancements
50 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
51 * - move all stuff to libcfs
52 * - don't allow cur_bits != max_bits unless CFS_HASH_REHASH is set
53 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
54 * - buckets are allocated one by one (instead of as contiguous memory),
55 * to avoid unnecessary cacheline conflicts
57 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
58 * - "bucket" is now a group of hlist_heads; the user can specify the bucket size
59 * via bkt_bits of cfs_hash_create(); all hlist_heads in a bucket share
60 * one lock to reduce memory overhead.
62 * - support lockless hash, caller will take care of locks:
63 * avoid lock overhead for hash tables that are already protected
64 * by locking in the caller for another reason
66 * - support both spin_lock/rwlock for bucket:
67 * the overhead of spinlock contention is lower than the read/write
68 * contention of a rwlock, so using a spinlock to serialize operations on
69 * a bucket is more reasonable for frequently modified hash tables
71 * - support single-lock mode:
72 * one lock protects all hash operations, to avoid the overhead of
73 * multiple locks if the hash table is always small
75 * - removed a lot of unnecessary addref & decref on hash element:
76 * addref & decref are atomic operations in many use-cases, which
77 * are expensive
79 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
80 * some Lustre use-cases require these functions to be strictly
81 * non-blocking, so we need to schedule the required rehash on a different
82 * thread in those cases.
84 * - safer rehash on large hash table
85 * In the old implementation, the rehash function would exclusively lock the
86 * hash table and finish the rehash in one batch; that is dangerous on an SMP
87 * system because rehashing millions of elements could take a long time.
88 * The new rehash implementation can release the lock and relax the CPU in the
89 * middle of a rehash, so it's safe for another thread to search/change the
90 * hash table even while it is rehashing.
92 * - support two different refcount modes
93 * . hash table has refcount on element
94 * . hash table doesn't change refcount on adding/removing element
96 * - support long name hash table (for param-tree)
98 * - fix a bug for cfs_hash_rehash_key:
99 * in the old implementation, cfs_hash_rehash_key could screw up the
100 * hash-table because @key was overwritten without any protection.
101 * Now the user must define hs_keycpy for rehash-enabled
102 * hash tables; cfs_hash_rehash_key will overwrite the hash key
103 * inside the lock by calling hs_keycpy.
105 * - better hash iteration:
106 * Now we support both locked iteration & lockless iteration of hash
107 * table. Also, the user can break the iteration by returning 1 from the callback.
110 #include <libcfs/libcfs.h>
112 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
113 static unsigned int warn_on_depth = 8;
114 CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
115 "warning when hash depth is high.");
118 struct cfs_wi_sched *cfs_sched_rehash;
121 cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}
124 cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}
127 cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
129 spin_lock(&lock->spin);
133 cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
135 spin_unlock(&lock->spin);
139 cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
142 read_lock(&lock->rw);
144 write_lock(&lock->rw);
148 cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
151 read_unlock(&lock->rw);
153 write_unlock(&lock->rw);
157 static cfs_hash_lock_ops_t cfs_hash_nl_lops =
159 .hs_lock = cfs_hash_nl_lock,
160 .hs_unlock = cfs_hash_nl_unlock,
161 .hs_bkt_lock = cfs_hash_nl_lock,
162 .hs_bkt_unlock = cfs_hash_nl_unlock,
165 /** no bucket lock, one spinlock to protect everything */
166 static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
168 .hs_lock = cfs_hash_spin_lock,
169 .hs_unlock = cfs_hash_spin_unlock,
170 .hs_bkt_lock = cfs_hash_nl_lock,
171 .hs_bkt_unlock = cfs_hash_nl_unlock,
174 /** spin bucket lock, rehash is enabled */
175 static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
177 .hs_lock = cfs_hash_rw_lock,
178 .hs_unlock = cfs_hash_rw_unlock,
179 .hs_bkt_lock = cfs_hash_spin_lock,
180 .hs_bkt_unlock = cfs_hash_spin_unlock,
183 /** rw bucket lock, rehash is enabled */
184 static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
186 .hs_lock = cfs_hash_rw_lock,
187 .hs_unlock = cfs_hash_rw_unlock,
188 .hs_bkt_lock = cfs_hash_rw_lock,
189 .hs_bkt_unlock = cfs_hash_rw_unlock,
192 /** spin bucket lock, rehash is disabled */
193 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
195 .hs_lock = cfs_hash_nl_lock,
196 .hs_unlock = cfs_hash_nl_unlock,
197 .hs_bkt_lock = cfs_hash_spin_lock,
198 .hs_bkt_unlock = cfs_hash_spin_unlock,
201 /** rw bucket lock, rehash is disabled */
202 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
204 .hs_lock = cfs_hash_nl_lock,
205 .hs_unlock = cfs_hash_nl_unlock,
206 .hs_bkt_lock = cfs_hash_rw_lock,
207 .hs_bkt_unlock = cfs_hash_rw_unlock,
211 cfs_hash_lock_setup(cfs_hash_t *hs)
213 if (cfs_hash_with_no_lock(hs)) {
214 hs->hs_lops = &cfs_hash_nl_lops;
216 } else if (cfs_hash_with_no_bktlock(hs)) {
217 hs->hs_lops = &cfs_hash_nbl_lops;
218 spin_lock_init(&hs->hs_lock.spin);
220 } else if (cfs_hash_with_rehash(hs)) {
221 rwlock_init(&hs->hs_lock.rw);
223 if (cfs_hash_with_rw_bktlock(hs))
224 hs->hs_lops = &cfs_hash_bkt_rw_lops;
225 else if (cfs_hash_with_spin_bktlock(hs))
226 hs->hs_lops = &cfs_hash_bkt_spin_lops;
230 if (cfs_hash_with_rw_bktlock(hs))
231 hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
232 else if (cfs_hash_with_spin_bktlock(hs))
233 hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
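/*
 * Summary of the lock-mode selection above (a reading aid derived from the
 * lock-ops tables and cfs_hash_lock_setup(); it adds no new behaviour):
 *
 *   cfs_hash_with_no_lock()                  : nl_lops          (no locking at all)
 *   cfs_hash_with_no_bktlock()               : nbl_lops         (one top-level spinlock)
 *   rehash + cfs_hash_with_rw_bktlock()      : bkt_rw_lops      (top rwlock + rwlock per bucket)
 *   rehash + cfs_hash_with_spin_bktlock()    : bkt_spin_lops    (top rwlock + spinlock per bucket)
 *   no rehash + cfs_hash_with_rw_bktlock()   : nr_bkt_rw_lops   (rwlock per bucket only)
 *   no rehash + cfs_hash_with_spin_bktlock() : nr_bkt_spin_lops (spinlock per bucket only)
 */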
240 * Simple hash head without depth tracking
241 * new element is always added to head of hlist
244 cfs_hlist_head_t hh_head; /**< entries list */
248 cfs_hash_hh_hhead_size(cfs_hash_t *hs)
250 return sizeof(cfs_hash_head_t);
253 static cfs_hlist_head_t *
254 cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
256 cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
258 return &head[bd->bd_offset].hh_head;
262 cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
263 cfs_hlist_node_t *hnode)
265 cfs_hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
266 return -1; /* unknown depth */
270 cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
271 cfs_hlist_node_t *hnode)
273 cfs_hlist_del_init(hnode);
274 return -1; /* unknown depth */
278 * Simple hash head with depth tracking
279 * new element is always added to head of hlist
282 cfs_hlist_head_t hd_head; /**< entries list */
283 unsigned int hd_depth; /**< list length */
284 } cfs_hash_head_dep_t;
287 cfs_hash_hd_hhead_size(cfs_hash_t *hs)
289 return sizeof(cfs_hash_head_dep_t);
292 static cfs_hlist_head_t *
293 cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
295 cfs_hash_head_dep_t *head;
297 head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
298 return &head[bd->bd_offset].hd_head;
302 cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
303 cfs_hlist_node_t *hnode)
305 cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
306 cfs_hash_head_dep_t, hd_head);
307 cfs_hlist_add_head(hnode, &hh->hd_head);
308 return ++hh->hd_depth;
312 cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
313 cfs_hlist_node_t *hnode)
315 cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
316 cfs_hash_head_dep_t, hd_head);
317 cfs_hlist_del_init(hnode);
318 return --hh->hd_depth;
322 * hash head with both head and tail links, without depth tracking
323 * new element is always added to tail of hlist
326 cfs_hlist_head_t dh_head; /**< entries list */
327 cfs_hlist_node_t *dh_tail; /**< the last entry */
331 cfs_hash_dh_hhead_size(cfs_hash_t *hs)
333 return sizeof(cfs_hash_dhead_t);
336 static cfs_hlist_head_t *
337 cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
339 cfs_hash_dhead_t *head;
341 head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
342 return &head[bd->bd_offset].dh_head;
346 cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
347 cfs_hlist_node_t *hnode)
349 cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
350 cfs_hash_dhead_t, dh_head);
352 if (dh->dh_tail != NULL) /* not empty */
353 cfs_hlist_add_after(dh->dh_tail, hnode);
354 else /* empty list */
355 cfs_hlist_add_head(hnode, &dh->dh_head);
357 return -1; /* unknown depth */
361 cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
362 cfs_hlist_node_t *hnd)
364 cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
365 cfs_hash_dhead_t, dh_head);
367 if (hnd->next == NULL) { /* it's the tail */
368 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
369 container_of(hnd->pprev, cfs_hlist_node_t, next);
371 cfs_hlist_del_init(hnd);
372 return -1; /* unknown depth */
376 * hash head with both head and tail links, with depth tracking
377 * new element is always added to tail of hlist
380 cfs_hlist_head_t dd_head; /**< entries list */
381 cfs_hlist_node_t *dd_tail; /**< the last entry */
382 unsigned int dd_depth; /**< list length */
383 } cfs_hash_dhead_dep_t;
386 cfs_hash_dd_hhead_size(cfs_hash_t *hs)
388 return sizeof(cfs_hash_dhead_dep_t);
391 static cfs_hlist_head_t *
392 cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
394 cfs_hash_dhead_dep_t *head;
396 head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
397 return &head[bd->bd_offset].dd_head;
401 cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
402 cfs_hlist_node_t *hnode)
404 cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
405 cfs_hash_dhead_dep_t, dd_head);
407 if (dh->dd_tail != NULL) /* not empty */
408 cfs_hlist_add_after(dh->dd_tail, hnode);
409 else /* empty list */
410 cfs_hlist_add_head(hnode, &dh->dd_head);
412 return ++dh->dd_depth;
416 cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
417 cfs_hlist_node_t *hnd)
419 cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
420 cfs_hash_dhead_dep_t, dd_head);
422 if (hnd->next == NULL) { /* it's the tail */
423 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
424 container_of(hnd->pprev, cfs_hlist_node_t, next);
426 cfs_hlist_del_init(hnd);
427 return --dh->dd_depth;
430 static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
431 .hop_hhead = cfs_hash_hh_hhead,
432 .hop_hhead_size = cfs_hash_hh_hhead_size,
433 .hop_hnode_add = cfs_hash_hh_hnode_add,
434 .hop_hnode_del = cfs_hash_hh_hnode_del,
437 static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
438 .hop_hhead = cfs_hash_hd_hhead,
439 .hop_hhead_size = cfs_hash_hd_hhead_size,
440 .hop_hnode_add = cfs_hash_hd_hnode_add,
441 .hop_hnode_del = cfs_hash_hd_hnode_del,
444 static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
445 .hop_hhead = cfs_hash_dh_hhead,
446 .hop_hhead_size = cfs_hash_dh_hhead_size,
447 .hop_hnode_add = cfs_hash_dh_hnode_add,
448 .hop_hnode_del = cfs_hash_dh_hnode_del,
451 static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
452 .hop_hhead = cfs_hash_dd_hhead,
453 .hop_hhead_size = cfs_hash_dd_hhead_size,
454 .hop_hnode_add = cfs_hash_dd_hnode_add,
455 .hop_hnode_del = cfs_hash_dd_hnode_del,
459 cfs_hash_hlist_setup(cfs_hash_t *hs)
461 if (cfs_hash_with_add_tail(hs)) {
462 hs->hs_hops = cfs_hash_with_depth(hs) ?
463 &cfs_hash_dd_hops : &cfs_hash_dh_hops;
465 hs->hs_hops = cfs_hash_with_depth(hs) ?
466 &cfs_hash_hd_hops : &cfs_hash_hh_hops;
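/*
 * Summary of the hlist-ops selection above (derived from the four hops
 * tables and cfs_hash_hlist_setup(); purely descriptive):
 *
 *   add to head, no depth tracking : cfs_hash_hh_hops
 *   add to head, depth tracking    : cfs_hash_hd_hops
 *   add to tail, no depth tracking : cfs_hash_dh_hops
 *   add to tail, depth tracking    : cfs_hash_dd_hops
 *
 * The *_dep_t head variants pay for an extra depth counter per hlist_head;
 * the dhead variants pay for an extra tail pointer to support tail insertion.
 */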
471 cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
472 unsigned int bits, const void *key, cfs_hash_bd_t *bd)
474 unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
476 LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
478 bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
479 bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
483 cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
485 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
486 if (likely(hs->hs_rehash_buckets == NULL)) {
487 cfs_hash_bd_from_key(hs, hs->hs_buckets,
488 hs->hs_cur_bits, key, bd);
490 LASSERT(hs->hs_rehash_bits != 0);
491 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
492 hs->hs_rehash_bits, key, bd);
495 CFS_EXPORT_SYMBOL(cfs_hash_bd_get);
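/*
 * Worked example of the index split in cfs_hash_bd_from_key() (illustrative
 * numbers only): with bits = 10 and hs_bkt_bits = 3 there are 1024 hash
 * heads grouped into 128 buckets of 8 hlist_heads each.  For a key hashing
 * to index 0x2A5:
 *
 *   bd_bucket = bkts[0x2A5 & ((1 << (10 - 3)) - 1)] = bkts[0x25]
 *   bd_offset = 0x2A5 >> (10 - 3)                   = 5
 *
 * so the entry lives on the 6th hlist_head of bucket 0x25, and all eight
 * hlist_heads of that bucket share one bucket lock.
 */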
498 cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
500 if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
503 bd->bd_bucket->hsb_depmax = dep_cur;
504 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
505 if (likely(warn_on_depth == 0 ||
506 max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
509 spin_lock(&hs->hs_dep_lock);
510 hs->hs_dep_max = dep_cur;
511 hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
512 hs->hs_dep_off = bd->bd_offset;
513 hs->hs_dep_bits = hs->hs_cur_bits;
514 spin_unlock(&hs->hs_dep_lock);
516 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
521 cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
522 cfs_hlist_node_t *hnode)
526 rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
527 cfs_hash_bd_dep_record(hs, bd, rc);
528 bd->bd_bucket->hsb_version++;
529 if (unlikely(bd->bd_bucket->hsb_version == 0))
530 bd->bd_bucket->hsb_version++;
531 bd->bd_bucket->hsb_count++;
533 if (cfs_hash_with_counter(hs))
534 cfs_atomic_inc(&hs->hs_count);
535 if (!cfs_hash_with_no_itemref(hs))
536 cfs_hash_get(hs, hnode);
538 CFS_EXPORT_SYMBOL(cfs_hash_bd_add_locked);
541 cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
542 cfs_hlist_node_t *hnode)
544 hs->hs_hops->hop_hnode_del(hs, bd, hnode);
546 LASSERT(bd->bd_bucket->hsb_count > 0);
547 bd->bd_bucket->hsb_count--;
548 bd->bd_bucket->hsb_version++;
549 if (unlikely(bd->bd_bucket->hsb_version == 0))
550 bd->bd_bucket->hsb_version++;
552 if (cfs_hash_with_counter(hs)) {
553 LASSERT(cfs_atomic_read(&hs->hs_count) > 0);
554 cfs_atomic_dec(&hs->hs_count);
556 if (!cfs_hash_with_no_itemref(hs))
557 cfs_hash_put_locked(hs, hnode);
559 CFS_EXPORT_SYMBOL(cfs_hash_bd_del_locked);
562 cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
563 cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode)
565 cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
566 cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
569 if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
572 /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
573 * in cfs_hash_bd_del/add_locked */
574 hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
575 rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
576 cfs_hash_bd_dep_record(hs, bd_new, rc);
578 LASSERT(obkt->hsb_count > 0);
581 if (unlikely(obkt->hsb_version == 0))
585 if (unlikely(nbkt->hsb_version == 0))
588 CFS_EXPORT_SYMBOL(cfs_hash_bd_move_locked);
591 /** always set, for sanity (avoid ZERO intent) */
592 CFS_HS_LOOKUP_MASK_FIND = 1 << 0,
593 /** return entry with a ref */
594 CFS_HS_LOOKUP_MASK_REF = 1 << 1,
595 /** add entry if not existing */
596 CFS_HS_LOOKUP_MASK_ADD = 1 << 2,
597 /** delete entry, ignore other masks */
598 CFS_HS_LOOKUP_MASK_DEL = 1 << 3,
601 typedef enum cfs_hash_lookup_intent {
602 /** return item w/o refcount */
603 CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND,
604 /** return item with refcount */
605 CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND |
606 CFS_HS_LOOKUP_MASK_REF),
607 /** return item w/o refcount if existed, otherwise add */
608 CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND |
609 CFS_HS_LOOKUP_MASK_ADD),
610 /** return item with refcount if existed, otherwise add */
611 CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
612 CFS_HS_LOOKUP_MASK_ADD),
613 /** delete if existed */
614 CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
615 CFS_HS_LOOKUP_MASK_DEL)
616 } cfs_hash_lookup_intent_t;
618 static cfs_hlist_node_t *
619 cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
620 const void *key, cfs_hlist_node_t *hnode,
621 cfs_hash_lookup_intent_t intent)
624 cfs_hlist_head_t *hhead = cfs_hash_bd_hhead(hs, bd);
625 cfs_hlist_node_t *ehnode;
626 cfs_hlist_node_t *match;
627 int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
629 /* with this function, we can avoid a lot of useless refcount ops,
630 * which are expensive atomic operations most of the time. */
631 match = intent_add ? NULL : hnode;
632 cfs_hlist_for_each(ehnode, hhead) {
633 if (!cfs_hash_keycmp(hs, key, ehnode))
636 if (match != NULL && match != ehnode) /* can't match */
640 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
641 cfs_hash_bd_del_locked(hs, bd, ehnode);
645 /* caller wants refcount? */
646 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
647 cfs_hash_get(hs, ehnode);
654 LASSERT(hnode != NULL);
655 cfs_hash_bd_add_locked(hs, bd, hnode);
660 cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
662 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
663 CFS_HS_LOOKUP_IT_FIND);
665 CFS_EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
668 cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
669 const void *key, cfs_hlist_node_t *hnode,
672 return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
673 CFS_HS_LOOKUP_IT_ADD |
674 (!noref * CFS_HS_LOOKUP_MASK_REF));
676 CFS_EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
679 cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
680 const void *key, cfs_hlist_node_t *hnode)
682 /* hnode can be NULL, we find the first item with @key */
683 return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
684 CFS_HS_LOOKUP_IT_FINDDEL);
686 CFS_EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
689 cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
690 unsigned n, int excl)
692 cfs_hash_bucket_t *prev = NULL;
696 * bds must be ordered by ascending bd->bd_bucket->hsb_index.
697 * NB: it's possible that several bds point to the same bucket but
698 * have different bd::bd_offset, so we need to take care to avoid deadlock.
700 cfs_hash_for_each_bd(bds, n, i) {
701 if (prev == bds[i].bd_bucket)
704 LASSERT(prev == NULL ||
705 prev->hsb_index < bds[i].bd_bucket->hsb_index);
706 cfs_hash_bd_lock(hs, &bds[i], excl);
707 prev = bds[i].bd_bucket;
712 cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
713 unsigned n, int excl)
715 cfs_hash_bucket_t *prev = NULL;
718 cfs_hash_for_each_bd(bds, n, i) {
719 if (prev != bds[i].bd_bucket) {
720 cfs_hash_bd_unlock(hs, &bds[i], excl);
721 prev = bds[i].bd_bucket;
726 static cfs_hlist_node_t *
727 cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
728 unsigned n, const void *key)
730 cfs_hlist_node_t *ehnode;
733 cfs_hash_for_each_bd(bds, n, i) {
734 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
735 CFS_HS_LOOKUP_IT_FIND);
742 static cfs_hlist_node_t *
743 cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
744 cfs_hash_bd_t *bds, unsigned n, const void *key,
745 cfs_hlist_node_t *hnode, int noref)
747 cfs_hlist_node_t *ehnode;
751 LASSERT(hnode != NULL);
752 intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
754 cfs_hash_for_each_bd(bds, n, i) {
755 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
761 if (i == 1) { /* only one bucket */
762 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
766 cfs_hash_bd_get(hs, key, &mybd);
767 cfs_hash_bd_add_locked(hs, &mybd, hnode);
773 static cfs_hlist_node_t *
774 cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
775 unsigned n, const void *key,
776 cfs_hlist_node_t *hnode)
778 cfs_hlist_node_t *ehnode;
781 cfs_hash_for_each_bd(bds, n, i) {
782 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
783 CFS_HS_LOOKUP_IT_FINDDEL);
791 cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
795 if (bd2->bd_bucket == NULL)
798 if (bd1->bd_bucket == NULL) {
800 bd2->bd_bucket = NULL;
804 rc = cfs_hash_bd_compare(bd1, bd2);
806 bd2->bd_bucket = NULL;
808 } else if (rc > 0) { /* swap bd1 and bd2 */
818 cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
820 /* NB: caller should hold hs_lock.rw if REHASH is set */
821 cfs_hash_bd_from_key(hs, hs->hs_buckets,
822 hs->hs_cur_bits, key, &bds[0]);
823 if (likely(hs->hs_rehash_buckets == NULL)) {
824 /* no rehash or not rehashing */
825 bds[1].bd_bucket = NULL;
829 LASSERT(hs->hs_rehash_bits != 0);
830 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
831 hs->hs_rehash_bits, key, &bds[1]);
833 cfs_hash_bd_order(&bds[0], &bds[1]);
835 CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_get);
838 cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
840 cfs_hash_multi_bd_lock(hs, bds, 2, excl);
842 CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
845 cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
847 cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
849 CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
852 cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
855 return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
857 CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
860 cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
861 const void *key, cfs_hlist_node_t *hnode,
864 return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
867 CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
870 cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
871 const void *key, cfs_hlist_node_t *hnode)
873 return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
875 CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
878 cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
879 int bkt_size, int prev_size, int size)
883 for (i = prev_size; i < size; i++) {
884 if (buckets[i] != NULL)
885 LIBCFS_FREE(buckets[i], bkt_size);
888 LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
892 * Create or grow bucket memory. Return old_buckets if no allocation was
893 * needed, the newly allocated buckets if allocation was needed and
894 * successful, and NULL on error.
896 static cfs_hash_bucket_t **
897 cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
898 unsigned int old_size, unsigned int new_size)
900 cfs_hash_bucket_t **new_bkts;
903 LASSERT(old_size == 0 || old_bkts != NULL);
905 if (old_bkts != NULL && old_size == new_size)
908 LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
909 if (new_bkts == NULL)
912 if (old_bkts != NULL) {
913 memcpy(new_bkts, old_bkts,
914 min(old_size, new_size) * sizeof(*old_bkts));
917 for (i = old_size; i < new_size; i++) {
918 cfs_hlist_head_t *hhead;
921 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
922 if (new_bkts[i] == NULL) {
923 cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
928 new_bkts[i]->hsb_index = i;
929 new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
930 new_bkts[i]->hsb_depmax = -1; /* unknown */
931 bd.bd_bucket = new_bkts[i];
932 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
933 CFS_INIT_HLIST_HEAD(hhead);
935 if (cfs_hash_with_no_lock(hs) ||
936 cfs_hash_with_no_bktlock(hs))
939 if (cfs_hash_with_rw_bktlock(hs))
940 rwlock_init(&new_bkts[i]->hsb_lock.rw);
941 else if (cfs_hash_with_spin_bktlock(hs))
942 spin_lock_init(&new_bkts[i]->hsb_lock.spin);
944 LBUG(); /* invalid use-case */
950 * Initialize new libcfs hash, where:
951 * @name - Descriptive hash name
952 * @cur_bits - Initial hash table size, in bits
953 * @max_bits - Maximum allowed hash table resize, in bits
954 * @ops - Registered hash table operations
955 * @flags - CFS_HASH_REHASH enable dynamic hash resizing
956 * - CFS_HASH_SORT enable chained hash sort
958 static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
960 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
961 static int cfs_hash_dep_print(cfs_workitem_t *wi)
963 cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
969 spin_lock(&hs->hs_dep_lock);
970 dep = hs->hs_dep_max;
971 bkt = hs->hs_dep_bkt;
972 off = hs->hs_dep_off;
973 bits = hs->hs_dep_bits;
974 spin_unlock(&hs->hs_dep_lock);
976 LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
977 hs->hs_name, bits, dep, bkt, off);
978 spin_lock(&hs->hs_dep_lock);
979 hs->hs_dep_bits = 0; /* mark as workitem done */
980 spin_unlock(&hs->hs_dep_lock);
984 static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
986 spin_lock_init(&hs->hs_dep_lock);
987 cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
990 static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
992 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
995 spin_lock(&hs->hs_dep_lock);
996 while (hs->hs_dep_bits != 0) {
997 spin_unlock(&hs->hs_dep_lock);
999 spin_lock(&hs->hs_dep_lock);
1001 spin_unlock(&hs->hs_dep_lock);
1004 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1006 static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
1007 static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}
1009 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1012 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1013 unsigned bkt_bits, unsigned extra_bytes,
1014 unsigned min_theta, unsigned max_theta,
1015 cfs_hash_ops_t *ops, unsigned flags)
1022 CLASSERT(CFS_HASH_THETA_BITS < 15);
1024 LASSERT(name != NULL);
1025 LASSERT(ops != NULL);
1026 LASSERT(ops->hs_key);
1027 LASSERT(ops->hs_hash);
1028 LASSERT(ops->hs_object);
1029 LASSERT(ops->hs_keycmp);
1030 LASSERT(ops->hs_get != NULL);
1031 LASSERT(ops->hs_put_locked != NULL);
1033 if ((flags & CFS_HASH_REHASH) != 0)
1034 flags |= CFS_HASH_COUNTER; /* must have counter */
1036 LASSERT(cur_bits > 0);
1037 LASSERT(cur_bits >= bkt_bits);
1038 LASSERT(max_bits >= cur_bits && max_bits < 31);
1039 LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1040 LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1041 (flags & CFS_HASH_NO_LOCK) == 0));
1042 LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1043 ops->hs_keycpy != NULL));
1045 len = (flags & CFS_HASH_BIGNAME) == 0 ?
1046 CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1047 LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
1051 strncpy(hs->hs_name, name, len);
1052 hs->hs_name[len - 1] = '\0';
1053 hs->hs_flags = flags;
1055 cfs_atomic_set(&hs->hs_refcount, 1);
1056 cfs_atomic_set(&hs->hs_count, 0);
1058 cfs_hash_lock_setup(hs);
1059 cfs_hash_hlist_setup(hs);
1061 hs->hs_cur_bits = (__u8)cur_bits;
1062 hs->hs_min_bits = (__u8)cur_bits;
1063 hs->hs_max_bits = (__u8)max_bits;
1064 hs->hs_bkt_bits = (__u8)bkt_bits;
1067 hs->hs_extra_bytes = extra_bytes;
1068 hs->hs_rehash_bits = 0;
1069 cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1070 cfs_hash_depth_wi_init(hs);
1072 if (cfs_hash_with_rehash(hs))
1073 __cfs_hash_set_theta(hs, min_theta, max_theta);
1075 hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1077 if (hs->hs_buckets != NULL)
1080 LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
1083 CFS_EXPORT_SYMBOL(cfs_hash_create);
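/*
 * Illustrative creation call (a hedged sketch, not taken from Lustre code;
 * "my_hash" and "my_hash_ops" are hypothetical names, while the parameter
 * order matches the definition above and the ops fields match the LASSERTs
 * there).  CFS_HASH_MIN_THETA and CFS_HASH_MAX_THETA are assumed to come
 * from the companion libcfs hash header:
 *
 *	hs = cfs_hash_create("my_hash", 5, 10, 3, 0,
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &my_hash_ops, CFS_HASH_REHASH);
 *
 * Here cur_bits = 5 gives 32 hash heads initially, max_bits = 10 allows
 * growing up to 1024 heads, bkt_bits = 3 groups 8 hlist_heads under one
 * bucket lock, and extra_bytes = 0.  my_hash_ops must at least provide
 * hs_key, hs_hash, hs_object, hs_keycmp, hs_get and hs_put_locked.
 */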
1086 * Cleanup libcfs hash @hs.
1089 cfs_hash_destroy(cfs_hash_t *hs)
1091 cfs_hlist_node_t *hnode;
1092 cfs_hlist_node_t *pos;
1097 LASSERT(hs != NULL);
1098 LASSERT(!cfs_hash_is_exiting(hs) &&
1099 !cfs_hash_is_iterating(hs));
1102 * prohibit further rehashes, don't need any lock because
1103 * I'm the only (last) one who can change it.
1106 if (cfs_hash_with_rehash(hs))
1107 cfs_hash_rehash_cancel(hs);
1109 cfs_hash_depth_wi_cancel(hs);
1110 /* rehash should be done/canceled */
1111 LASSERT(hs->hs_buckets != NULL &&
1112 hs->hs_rehash_buckets == NULL);
1114 cfs_hash_for_each_bucket(hs, &bd, i) {
1115 cfs_hlist_head_t *hhead;
1117 LASSERT(bd.bd_bucket != NULL);
1118 /* no need to take this lock, just for code consistency */
1119 cfs_hash_bd_lock(hs, &bd, 1);
1121 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1122 cfs_hlist_for_each_safe(hnode, pos, hhead) {
1123 LASSERTF(!cfs_hash_with_assert_empty(hs),
1124 "hash %s bucket %u(%u) is not "
1125 "empty: %u items left\n",
1126 hs->hs_name, bd.bd_bucket->hsb_index,
1127 bd.bd_offset, bd.bd_bucket->hsb_count);
1128 /* can't assert key validity, because we
1129 * can interrupt rehash */
1130 cfs_hash_bd_del_locked(hs, &bd, hnode);
1131 cfs_hash_exit(hs, hnode);
1134 LASSERT(bd.bd_bucket->hsb_count == 0);
1135 cfs_hash_bd_unlock(hs, &bd, 1);
1139 LASSERT(cfs_atomic_read(&hs->hs_count) == 0);
1141 cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1142 0, CFS_HASH_NBKT(hs));
1143 i = cfs_hash_with_bigname(hs) ?
1144 CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1145 LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
1150 cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
1152 if (cfs_atomic_inc_not_zero(&hs->hs_refcount))
1156 CFS_EXPORT_SYMBOL(cfs_hash_getref);
1158 void cfs_hash_putref(cfs_hash_t *hs)
1160 if (cfs_atomic_dec_and_test(&hs->hs_refcount))
1161 cfs_hash_destroy(hs);
1163 CFS_EXPORT_SYMBOL(cfs_hash_putref);
1166 cfs_hash_rehash_bits(cfs_hash_t *hs)
1168 if (cfs_hash_with_no_lock(hs) ||
1169 !cfs_hash_with_rehash(hs))
1172 if (unlikely(cfs_hash_is_exiting(hs)))
1175 if (unlikely(cfs_hash_is_rehashing(hs)))
1178 if (unlikely(cfs_hash_is_iterating(hs)))
1181 /* XXX: need to handle case with max_theta != 2.0
1182 * and the case with min_theta != 0.5 */
1183 if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1184 (__cfs_hash_theta(hs) > hs->hs_max_theta))
1185 return hs->hs_cur_bits + 1;
1187 if (!cfs_hash_with_shrink(hs))
1190 if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1191 (__cfs_hash_theta(hs) < hs->hs_min_theta))
1192 return hs->hs_cur_bits - 1;
1198 * don't allow inline rehash if:
1199 * - the user wants non-blocking changes (add/del) on the hash table
1200 * - there are too many elements
1203 cfs_hash_rehash_inline(cfs_hash_t *hs)
1205 return !cfs_hash_with_nblk_change(hs) &&
1206 cfs_atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1210 * Add item @hnode to libcfs hash @hs using @key. The registered
1211 * ops->hs_get function will be called when the item is added.
1214 cfs_hash_add(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
1219 LASSERT(cfs_hlist_unhashed(hnode));
1221 cfs_hash_lock(hs, 0);
1222 cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1224 cfs_hash_key_validate(hs, key, hnode);
1225 cfs_hash_bd_add_locked(hs, &bd, hnode);
1227 cfs_hash_bd_unlock(hs, &bd, 1);
1229 bits = cfs_hash_rehash_bits(hs);
1230 cfs_hash_unlock(hs, 0);
1232 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1234 CFS_EXPORT_SYMBOL(cfs_hash_add);
1236 static cfs_hlist_node_t *
1237 cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
1238 cfs_hlist_node_t *hnode, int noref)
1240 cfs_hlist_node_t *ehnode;
1241 cfs_hash_bd_t bds[2];
1244 LASSERT(cfs_hlist_unhashed(hnode));
1246 cfs_hash_lock(hs, 0);
1247 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1249 cfs_hash_key_validate(hs, key, hnode);
1250 ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1252 cfs_hash_dual_bd_unlock(hs, bds, 1);
1254 if (ehnode == hnode) /* new item added */
1255 bits = cfs_hash_rehash_bits(hs);
1256 cfs_hash_unlock(hs, 0);
1258 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1264 * Add item @hnode to libcfs hash @hs using @key. The registered
1265 * ops->hs_get function will be called if the item was added.
1266 * Returns 0 on success or -EALREADY on key collisions.
1269 cfs_hash_add_unique(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
1271 return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1274 CFS_EXPORT_SYMBOL(cfs_hash_add_unique);
1277 * Add item @hnode to libcfs hash @hs using @key. If this @key
1278 * already exists in the hash then ops->hs_get will be called on the
1279 * conflicting entry and that entry will be returned to the caller.
1280 * Otherwise ops->hs_get is called on the item which was added.
1283 cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
1284 cfs_hlist_node_t *hnode)
1286 hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1288 return cfs_hash_object(hs, hnode);
1290 CFS_EXPORT_SYMBOL(cfs_hash_findadd_unique);
1293 * Delete item @hnode from the libcfs hash @hs using @key. The @key
1294 * is required to ensure the correct hash bucket is locked since there
1295 * is no direct linkage from the item to the bucket. The object
1296 * removed from the hash will be returned and ops->hs_put is called
1297 * on the removed object.
1300 cfs_hash_del(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
1304 cfs_hash_bd_t bds[2];
1306 cfs_hash_lock(hs, 0);
1307 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1309 /* NB: do nothing if @hnode is not in hash table */
1310 if (hnode == NULL || !cfs_hlist_unhashed(hnode)) {
1311 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1312 cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1314 hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1319 if (hnode != NULL) {
1320 obj = cfs_hash_object(hs, hnode);
1321 bits = cfs_hash_rehash_bits(hs);
1324 cfs_hash_dual_bd_unlock(hs, bds, 1);
1325 cfs_hash_unlock(hs, 0);
1327 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1331 CFS_EXPORT_SYMBOL(cfs_hash_del);
1334 * Delete item given @key in libcfs hash @hs. The first @key found in
1335 * the hash will be removed; if the key exists multiple times in the hash
1336 * @hs, this function must be called once per key. The removed object
1337 * will be returned and ops->hs_put is called on the removed object.
1340 cfs_hash_del_key(cfs_hash_t *hs, const void *key)
1342 return cfs_hash_del(hs, key, NULL);
1344 CFS_EXPORT_SYMBOL(cfs_hash_del_key);
1347 * Lookup an item using @key in the libcfs hash @hs and return it.
1348 * If the @key is found in the hash, hs->hs_get() is called and the
1349 * matching object is returned. It is the caller's responsibility
1350 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1351 * when finished with the object. If the @key was not found
1352 * in the hash @hs NULL is returned.
1355 cfs_hash_lookup(cfs_hash_t *hs, const void *key)
1358 cfs_hlist_node_t *hnode;
1359 cfs_hash_bd_t bds[2];
1361 cfs_hash_lock(hs, 0);
1362 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1364 hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1366 obj = cfs_hash_object(hs, hnode);
1368 cfs_hash_dual_bd_unlock(hs, bds, 0);
1369 cfs_hash_unlock(hs, 0);
1373 CFS_EXPORT_SYMBOL(cfs_hash_lookup);
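/*
 * Illustrative add/lookup/delete flow (a hedged sketch; "obj", "o_key" and
 * "o_hnode" are hypothetical caller-side names, the cfs_hash_* calls are the
 * ones defined in this file):
 *
 *	cfs_hash_add(hs, &obj->o_key, &obj->o_hnode);
 *
 *	found = cfs_hash_lookup(hs, &key);          (takes a ref via ops->hs_get)
 *	if (found != NULL) {
 *		... use "found" ...
 *		cfs_hash_put(hs, &found->o_hnode);  (drops the lookup ref)
 *	}
 *
 *	cfs_hash_del(hs, &obj->o_key, &obj->o_hnode);  (ops->hs_put on removal)
 */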
1376 cfs_hash_for_each_enter(cfs_hash_t *hs)
1378 LASSERT(!cfs_hash_is_exiting(hs));
1380 if (!cfs_hash_with_rehash(hs))
1383 * NB: there is a race on cfs_hash_t::hs_iterating, but it doesn't matter
1384 * because it's just an unreliable signal to the rehash-thread;
1385 * the rehash-thread will try to finish the rehash ASAP when seeing this.
1387 hs->hs_iterating = 1;
1389 cfs_hash_lock(hs, 1);
1392 /* NB: iteration is mostly called by a service thread,
1393 * so we tend to cancel the pending rehash request instead of
1394 * blocking the service thread; we will relaunch the rehash request
1395 * after the iteration */
1396 if (cfs_hash_is_rehashing(hs))
1397 cfs_hash_rehash_cancel_locked(hs);
1398 cfs_hash_unlock(hs, 1);
1402 cfs_hash_for_each_exit(cfs_hash_t *hs)
1407 if (!cfs_hash_with_rehash(hs))
1409 cfs_hash_lock(hs, 1);
1410 remained = --hs->hs_iterators;
1411 bits = cfs_hash_rehash_bits(hs);
1412 cfs_hash_unlock(hs, 1);
1413 /* NB: there is a race on cfs_hash_t::hs_iterating, see above */
1415 hs->hs_iterating = 0;
1417 cfs_hash_rehash(hs, cfs_atomic_read(&hs->hs_count) <
1423 * For each item in the libcfs hash @hs call the passed callback @func
1424 * and pass to it as an argument each hash item and the private @data.
1426 * a) the function may sleep!
1427 * b) during the callback:
1428 * . the bucket lock is held so the callback must never sleep.
1429 * . if @remove_safe is true, the user can remove the current item with
1430 * cfs_hash_bd_del_locked
1433 cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
1434 void *data, int remove_safe)
1436 cfs_hlist_node_t *hnode;
1437 cfs_hlist_node_t *pos;
1440 int excl = !!remove_safe;
1445 cfs_hash_for_each_enter(hs);
1447 cfs_hash_lock(hs, 0);
1448 LASSERT(!cfs_hash_is_rehashing(hs));
1450 cfs_hash_for_each_bucket(hs, &bd, i) {
1451 cfs_hlist_head_t *hhead;
1453 cfs_hash_bd_lock(hs, &bd, excl);
1454 if (func == NULL) { /* only glimpse size */
1455 count += bd.bd_bucket->hsb_count;
1456 cfs_hash_bd_unlock(hs, &bd, excl);
1460 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1461 cfs_hlist_for_each_safe(hnode, pos, hhead) {
1462 cfs_hash_bucket_validate(hs, &bd, hnode);
1465 if (func(hs, &bd, hnode, data)) {
1466 cfs_hash_bd_unlock(hs, &bd, excl);
1471 cfs_hash_bd_unlock(hs, &bd, excl);
1472 if (loop < CFS_HASH_LOOP_HOG)
1475 cfs_hash_unlock(hs, 0);
1477 cfs_hash_lock(hs, 0);
1480 cfs_hash_unlock(hs, 0);
1482 cfs_hash_for_each_exit(hs);
1487 cfs_hash_cond_opt_cb_t func;
1489 } cfs_hash_cond_arg_t;
1492 cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1493 cfs_hlist_node_t *hnode, void *data)
1495 cfs_hash_cond_arg_t *cond = data;
1497 if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1498 cfs_hash_bd_del_locked(hs, bd, hnode);
1503 * Delete items from the libcfs hash @hs for which @func returns true.
1504 * The write lock is held during the loop over each bucket to prevent
1505 * any object from being referenced.
1508 cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
1510 cfs_hash_cond_arg_t arg = {
1515 cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1517 CFS_EXPORT_SYMBOL(cfs_hash_cond_del);
1520 cfs_hash_for_each(cfs_hash_t *hs,
1521 cfs_hash_for_each_cb_t func, void *data)
1523 cfs_hash_for_each_tight(hs, func, data, 0);
1525 CFS_EXPORT_SYMBOL(cfs_hash_for_each);
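/*
 * Illustrative iteration callback (a hedged sketch; "my_count_cb" and "count"
 * are hypothetical).  The callback runs with the bucket lock held (see
 * cfs_hash_for_each_tight() above) and returning nonzero breaks the walk:
 *
 *	static int my_count_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *			       cfs_hlist_node_t *hnode, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	cfs_hash_for_each(hs, my_count_cb, &count);
 */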
1528 cfs_hash_for_each_safe(cfs_hash_t *hs,
1529 cfs_hash_for_each_cb_t func, void *data)
1531 cfs_hash_for_each_tight(hs, func, data, 1);
1533 CFS_EXPORT_SYMBOL(cfs_hash_for_each_safe);
1536 cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1537 cfs_hlist_node_t *hnode, void *data)
1540 return 1; /* return 1 to break the loop */
1544 cfs_hash_is_empty(cfs_hash_t *hs)
1548 cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1551 CFS_EXPORT_SYMBOL(cfs_hash_is_empty);
1554 cfs_hash_size_get(cfs_hash_t *hs)
1556 return cfs_hash_with_counter(hs) ?
1557 cfs_atomic_read(&hs->hs_count) :
1558 cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1560 CFS_EXPORT_SYMBOL(cfs_hash_size_get);
1563 * cfs_hash_for_each_relax:
1564 * Iterate the hash table and call @func on each item without
1565 * any lock. This function can't guarantee to finish iteration
1566 * if these features are enabled:
1568 * a. if rehash_key is enabled, an item can be moved from
1569 * one bucket to another bucket
1570 * b. the user can remove a non-zero-ref item from the hash-table,
1571 * so the item can be removed from the hash-table; even worse,
1572 * it's possible that the user changed the key and inserted it into
1573 * another hash bucket.
1574 * there's no way for us to finish the iteration correctly in the previous
1575 * two cases, so the iteration has to be stopped on change.
1578 cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
1580 cfs_hlist_node_t *hnode;
1581 cfs_hlist_node_t *tmp;
1590 stop_on_change = cfs_hash_with_rehash_key(hs) ||
1591 !cfs_hash_with_no_itemref(hs) ||
1592 CFS_HOP(hs, put_locked) == NULL;
1593 cfs_hash_lock(hs, 0);
1594 LASSERT(!cfs_hash_is_rehashing(hs));
1596 cfs_hash_for_each_bucket(hs, &bd, i) {
1597 cfs_hlist_head_t *hhead;
1599 cfs_hash_bd_lock(hs, &bd, 0);
1600 version = cfs_hash_bd_version_get(&bd);
1602 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1603 for (hnode = hhead->first; hnode != NULL;) {
1604 cfs_hash_bucket_validate(hs, &bd, hnode);
1605 cfs_hash_get(hs, hnode);
1606 cfs_hash_bd_unlock(hs, &bd, 0);
1607 cfs_hash_unlock(hs, 0);
1609 rc = func(hs, &bd, hnode, data);
1611 cfs_hash_put(hs, hnode);
1615 cfs_hash_lock(hs, 0);
1616 cfs_hash_bd_lock(hs, &bd, 0);
1617 if (!stop_on_change) {
1619 cfs_hash_put_locked(hs, hnode);
1621 } else { /* bucket changed? */
1623 cfs_hash_bd_version_get(&bd))
1625 /* safe to continue because no change */
1626 hnode = hnode->next;
1628 if (rc) /* callback wants to break iteration */
1632 cfs_hash_bd_unlock(hs, &bd, 0);
1634 cfs_hash_unlock(hs, 0);
1640 cfs_hash_for_each_nolock(cfs_hash_t *hs,
1641 cfs_hash_for_each_cb_t func, void *data)
1645 if (cfs_hash_with_no_lock(hs) ||
1646 cfs_hash_with_rehash_key(hs) ||
1647 !cfs_hash_with_no_itemref(hs))
1648 RETURN(-EOPNOTSUPP);
1650 if (CFS_HOP(hs, get) == NULL ||
1651 (CFS_HOP(hs, put) == NULL &&
1652 CFS_HOP(hs, put_locked) == NULL))
1653 RETURN(-EOPNOTSUPP);
1655 cfs_hash_for_each_enter(hs);
1656 cfs_hash_for_each_relax(hs, func, data);
1657 cfs_hash_for_each_exit(hs);
1661 CFS_EXPORT_SYMBOL(cfs_hash_for_each_nolock);
1664 * For each hash bucket in the libcfs hash @hs call the passed callback
1665 * @func until all the hash buckets are empty. The passed callback @func
1666 * or the previously registered callback hs->hs_put must remove the item
1667 * from the hash. You may either use the cfs_hash_del() or hlist_del()
1668 * functions. No rwlocks will be held during the callback @func, so it is
1669 * safe to sleep if needed. This function will not terminate until the
1670 * hash is empty. Note it is still possible to concurrently add new
1671 * items into the hash. It is the caller's responsibility to ensure
1672 * the required locking is in place to prevent concurrent insertions.
1675 cfs_hash_for_each_empty(cfs_hash_t *hs,
1676 cfs_hash_for_each_cb_t func, void *data)
1681 if (cfs_hash_with_no_lock(hs))
1684 if (CFS_HOP(hs, get) == NULL ||
1685 (CFS_HOP(hs, put) == NULL &&
1686 CFS_HOP(hs, put_locked) == NULL))
1689 cfs_hash_for_each_enter(hs);
1690 while (cfs_hash_for_each_relax(hs, func, data)) {
1691 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1694 cfs_hash_for_each_exit(hs);
1697 CFS_EXPORT_SYMBOL(cfs_hash_for_each_empty);
1700 cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
1701 cfs_hash_for_each_cb_t func, void *data)
1703 cfs_hlist_head_t *hhead;
1704 cfs_hlist_node_t *hnode;
1707 cfs_hash_for_each_enter(hs);
1708 cfs_hash_lock(hs, 0);
1709 if (hindex >= CFS_HASH_NHLIST(hs))
1712 cfs_hash_bd_index_set(hs, hindex, &bd);
1714 cfs_hash_bd_lock(hs, &bd, 0);
1715 hhead = cfs_hash_bd_hhead(hs, &bd);
1716 cfs_hlist_for_each(hnode, hhead) {
1717 if (func(hs, &bd, hnode, data))
1720 cfs_hash_bd_unlock(hs, &bd, 0);
1722 cfs_hash_unlock(hs, 0);
1723 cfs_hash_for_each_exit(hs);
1726 CFS_EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1729 * For each item in the libcfs hash @hs which matches the @key call
1730 * the passed callback @func and pass to it as an argument each hash
1731 * item and the private @data. During the callback the bucket lock
1732 * is held so the callback must never sleep.
1735 cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
1736 cfs_hash_for_each_cb_t func, void *data)
1738 cfs_hlist_node_t *hnode;
1739 cfs_hash_bd_t bds[2];
1742 cfs_hash_lock(hs, 0);
1744 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1746 cfs_hash_for_each_bd(bds, 2, i) {
1747 cfs_hlist_head_t *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1749 cfs_hlist_for_each(hnode, hlist) {
1750 cfs_hash_bucket_validate(hs, &bds[i], hnode);
1752 if (cfs_hash_keycmp(hs, key, hnode)) {
1753 if (func(hs, &bds[i], hnode, data))
1759 cfs_hash_dual_bd_unlock(hs, bds, 0);
1760 cfs_hash_unlock(hs, 0);
1762 CFS_EXPORT_SYMBOL(cfs_hash_for_each_key);
1765 * Rehash the libcfs hash @hs to the given @bits. This can be used
1766 * to grow the hash size when excessive chaining is detected, or to
1767 * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
1768 * flag is set in @hs the libcfs hash may be dynamically rehashed
1769 * during addition or removal if the hash's theta value rises above
1770 * hs->hs_max_theta or falls below hs->hs_min_theta. By default
1771 * these values are tuned to keep the chained hash depth small, and
1772 * this approach assumes a reasonably uniform hashing function. The
1773 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1776 cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
1780 /* need hold cfs_hash_lock(hs, 1) */
1781 LASSERT(cfs_hash_with_rehash(hs) &&
1782 !cfs_hash_with_no_lock(hs));
1784 if (!cfs_hash_is_rehashing(hs))
1787 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1788 hs->hs_rehash_bits = 0;
1792 for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1793 cfs_hash_unlock(hs, 1);
1794 /* raise console warning while waiting too long */
1795 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1796 "hash %s is still rehashing, rescheduled %d\n",
1797 hs->hs_name, i - 1);
1799 cfs_hash_lock(hs, 1);
1802 CFS_EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1805 cfs_hash_rehash_cancel(cfs_hash_t *hs)
1807 cfs_hash_lock(hs, 1);
1808 cfs_hash_rehash_cancel_locked(hs);
1809 cfs_hash_unlock(hs, 1);
1811 CFS_EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1814 cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
1818 LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1820 cfs_hash_lock(hs, 1);
1822 rc = cfs_hash_rehash_bits(hs);
1824 cfs_hash_unlock(hs, 1);
1828 hs->hs_rehash_bits = rc;
1830 /* launch and return */
1831 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1832 cfs_hash_unlock(hs, 1);
1836 /* rehash right now */
1837 cfs_hash_unlock(hs, 1);
1839 return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1841 CFS_EXPORT_SYMBOL(cfs_hash_rehash);
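/*
 * Worked example of the theta-driven grow decision (illustrative numbers;
 * theta is roughly the average number of items per hash head, kept in fixed
 * point with CFS_HASH_THETA_BITS fractional bits):
 *
 *   hs_cur_bits = 10          -> 1024 hash heads
 *   hs_count    = 3000 items  -> theta ~= 3000 / 1024 ~= 2.93
 *
 * With the default max_theta of 2.0 (see the XXX note in
 * cfs_hash_rehash_bits()), theta > hs_max_theta, so cfs_hash_rehash_bits()
 * returns hs_cur_bits + 1 and the table is rehashed to 2048 heads, bringing
 * theta back to roughly 1.46.
 */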
1844 cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
1847 cfs_hlist_head_t *hhead;
1848 cfs_hlist_node_t *hnode;
1849 cfs_hlist_node_t *pos;
1853 /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
1854 cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1855 cfs_hlist_for_each_safe(hnode, pos, hhead) {
1856 key = cfs_hash_key(hs, hnode);
1857 LASSERT(key != NULL);
1858 /* Validate hnode is in the correct bucket. */
1859 cfs_hash_bucket_validate(hs, old, hnode);
1861 * Delete from old hash bucket; move to new bucket.
1862 * ops->hs_key must be defined.
1864 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1865 hs->hs_rehash_bits, key, &new);
1866 cfs_hash_bd_move_locked(hs, old, &new, hnode);
1875 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1877 cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
1878 cfs_hash_bucket_t **bkts;
1880 unsigned int old_size;
1881 unsigned int new_size;
1887 LASSERT (hs != NULL && cfs_hash_with_rehash(hs));
1889 cfs_hash_lock(hs, 0);
1890 LASSERT(cfs_hash_is_rehashing(hs));
1892 old_size = CFS_HASH_NBKT(hs);
1893 new_size = CFS_HASH_RH_NBKT(hs);
1895 cfs_hash_unlock(hs, 0);
1898 * don't need hs::hs_rwlock for hs::hs_buckets,
1899 * because nobody can change bkt-table except me.
1901 bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1902 old_size, new_size);
1903 cfs_hash_lock(hs, 1);
1909 if (bkts == hs->hs_buckets) {
1910 bkts = NULL; /* do nothing */
1914 rc = __cfs_hash_theta(hs);
1915 if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1916 /* free the newly allocated bkt-table */
1917 old_size = new_size;
1918 new_size = CFS_HASH_NBKT(hs);
1923 LASSERT(hs->hs_rehash_buckets == NULL);
1924 hs->hs_rehash_buckets = bkts;
1927 cfs_hash_for_each_bucket(hs, &bd, i) {
1928 if (cfs_hash_is_exiting(hs)) {
1930 /* someone wants to destroy the hash, abort now */
1931 if (old_size < new_size) /* OK to free old bkt-table */
1933 /* it's shrinking, need to free the new bkt-table */
1934 hs->hs_rehash_buckets = NULL;
1935 old_size = new_size;
1936 new_size = CFS_HASH_NBKT(hs);
1940 count += cfs_hash_rehash_bd(hs, &bd);
1941 if (count < CFS_HASH_LOOP_HOG ||
1942 cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1947 cfs_hash_unlock(hs, 1);
1949 cfs_hash_lock(hs, 1);
1952 hs->hs_rehash_count++;
1954 bkts = hs->hs_buckets;
1955 hs->hs_buckets = hs->hs_rehash_buckets;
1956 hs->hs_rehash_buckets = NULL;
1958 hs->hs_cur_bits = hs->hs_rehash_bits;
1960 hs->hs_rehash_bits = 0;
1961 if (rc == -ESRCH) /* never be scheduled again */
1962 cfs_wi_exit(cfs_sched_rehash, wi);
1963 bsize = cfs_hash_bkt_size(hs);
1964 cfs_hash_unlock(hs, 1);
1965 /* can't refer to @hs anymore because it could be destroyed */
1967 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1969 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1970 /* return 1 only if cfs_wi_exit is called */
1971 return rc == -ESRCH;
1975 * Rehash the object referenced by @hnode in the libcfs hash @hs. The
1976 * @old_key must be provided to locate the object's previous location
1977 * in the hash, and the @new_key will be used to reinsert the object.
1978 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1979 * combo when it is critical that there is no window in time where the
1980 * object is missing from the hash. When an object is being rehashed
1981 * the registered cfs_hash_get() and cfs_hash_put() functions will
1982 * not be called.
1984 void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
1985 void *new_key, cfs_hlist_node_t *hnode)
1987 cfs_hash_bd_t bds[3];
1988 cfs_hash_bd_t old_bds[2];
1989 cfs_hash_bd_t new_bd;
1991 LASSERT(!cfs_hlist_unhashed(hnode));
1993 cfs_hash_lock(hs, 0);
1995 cfs_hash_dual_bd_get(hs, old_key, old_bds);
1996 cfs_hash_bd_get(hs, new_key, &new_bd);
1998 bds[0] = old_bds[0];
1999 bds[1] = old_bds[1];
2002 /* NB: bds[0] and bds[1] are ordered already */
2003 cfs_hash_bd_order(&bds[1], &bds[2]);
2004 cfs_hash_bd_order(&bds[0], &bds[1]);
2006 cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2007 if (likely(old_bds[1].bd_bucket == NULL)) {
2008 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2010 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2011 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2013 /* overwrite the key inside the locks, otherwise we may race with
2014 * other operations, e.g. rehash */
2015 cfs_hash_keycpy(hs, new_key, hnode);
2017 cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2018 cfs_hash_unlock(hs, 0);
2020 CFS_EXPORT_SYMBOL(cfs_hash_rehash_key);
2022 int cfs_hash_debug_header(char *str, int size)
2024 return snprintf(str, size, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2025 CFS_HASH_BIGNAME_LEN,
2026 "name", "cur", "min", "max", "theta", "t-min", "t-max",
2027 "flags", "rehash", "count", "maxdep", "maxdepb",
2030 CFS_EXPORT_SYMBOL(cfs_hash_debug_header);
2032 static cfs_hash_bucket_t **
2033 cfs_hash_full_bkts(cfs_hash_t *hs)
2035 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2036 if (hs->hs_rehash_buckets == NULL)
2037 return hs->hs_buckets;
2039 LASSERT(hs->hs_rehash_bits != 0);
2040 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2041 hs->hs_rehash_buckets : hs->hs_buckets;
2045 cfs_hash_full_nbkt(cfs_hash_t *hs)
2047 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2048 if (hs->hs_rehash_buckets == NULL)
2049 return CFS_HASH_NBKT(hs);
2051 LASSERT(hs->hs_rehash_bits != 0);
2052 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2053 CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2056 int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)
2058 int dist[8] = { 0, };
2066 if (str == NULL || size == 0)
2069 cfs_hash_lock(hs, 0);
2070 theta = __cfs_hash_theta(hs);
2072 c += snprintf(str + c, size - c, "%-*s ",
2073 CFS_HASH_BIGNAME_LEN, hs->hs_name);
2074 c += snprintf(str + c, size - c, "%5d ", 1 << hs->hs_cur_bits);
2075 c += snprintf(str + c, size - c, "%5d ", 1 << hs->hs_min_bits);
2076 c += snprintf(str + c, size - c, "%5d ", 1 << hs->hs_max_bits);
2077 c += snprintf(str + c, size - c, "%d.%03d ",
2078 __cfs_hash_theta_int(theta),
2079 __cfs_hash_theta_frac(theta));
2080 c += snprintf(str + c, size - c, "%d.%03d ",
2081 __cfs_hash_theta_int(hs->hs_min_theta),
2082 __cfs_hash_theta_frac(hs->hs_min_theta));
2083 c += snprintf(str + c, size - c, "%d.%03d ",
2084 __cfs_hash_theta_int(hs->hs_max_theta),
2085 __cfs_hash_theta_frac(hs->hs_max_theta));
2086 c += snprintf(str + c, size - c, " 0x%02x ", hs->hs_flags);
2087 c += snprintf(str + c, size - c, "%6d ", hs->hs_rehash_count);
2090 * The distribution is a summary of the chained hash depth in
2091 * each of the libcfs hash buckets. Each bucket's hsb_count is
2092 * divided by the hash theta value and used to generate a
2093 * histogram of the hash distribution. A uniform hash will
2094 * result in all hash buckets being close to the average, thus
2095 * only the first few entries in the histogram will be non-zero.
2096 * If your hash function results in a non-uniform hash, this will
2097 * be observable as outlier buckets in the distribution histogram.
2099 * Uniform hash distribution: 128/128/0/0/0/0/0/0
2100 * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
2102 for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2105 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2106 cfs_hash_bd_lock(hs, &bd, 0);
2107 if (maxdep < bd.bd_bucket->hsb_depmax) {
2108 maxdep = bd.bd_bucket->hsb_depmax;
2110 maxdepb = ffz(~maxdep);
2113 total += bd.bd_bucket->hsb_count;
2114 dist[min(__cfs_fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
2115 cfs_hash_bd_unlock(hs, &bd, 0);
2118 c += snprintf(str + c, size - c, "%7d ", total);
2119 c += snprintf(str + c, size - c, "%7d ", maxdep);
2120 c += snprintf(str + c, size - c, "%7d ", maxdepb);
2121 for (i = 0; i < 8; i++)
2122 c += snprintf(str + c, size - c, "%d%c", dist[i],
2123 (i == 7) ? '\n' : '/');
2125 cfs_hash_unlock(hs, 0);
2129 CFS_EXPORT_SYMBOL(cfs_hash_debug_str);