4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * libcfs/libcfs/hash.c
34 * Implements a hash class for the hash tables used throughout the Lustre system.
36 * Author: YuZhangyong <yzy@clusterfs.com>
38 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
39 * - Simplified API and improved documentation
40 * - Added per-hash feature flags:
41 * * CFS_HASH_DEBUG additional validation
42 * * CFS_HASH_REHASH dynamic rehashing
43 * - Added per-hash statistics
44 * - General performance enhancements
46 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
47 * - moved all of this code to libcfs
48 * - don't allow cur_bits != max_bits unless CFS_HASH_REHASH is set
49 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
50 * - buckets are allocated one by one (instead of as contiguous memory)
51 * to avoid unnecessary cacheline conflicts
53 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
54 * - "bucket" is a group of hlist_head now, user can specify bucket size
55 * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
56 * one lock for reducing memory overhead.
58 * - support lockless hash; the caller takes care of locking:
59 * avoids lock overhead for hash tables that are already protected
60 * by locking in the caller for another reason
62 * - support both spinlock and rwlock for buckets:
63 * the overhead of spinlock contention is lower than the read/write
64 * contention of an rwlock, so using a spinlock to serialize operations
65 * on a bucket is more reasonable for frequently changed hash tables
67 * - support single-lock mode:
68 * one lock protects all hash operations, avoiding the overhead of
69 * multiple locks when the hash table is always small
71 * - removed a lot of unnecessary addref & decref on hash elements:
72 * addref & decref are atomic operations, which are expensive in many use-cases
75 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
76 * some Lustre use-cases require these functions to be strictly
77 * non-blocking; in those cases we need to schedule the required
78 * rehash on a different thread
80 * - safer rehash on large hash tables:
81 * in the old implementation, the rehash function exclusively locked the
82 * hash table and finished the rehash in one batch; this is dangerous on
83 * an SMP system because rehashing millions of elements could take a long
84 * time. The newly implemented rehash can release the lock and relax the
85 * CPU in the middle of a rehash, so it's safe for another thread to
86 * search/change the hash table even while it's rehashing.
88 * - support two different refcount modes:
89 * . the hash table holds a refcount on each element
90 * . the hash table doesn't change the refcount when adding/removing elements
92 * - support long-name hash tables (for the param-tree)
94 * - fix a bug in cfs_hash_rehash_key:
95 * in the old implementation, cfs_hash_rehash_key could corrupt the
96 * hash table because @key was overwritten without any protection.
97 * Now we require the user to define hs_keycpy for rehash-enabled
98 * hash tables; cfs_hash_rehash_key overwrites the hash key inside
99 * the lock by calling hs_keycpy.
101 * - better hash iteration:
102 * we now support both locked and lockless iteration of the hash
103 * table; also, the user can break the iteration by returning 1 from the callback.
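/*
 * Minimal usage sketch (illustrative only, not part of this file).
 * "my_hash_ops", "my_key", "obj" and "mo_hnode" are assumed names, and the
 * theta/flag constants are assumed to come from libcfs_hash.h; the ops
 * table must provide the mandatory callbacks asserted by cfs_hash_create()
 * below.
 *
 *	struct cfs_hash *hs;
 *
 *	hs = cfs_hash_create("my_objs", 7, 12, 4, 0,
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &my_hash_ops, CFS_HASH_DEFAULT);
 *	if (hs == NULL)
 *		return -ENOMEM;
 *	cfs_hash_add(hs, &obj->mo_key, &obj->mo_hnode);
 *	obj = cfs_hash_lookup(hs, &my_key);	(takes a ref via hs_get)
 *	if (obj != NULL)
 *		cfs_hash_put(hs, &obj->mo_hnode);	(drops the ref)
 *	cfs_hash_putref(hs);	(release the table when done)
 */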
105 #include <linux/seq_file.h>
107 #include <libcfs/linux/linux-list.h>
108 #include <libcfs/libcfs.h>
110 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
111 static unsigned int warn_on_depth = 8;
112 module_param(warn_on_depth, uint, 0644);
113 MODULE_PARM_DESC(warn_on_depth, "warn when hash depth is high");
116 struct cfs_wi_sched *cfs_sched_rehash;
119 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
122 cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
125 cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
126 __acquires(&lock->spin)
128 spin_lock(&lock->spin);
132 cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
133 __releases(&lock->spin)
135 spin_unlock(&lock->spin);
139 cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
140 __acquires(&lock->rw)
143 read_lock(&lock->rw);
145 write_lock(&lock->rw);
149 cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
150 __releases(&lock->rw)
153 read_unlock(&lock->rw);
155 write_unlock(&lock->rw);
159 static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
160 .hs_lock = cfs_hash_nl_lock,
161 .hs_unlock = cfs_hash_nl_unlock,
162 .hs_bkt_lock = cfs_hash_nl_lock,
163 .hs_bkt_unlock = cfs_hash_nl_unlock,
166 /** no bucket lock, one spinlock to protect everything */
167 static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
168 .hs_lock = cfs_hash_spin_lock,
169 .hs_unlock = cfs_hash_spin_unlock,
170 .hs_bkt_lock = cfs_hash_nl_lock,
171 .hs_bkt_unlock = cfs_hash_nl_unlock,
174 /** spin bucket lock, rehash is enabled */
175 static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
176 .hs_lock = cfs_hash_rw_lock,
177 .hs_unlock = cfs_hash_rw_unlock,
178 .hs_bkt_lock = cfs_hash_spin_lock,
179 .hs_bkt_unlock = cfs_hash_spin_unlock,
182 /** rw bucket lock, rehash is enabled */
183 static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
184 .hs_lock = cfs_hash_rw_lock,
185 .hs_unlock = cfs_hash_rw_unlock,
186 .hs_bkt_lock = cfs_hash_rw_lock,
187 .hs_bkt_unlock = cfs_hash_rw_unlock,
190 /** spin bucket lock, rehash is disabled */
191 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
192 .hs_lock = cfs_hash_nl_lock,
193 .hs_unlock = cfs_hash_nl_unlock,
194 .hs_bkt_lock = cfs_hash_spin_lock,
195 .hs_bkt_unlock = cfs_hash_spin_unlock,
198 /** rw bucket lock, rehash is disabled */
199 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
200 .hs_lock = cfs_hash_nl_lock,
201 .hs_unlock = cfs_hash_nl_unlock,
202 .hs_bkt_lock = cfs_hash_rw_lock,
203 .hs_bkt_unlock = cfs_hash_rw_unlock,
207 cfs_hash_lock_setup(struct cfs_hash *hs)
209 if (cfs_hash_with_no_lock(hs)) {
210 hs->hs_lops = &cfs_hash_nl_lops;
212 } else if (cfs_hash_with_no_bktlock(hs)) {
213 hs->hs_lops = &cfs_hash_nbl_lops;
214 spin_lock_init(&hs->hs_lock.spin);
216 } else if (cfs_hash_with_rehash(hs)) {
217 rwlock_init(&hs->hs_lock.rw);
219 if (cfs_hash_with_rw_bktlock(hs))
220 hs->hs_lops = &cfs_hash_bkt_rw_lops;
221 else if (cfs_hash_with_spin_bktlock(hs))
222 hs->hs_lops = &cfs_hash_bkt_spin_lops;
226 if (cfs_hash_with_rw_bktlock(hs))
227 hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
228 else if (cfs_hash_with_spin_bktlock(hs))
229 hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
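/*
 * Summary of the selection logic above, assuming the cfs_hash_with_*()
 * predicates map directly onto the corresponding CFS_HASH_* creation flags:
 *
 *	NO_LOCK			-> cfs_hash_nl_lops (no locking at all)
 *	NO_BKTLOCK		-> cfs_hash_nbl_lops (one top-level spinlock)
 *	REHASH + RW_BKTLOCK	-> cfs_hash_bkt_rw_lops (top rwlock, rw bucket locks)
 *	REHASH + SPIN_BKTLOCK	-> cfs_hash_bkt_spin_lops (top rwlock, spin bucket locks)
 *	RW_BKTLOCK only		-> cfs_hash_nr_bkt_rw_lops (rw bucket locks only)
 *	SPIN_BKTLOCK only	-> cfs_hash_nr_bkt_spin_lops (spin bucket locks only)
 */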
236 * Simple hash head without depth tracking.
237 * New elements are always added to the head of the hlist.
239 struct cfs_hash_head {
240 struct hlist_head hh_head; /**< entries list */
244 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
246 return sizeof(struct cfs_hash_head);
249 static struct hlist_head *
250 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
252 struct cfs_hash_head *head;
254 head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
255 return &head[bd->bd_offset].hh_head;
259 cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
260 struct hlist_node *hnode)
262 hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
263 return -1; /* unknown depth */
267 cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
268 struct hlist_node *hnode)
270 hlist_del_init(hnode);
271 return -1; /* unknown depth */
275 * Simple hash head with depth tracking.
276 * New elements are always added to the head of the hlist.
278 struct cfs_hash_head_dep {
279 struct hlist_head hd_head; /**< entries list */
280 unsigned int hd_depth; /**< list length */
284 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
286 return sizeof(struct cfs_hash_head_dep);
289 static struct hlist_head *
290 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
292 struct cfs_hash_head_dep *head;
294 head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
295 return &head[bd->bd_offset].hd_head;
299 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
300 struct hlist_node *hnode)
302 struct cfs_hash_head_dep *hh;
304 hh = container_of(cfs_hash_hd_hhead(hs, bd),
305 struct cfs_hash_head_dep, hd_head);
306 hlist_add_head(hnode, &hh->hd_head);
307 return ++hh->hd_depth;
311 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
312 struct hlist_node *hnode)
314 struct cfs_hash_head_dep *hh;
316 hh = container_of(cfs_hash_hd_hhead(hs, bd),
317 struct cfs_hash_head_dep, hd_head);
318 hlist_del_init(hnode);
319 return --hh->hd_depth;
323 * Hash head that also tracks the tail, without depth tracking.
324 * New elements are always added to the tail of the hlist.
326 struct cfs_hash_dhead {
327 struct hlist_head dh_head; /**< entries list */
328 struct hlist_node *dh_tail; /**< the last entry */
332 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
334 return sizeof(struct cfs_hash_dhead);
337 static struct hlist_head *
338 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
340 struct cfs_hash_dhead *head;
342 head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
343 return &head[bd->bd_offset].dh_head;
347 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
348 struct hlist_node *hnode)
350 struct cfs_hash_dhead *dh;
352 dh = container_of(cfs_hash_dh_hhead(hs, bd),
353 struct cfs_hash_dhead, dh_head);
354 if (dh->dh_tail != NULL) /* not empty */
355 hlist_add_behind(hnode, dh->dh_tail);
356 else /* empty list */
357 hlist_add_head(hnode, &dh->dh_head);
359 return -1; /* unknown depth */
363 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
364 struct hlist_node *hnd)
366 struct cfs_hash_dhead *dh;
368 dh = container_of(cfs_hash_dh_hhead(hs, bd),
369 struct cfs_hash_dhead, dh_head);
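/*
 * Tail maintenance below: when the deleted node is the tail, the new tail
 * is its predecessor, recovered from hnd->pprev with container_of().
 * pprev points at the 'next' field of the previous node, or at the list
 * head's 'first' field, in which case the list becomes empty and dh_tail
 * is reset to NULL.
 */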
370 if (hnd->next == NULL) { /* it's the tail */
371 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
372 container_of(hnd->pprev, struct hlist_node, next);
375 return -1; /* unknown depth */
379 * Hash head that also tracks the tail, with depth tracking.
380 * New elements are always added to the tail of the hlist.
382 struct cfs_hash_dhead_dep {
383 struct hlist_head dd_head; /**< entries list */
384 struct hlist_node *dd_tail; /**< the last entry */
385 unsigned int dd_depth; /**< list length */
389 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
391 return sizeof(struct cfs_hash_dhead_dep);
394 static struct hlist_head *
395 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
397 struct cfs_hash_dhead_dep *head;
399 head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
400 return &head[bd->bd_offset].dd_head;
404 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
405 struct hlist_node *hnode)
407 struct cfs_hash_dhead_dep *dh;
409 dh = container_of(cfs_hash_dd_hhead(hs, bd),
410 struct cfs_hash_dhead_dep, dd_head);
411 if (dh->dd_tail != NULL) /* not empty */
412 hlist_add_behind(hnode, dh->dd_tail);
413 else /* empty list */
414 hlist_add_head(hnode, &dh->dd_head);
416 return ++dh->dd_depth;
420 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
421 struct hlist_node *hnd)
423 struct cfs_hash_dhead_dep *dh;
425 dh = container_of(cfs_hash_dd_hhead(hs, bd),
426 struct cfs_hash_dhead_dep, dd_head);
427 if (hnd->next == NULL) { /* it's the tail */
428 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
429 container_of(hnd->pprev, struct hlist_node, next);
432 return --dh->dd_depth;
435 static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
436 .hop_hhead = cfs_hash_hh_hhead,
437 .hop_hhead_size = cfs_hash_hh_hhead_size,
438 .hop_hnode_add = cfs_hash_hh_hnode_add,
439 .hop_hnode_del = cfs_hash_hh_hnode_del,
442 static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
443 .hop_hhead = cfs_hash_hd_hhead,
444 .hop_hhead_size = cfs_hash_hd_hhead_size,
445 .hop_hnode_add = cfs_hash_hd_hnode_add,
446 .hop_hnode_del = cfs_hash_hd_hnode_del,
449 static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
450 .hop_hhead = cfs_hash_dh_hhead,
451 .hop_hhead_size = cfs_hash_dh_hhead_size,
452 .hop_hnode_add = cfs_hash_dh_hnode_add,
453 .hop_hnode_del = cfs_hash_dh_hnode_del,
456 static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
457 .hop_hhead = cfs_hash_dd_hhead,
458 .hop_hhead_size = cfs_hash_dd_hhead_size,
459 .hop_hnode_add = cfs_hash_dd_hnode_add,
460 .hop_hnode_del = cfs_hash_dd_hnode_del,
464 cfs_hash_hlist_setup(struct cfs_hash *hs)
466 if (cfs_hash_with_add_tail(hs)) {
467 hs->hs_hops = cfs_hash_with_depth(hs) ?
468 &cfs_hash_dd_hops : &cfs_hash_dh_hops;
470 hs->hs_hops = cfs_hash_with_depth(hs) ?
471 &cfs_hash_hd_hops : &cfs_hash_hh_hops;
476 cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
477 unsigned int bits, const void *key, struct cfs_hash_bd *bd)
479 unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
481 LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
483 bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
484 bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
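/*
 * Worked example of the split above: with bits = 10 and hs_bkt_bits = 3,
 * the hash index has 10 significant bits; the low 7 bits select one of
 * 128 buckets and the high 3 bits select one of the 8 hlist heads within
 * that bucket.
 */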
488 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
490 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
491 if (likely(hs->hs_rehash_buckets == NULL)) {
492 cfs_hash_bd_from_key(hs, hs->hs_buckets,
493 hs->hs_cur_bits, key, bd);
495 LASSERT(hs->hs_rehash_bits != 0);
496 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
497 hs->hs_rehash_bits, key, bd);
500 EXPORT_SYMBOL(cfs_hash_bd_get);
503 cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
505 if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
508 bd->bd_bucket->hsb_depmax = dep_cur;
509 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
510 if (likely(warn_on_depth == 0 ||
511 max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
514 spin_lock(&hs->hs_dep_lock);
515 hs->hs_dep_max = dep_cur;
516 hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
517 hs->hs_dep_off = bd->bd_offset;
518 hs->hs_dep_bits = hs->hs_cur_bits;
519 spin_unlock(&hs->hs_dep_lock);
521 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
526 cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
527 struct hlist_node *hnode)
531 rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
532 cfs_hash_bd_dep_record(hs, bd, rc);
533 bd->bd_bucket->hsb_version++;
534 if (unlikely(bd->bd_bucket->hsb_version == 0))
535 bd->bd_bucket->hsb_version++;
536 bd->bd_bucket->hsb_count++;
538 if (cfs_hash_with_counter(hs))
539 atomic_inc(&hs->hs_count);
540 if (!cfs_hash_with_no_itemref(hs))
541 cfs_hash_get(hs, hnode);
543 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
546 cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
547 struct hlist_node *hnode)
549 hs->hs_hops->hop_hnode_del(hs, bd, hnode);
551 LASSERT(bd->bd_bucket->hsb_count > 0);
552 bd->bd_bucket->hsb_count--;
553 bd->bd_bucket->hsb_version++;
554 if (unlikely(bd->bd_bucket->hsb_version == 0))
555 bd->bd_bucket->hsb_version++;
557 if (cfs_hash_with_counter(hs)) {
558 LASSERT(atomic_read(&hs->hs_count) > 0);
559 atomic_dec(&hs->hs_count);
561 if (!cfs_hash_with_no_itemref(hs))
562 cfs_hash_put_locked(hs, hnode);
564 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
567 cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
568 struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
570 struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
571 struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
574 if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
577 /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
578 * in cfs_hash_bd_del/add_locked */
579 hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
580 rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
581 cfs_hash_bd_dep_record(hs, bd_new, rc);
583 LASSERT(obkt->hsb_count > 0);
586 if (unlikely(obkt->hsb_version == 0))
590 if (unlikely(nbkt->hsb_version == 0))
595 /** always set, for sanity (avoid ZERO intent) */
596 CFS_HS_LOOKUP_MASK_FIND = 1 << 0,
597 /** return entry with a ref */
598 CFS_HS_LOOKUP_MASK_REF = 1 << 1,
599 /** add entry if not existing */
600 CFS_HS_LOOKUP_MASK_ADD = 1 << 2,
601 /** delete entry, ignore other masks */
602 CFS_HS_LOOKUP_MASK_DEL = 1 << 3,
605 enum cfs_hash_lookup_intent {
606 /** return item w/o refcount */
607 CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND,
608 /** return item with refcount */
609 CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND |
610 CFS_HS_LOOKUP_MASK_REF),
611 /** return item w/o refcount if existed, otherwise add */
612 CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND |
613 CFS_HS_LOOKUP_MASK_ADD),
614 /** return item with refcount if existed, otherwise add */
615 CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
616 CFS_HS_LOOKUP_MASK_ADD),
617 /** delete if existed */
618 CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
619 CFS_HS_LOOKUP_MASK_DEL)
622 static struct hlist_node *
623 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
624 const void *key, struct hlist_node *hnode,
625 enum cfs_hash_lookup_intent intent)
628 struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
629 struct hlist_node *ehnode;
630 struct hlist_node *match;
631 int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
633 /* with this function, we can avoid a lot of useless refcount ops,
634 * which are expensive atomic operations most of the time. */
635 match = intent_add ? NULL : hnode;
636 hlist_for_each(ehnode, hhead) {
637 if (!cfs_hash_keycmp(hs, key, ehnode))
640 if (match != NULL && match != ehnode) /* can't match */
644 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
645 cfs_hash_bd_del_locked(hs, bd, ehnode);
649 /* caller wants refcount? */
650 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
651 cfs_hash_get(hs, ehnode);
658 LASSERT(hnode != NULL);
659 cfs_hash_bd_add_locked(hs, bd, hnode);
664 cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
667 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
668 CFS_HS_LOOKUP_IT_FIND);
670 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
673 cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
676 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
677 CFS_HS_LOOKUP_IT_PEEK);
679 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
682 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
683 unsigned n, int excl)
685 struct cfs_hash_bucket *prev = NULL;
689 * bds must be in ascending order of bd->bd_bucket->hsb_index.
690 * NB: it's possible that several bds point to the same bucket but
691 * have different bd::bd_offset, so we must take care to avoid deadlock.
693 cfs_hash_for_each_bd(bds, n, i) {
694 if (prev == bds[i].bd_bucket)
697 LASSERT(prev == NULL ||
698 prev->hsb_index < bds[i].bd_bucket->hsb_index);
699 cfs_hash_bd_lock(hs, &bds[i], excl);
700 prev = bds[i].bd_bucket;
705 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
706 unsigned n, int excl)
708 struct cfs_hash_bucket *prev = NULL;
711 cfs_hash_for_each_bd(bds, n, i) {
712 if (prev != bds[i].bd_bucket) {
713 cfs_hash_bd_unlock(hs, &bds[i], excl);
714 prev = bds[i].bd_bucket;
719 static struct hlist_node *
720 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
721 unsigned n, const void *key)
723 struct hlist_node *ehnode;
726 cfs_hash_for_each_bd(bds, n, i) {
727 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
728 CFS_HS_LOOKUP_IT_FIND);
735 static struct hlist_node *
736 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
737 unsigned n, const void *key,
738 struct hlist_node *hnode, int noref)
740 struct hlist_node *ehnode;
744 LASSERT(hnode != NULL);
745 intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
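	/* The assignment above is branch-free intent selection: !noref is
	 * either 0 or 1, so the multiplication ORs in the REF mask only
	 * when the caller actually wants a reference taken. */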
747 cfs_hash_for_each_bd(bds, n, i) {
748 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
754 if (i == 1) { /* only one bucket */
755 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
757 struct cfs_hash_bd mybd;
759 cfs_hash_bd_get(hs, key, &mybd);
760 cfs_hash_bd_add_locked(hs, &mybd, hnode);
766 static struct hlist_node *
767 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
768 unsigned n, const void *key,
769 struct hlist_node *hnode)
771 struct hlist_node *ehnode;
774 cfs_hash_for_each_bd(bds, n, i) {
775 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
776 CFS_HS_LOOKUP_IT_FINDDEL);
784 cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
788 if (bd2->bd_bucket == NULL)
791 if (bd1->bd_bucket == NULL) {
793 bd2->bd_bucket = NULL;
797 rc = cfs_hash_bd_compare(bd1, bd2);
799 bd2->bd_bucket = NULL;
801 } else if (rc > 0) { /* swap bd1 and bd2 */
802 struct cfs_hash_bd tmp;
811 cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
812 struct cfs_hash_bd *bds)
814 /* NB: caller should hold hs_lock.rw if REHASH is set */
815 cfs_hash_bd_from_key(hs, hs->hs_buckets,
816 hs->hs_cur_bits, key, &bds[0]);
817 if (likely(hs->hs_rehash_buckets == NULL)) {
818 /* no rehash or not rehashing */
819 bds[1].bd_bucket = NULL;
823 LASSERT(hs->hs_rehash_bits != 0);
824 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
825 hs->hs_rehash_bits, key, &bds[1]);
827 cfs_hash_bd_order(&bds[0], &bds[1]);
831 cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
833 cfs_hash_multi_bd_lock(hs, bds, 2, excl);
837 cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
839 cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
843 cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
846 return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
850 cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
851 const void *key, struct hlist_node *hnode,
854 return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
859 cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
860 const void *key, struct hlist_node *hnode)
862 return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
866 cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
867 int bkt_size, int prev_size, int size)
871 for (i = prev_size; i < size; i++) {
872 if (buckets[i] != NULL)
873 LIBCFS_FREE(buckets[i], bkt_size);
876 LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
880 * Create or grow bucket memory. Return old_buckets if no allocation was
881 * needed, the newly allocated buckets if allocation was needed and
882 * successful, and NULL on error.
884 static struct cfs_hash_bucket **
885 cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
886 unsigned int old_size, unsigned int new_size)
888 struct cfs_hash_bucket **new_bkts;
891 LASSERT(old_size == 0 || old_bkts != NULL);
893 if (old_bkts != NULL && old_size == new_size)
896 LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
897 if (new_bkts == NULL)
900 if (old_bkts != NULL) {
901 memcpy(new_bkts, old_bkts,
902 min(old_size, new_size) * sizeof(*old_bkts));
905 for (i = old_size; i < new_size; i++) {
906 struct hlist_head *hhead;
907 struct cfs_hash_bd bd;
909 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
910 if (new_bkts[i] == NULL) {
911 cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
916 new_bkts[i]->hsb_index = i;
917 new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
918 new_bkts[i]->hsb_depmax = -1; /* unknown */
919 bd.bd_bucket = new_bkts[i];
920 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
921 INIT_HLIST_HEAD(hhead);
923 if (cfs_hash_with_no_lock(hs) ||
924 cfs_hash_with_no_bktlock(hs))
927 if (cfs_hash_with_rw_bktlock(hs))
928 rwlock_init(&new_bkts[i]->hsb_lock.rw);
929 else if (cfs_hash_with_spin_bktlock(hs))
930 spin_lock_init(&new_bkts[i]->hsb_lock.spin);
932 LBUG(); /* invalid use-case */
938 * Initialize a new libcfs hash, where:
939 * @name - Descriptive hash name
940 * @cur_bits - Initial hash table size, in bits
941 * @max_bits - Maximum allowed hash table resize, in bits
942 * @ops - Registered hash table operations
943 * @flags - CFS_HASH_REHASH enable dynamic hash resizing
944 * - CFS_HASH_SORT enable chained hash sort
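/*
 * Sketch of an ops table that satisfies the assertions in cfs_hash_create()
 * below; the "my_*" callbacks and their exact signatures are assumptions,
 * inferred from how the ops are invoked in this file:
 *
 *	static struct cfs_hash_ops my_hash_ops = {
 *		.hs_hash	= my_hash,	(key -> hash index)
 *		.hs_key		= my_key,	(hnode -> key)
 *		.hs_keycmp	= my_keycmp,	(compare key with hnode)
 *		.hs_object	= my_object,	(hnode -> object)
 *		.hs_get		= my_get,	(take a reference)
 *		.hs_put		= my_put,	(drop a reference)
 *	};
 */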
946 static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
948 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
949 static int cfs_hash_dep_print(struct cfs_workitem *wi)
951 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
957 spin_lock(&hs->hs_dep_lock);
958 dep = hs->hs_dep_max;
959 bkt = hs->hs_dep_bkt;
960 off = hs->hs_dep_off;
961 bits = hs->hs_dep_bits;
962 spin_unlock(&hs->hs_dep_lock);
964 LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
965 hs->hs_name, bits, dep, bkt, off);
966 spin_lock(&hs->hs_dep_lock);
967 hs->hs_dep_bits = 0; /* mark as workitem done */
968 spin_unlock(&hs->hs_dep_lock);
972 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
974 spin_lock_init(&hs->hs_dep_lock);
975 cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
978 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
980 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
983 spin_lock(&hs->hs_dep_lock);
984 while (hs->hs_dep_bits != 0) {
985 spin_unlock(&hs->hs_dep_lock);
987 spin_lock(&hs->hs_dep_lock);
989 spin_unlock(&hs->hs_dep_lock);
992 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
994 static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
995 static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
997 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1000 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1001 unsigned bkt_bits, unsigned extra_bytes,
1002 unsigned min_theta, unsigned max_theta,
1003 struct cfs_hash_ops *ops, unsigned flags)
1005 struct cfs_hash *hs;
1010 CLASSERT(CFS_HASH_THETA_BITS < 15);
1012 LASSERT(name != NULL);
1013 LASSERT(ops != NULL);
1014 LASSERT(ops->hs_key);
1015 LASSERT(ops->hs_hash);
1016 LASSERT(ops->hs_object);
1017 LASSERT(ops->hs_keycmp);
1018 LASSERT(ops->hs_get != NULL);
1019 LASSERT(ops->hs_put != NULL || ops->hs_put_locked != NULL);
1021 if ((flags & CFS_HASH_REHASH) != 0)
1022 flags |= CFS_HASH_COUNTER; /* must have counter */
1024 LASSERT(cur_bits > 0);
1025 LASSERT(cur_bits >= bkt_bits);
1026 LASSERT(max_bits >= cur_bits && max_bits < 31);
1027 LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1028 LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1029 (flags & CFS_HASH_NO_LOCK) == 0));
1030 LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1031 ops->hs_keycpy != NULL));
1033 len = (flags & CFS_HASH_BIGNAME) == 0 ?
1034 CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1035 LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1039 strlcpy(hs->hs_name, name, len);
1040 hs->hs_flags = flags;
1042 atomic_set(&hs->hs_refcount, 1);
1043 atomic_set(&hs->hs_count, 0);
1045 cfs_hash_lock_setup(hs);
1046 cfs_hash_hlist_setup(hs);
1048 hs->hs_cur_bits = (__u8)cur_bits;
1049 hs->hs_min_bits = (__u8)cur_bits;
1050 hs->hs_max_bits = (__u8)max_bits;
1051 hs->hs_bkt_bits = (__u8)bkt_bits;
1054 hs->hs_extra_bytes = extra_bytes;
1055 hs->hs_rehash_bits = 0;
1056 cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1057 cfs_hash_depth_wi_init(hs);
1059 if (cfs_hash_with_rehash(hs))
1060 __cfs_hash_set_theta(hs, min_theta, max_theta);
1062 hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1064 if (hs->hs_buckets != NULL)
1067 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1070 EXPORT_SYMBOL(cfs_hash_create);
1073 * Clean up the libcfs hash @hs.
1076 cfs_hash_destroy(struct cfs_hash *hs)
1078 struct hlist_node *hnode;
1079 struct hlist_node *pos;
1080 struct cfs_hash_bd bd;
1084 LASSERT(hs != NULL);
1085 LASSERT(!cfs_hash_is_exiting(hs) &&
1086 !cfs_hash_is_iterating(hs));
1089 * prohibit further rehashes; no lock is needed because this is
1090 * the only (last) thread that can change it.
1093 if (cfs_hash_with_rehash(hs))
1094 cfs_hash_rehash_cancel(hs);
1096 cfs_hash_depth_wi_cancel(hs);
1097 /* rehash should be done/canceled */
1098 LASSERT(hs->hs_buckets != NULL &&
1099 hs->hs_rehash_buckets == NULL);
1101 cfs_hash_for_each_bucket(hs, &bd, i) {
1102 struct hlist_head *hhead;
1104 LASSERT(bd.bd_bucket != NULL);
1105 /* no need to take this lock; it's only for code consistency */
1106 cfs_hash_bd_lock(hs, &bd, 1);
1108 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1109 hlist_for_each_safe(hnode, pos, hhead) {
1110 LASSERTF(!cfs_hash_with_assert_empty(hs),
1111 "hash %s bucket %u(%u) is not "
1112 " empty: %u items left\n",
1113 hs->hs_name, bd.bd_bucket->hsb_index,
1114 bd.bd_offset, bd.bd_bucket->hsb_count);
1115 /* can't validate the key here, because the
1116 * rehash may have been interrupted */
1117 cfs_hash_bd_del_locked(hs, &bd, hnode);
1118 cfs_hash_exit(hs, hnode);
1121 LASSERT(bd.bd_bucket->hsb_count == 0);
1122 cfs_hash_bd_unlock(hs, &bd, 1);
1126 LASSERT(atomic_read(&hs->hs_count) == 0);
1128 cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1129 0, CFS_HASH_NBKT(hs));
1130 i = cfs_hash_with_bigname(hs) ?
1131 CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1132 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1137 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1139 if (atomic_inc_not_zero(&hs->hs_refcount))
1143 EXPORT_SYMBOL(cfs_hash_getref);
1145 void cfs_hash_putref(struct cfs_hash *hs)
1147 if (atomic_dec_and_test(&hs->hs_refcount))
1148 cfs_hash_destroy(hs);
1150 EXPORT_SYMBOL(cfs_hash_putref);
1153 cfs_hash_rehash_bits(struct cfs_hash *hs)
1155 if (cfs_hash_with_no_lock(hs) ||
1156 !cfs_hash_with_rehash(hs))
1159 if (unlikely(cfs_hash_is_exiting(hs)))
1162 if (unlikely(cfs_hash_is_rehashing(hs)))
1165 if (unlikely(cfs_hash_is_iterating(hs)))
1168 /* XXX: need to handle case with max_theta != 2.0
1169 * and the case with min_theta != 0.5 */
1170 if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1171 (__cfs_hash_theta(hs) > hs->hs_max_theta))
1172 return hs->hs_cur_bits + 1;
1174 if (!cfs_hash_with_shrink(hs))
1177 if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1178 (__cfs_hash_theta(hs) < hs->hs_min_theta))
1179 return hs->hs_cur_bits - 1;
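/*
 * Worked example (theta is roughly the average number of items per hlist
 * head): with hs_cur_bits = 10 (1024 hlist heads) and 3000 items, theta
 * is about 2.93; if hs_max_theta corresponds to 2.0, the function above
 * returns 11 and the table grows to 2048 heads.
 */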
1185 * don't allow inline rehash if:
1186 * - the user wants non-blocking changes (add/del) on the hash table
1187 * - there are too many elements
1190 cfs_hash_rehash_inline(struct cfs_hash *hs)
1192 return !cfs_hash_with_nblk_change(hs) &&
1193 atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1197 * Add item @hnode to libcfs hash @hs using @key. The registered
1198 * ops->hs_get function will be called when the item is added.
1201 cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1203 struct cfs_hash_bd bd;
1206 LASSERT(hlist_unhashed(hnode));
1208 cfs_hash_lock(hs, 0);
1209 cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1211 cfs_hash_key_validate(hs, key, hnode);
1212 cfs_hash_bd_add_locked(hs, &bd, hnode);
1214 cfs_hash_bd_unlock(hs, &bd, 1);
1216 bits = cfs_hash_rehash_bits(hs);
1217 cfs_hash_unlock(hs, 0);
1219 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1221 EXPORT_SYMBOL(cfs_hash_add);
1223 static struct hlist_node *
1224 cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1225 struct hlist_node *hnode, int noref)
1227 struct hlist_node *ehnode;
1228 struct cfs_hash_bd bds[2];
1231 LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);
1233 cfs_hash_lock(hs, 0);
1234 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1236 cfs_hash_key_validate(hs, key, hnode);
1237 ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1239 cfs_hash_dual_bd_unlock(hs, bds, 1);
1241 if (ehnode == hnode) /* new item added */
1242 bits = cfs_hash_rehash_bits(hs);
1243 cfs_hash_unlock(hs, 0);
1245 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1251 * Add item @hnode to libcfs hash @hs using @key. The registered
1252 * ops->hs_get function will be called if the item was added.
1253 * Returns 0 on success or -EALREADY on key collisions.
1256 cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
1257 struct hlist_node *hnode)
1259 return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1262 EXPORT_SYMBOL(cfs_hash_add_unique);
1265 * Add item @hnode to libcfs hash @hs using @key. If this @key
1266 * already exists in the hash then ops->hs_get will be called on the
1267 * conflicting entry and that entry will be returned to the caller.
1268 * Otherwise ops->hs_get is called on the item which was added.
1271 cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1272 struct hlist_node *hnode)
1274 hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1276 return cfs_hash_object(hs, hnode);
1278 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1281 * Delete item @hnode from the libcfs hash @hs using @key. The @key
1282 * is required to ensure the correct hash bucket is locked since there
1283 * is no direct linkage from the item to the bucket. The object
1284 * removed from the hash will be returned and ops->hs_put is called
1285 * on the removed object.
1288 cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1292 struct cfs_hash_bd bds[2];
1294 cfs_hash_lock(hs, 0);
1295 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1297 /* NB: do nothing if @hnode is not in hash table */
1298 if (hnode == NULL || !hlist_unhashed(hnode)) {
1299 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1300 cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1302 hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1307 if (hnode != NULL) {
1308 obj = cfs_hash_object(hs, hnode);
1309 bits = cfs_hash_rehash_bits(hs);
1312 cfs_hash_dual_bd_unlock(hs, bds, 1);
1313 cfs_hash_unlock(hs, 0);
1315 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1319 EXPORT_SYMBOL(cfs_hash_del);
1322 * Delete item given @key in libcfs hash @hs. The first @key found in
1323 * the hash will be removed; if the key exists multiple times in the hash
1324 * @hs, this function must be called once per key. The removed object
1325 * will be returned and ops->hs_put is called on the removed object.
1328 cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1330 return cfs_hash_del(hs, key, NULL);
1332 EXPORT_SYMBOL(cfs_hash_del_key);
1335 * Lookup an item using @key in the libcfs hash @hs and return it.
1336 * If the @key is found in the hash, hs->hs_get() is called and the
1337 * matching object is returned. It is the caller's responsibility
1338 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1339 * when finished with the object. If the @key was not found
1340 * in the hash @hs, NULL is returned.
1343 cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1346 struct hlist_node *hnode;
1347 struct cfs_hash_bd bds[2];
1349 cfs_hash_lock(hs, 0);
1350 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1352 hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1354 obj = cfs_hash_object(hs, hnode);
1356 cfs_hash_dual_bd_unlock(hs, bds, 0);
1357 cfs_hash_unlock(hs, 0);
1361 EXPORT_SYMBOL(cfs_hash_lookup);
1364 cfs_hash_for_each_enter(struct cfs_hash *hs)
1366 LASSERT(!cfs_hash_is_exiting(hs));
1368 if (!cfs_hash_with_rehash(hs))
1371 * NB: there is a race on cfs_hash::hs_iterating, but it doesn't matter
1372 * because it's just an unreliable signal to the rehash thread; the
1373 * rehash thread will try to finish the rehash ASAP when it sees this.
1375 hs->hs_iterating = 1;
1377 cfs_hash_lock(hs, 1);
1380 /* NB: iteration is mostly started by service threads; we tend to
1381 * cancel a pending rehash request instead of blocking the service
1382 * thread, and will relaunch the rehash request after the
1383 * iteration */
1384 if (cfs_hash_is_rehashing(hs))
1385 cfs_hash_rehash_cancel_locked(hs);
1386 cfs_hash_unlock(hs, 1);
1390 cfs_hash_for_each_exit(struct cfs_hash *hs)
1395 if (!cfs_hash_with_rehash(hs))
1397 cfs_hash_lock(hs, 1);
1398 remained = --hs->hs_iterators;
1399 bits = cfs_hash_rehash_bits(hs);
1400 cfs_hash_unlock(hs, 1);
1401 /* NB: there is a race on cfs_hash::hs_iterating, see above */
1403 hs->hs_iterating = 0;
1405 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1411 * For each item in the libcfs hash @hs call the passed callback @func
1412 * and pass to it as an argument each hash item and the private @data.
1414 * a) the function may sleep!
1415 * b) during the callback:
1416 * . the bucket lock is held so the callback must never sleep.
1417 * . if @remove_safe is true, the user can remove the current item
1418 * with cfs_hash_bd_del_locked
1421 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1422 void *data, int remove_safe)
1424 struct hlist_node *hnode;
1425 struct hlist_node *pos;
1426 struct cfs_hash_bd bd;
1428 int excl = !!remove_safe;
1433 cfs_hash_for_each_enter(hs);
1435 cfs_hash_lock(hs, 0);
1436 LASSERT(!cfs_hash_is_rehashing(hs));
1438 cfs_hash_for_each_bucket(hs, &bd, i) {
1439 struct hlist_head *hhead;
1441 cfs_hash_bd_lock(hs, &bd, excl);
1442 if (func == NULL) { /* only glimpse size */
1443 count += bd.bd_bucket->hsb_count;
1444 cfs_hash_bd_unlock(hs, &bd, excl);
1448 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1449 hlist_for_each_safe(hnode, pos, hhead) {
1450 cfs_hash_bucket_validate(hs, &bd, hnode);
1453 if (func(hs, &bd, hnode, data)) {
1454 cfs_hash_bd_unlock(hs, &bd, excl);
1459 cfs_hash_bd_unlock(hs, &bd, excl);
1460 if (loop < CFS_HASH_LOOP_HOG)
1463 cfs_hash_unlock(hs, 0);
1465 cfs_hash_lock(hs, 0);
1468 cfs_hash_unlock(hs, 0);
1470 cfs_hash_for_each_exit(hs);
1474 struct cfs_hash_cond_arg {
1475 cfs_hash_cond_opt_cb_t func;
1480 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1481 struct hlist_node *hnode, void *data)
1483 struct cfs_hash_cond_arg *cond = data;
1485 if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1486 cfs_hash_bd_del_locked(hs, bd, hnode);
1491 * Delete items from the libcfs hash @hs for which @func returns true.
1492 * The write lock is held during the loop over each bucket to prevent
1493 * any object from being referenced while it is deleted.
1496 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1498 struct cfs_hash_cond_arg arg = {
1503 cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1505 EXPORT_SYMBOL(cfs_hash_cond_del);
1508 cfs_hash_for_each(struct cfs_hash *hs,
1509 cfs_hash_for_each_cb_t func, void *data)
1511 cfs_hash_for_each_tight(hs, func, data, 0);
1513 EXPORT_SYMBOL(cfs_hash_for_each);
1516 cfs_hash_for_each_safe(struct cfs_hash *hs,
1517 cfs_hash_for_each_cb_t func, void *data)
1519 cfs_hash_for_each_tight(hs, func, data, 1);
1521 EXPORT_SYMBOL(cfs_hash_for_each_safe);
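/*
 * Example callback (illustrative sketch): count all items with
 * cfs_hash_for_each(); returning non-zero from the callback stops the
 * iteration. "count_cb" is an assumed name; the signature matches the
 * cfs_hash_for_each_cb_t usage in this file.
 *
 *	static int count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			    struct hlist_node *hnode, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	(0 = continue iterating)
 *	}
 *
 *	int count = 0;
 *	cfs_hash_for_each(hs, count_cb, &count);
 */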
1524 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1525 struct hlist_node *hnode, void *data)
1528 return 1; /* return 1 to break the loop */
1532 cfs_hash_is_empty(struct cfs_hash *hs)
1536 cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1539 EXPORT_SYMBOL(cfs_hash_is_empty);
1542 cfs_hash_size_get(struct cfs_hash *hs)
1544 return cfs_hash_with_counter(hs) ?
1545 atomic_read(&hs->hs_count) :
1546 cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1548 EXPORT_SYMBOL(cfs_hash_size_get);
1551 * cfs_hash_for_each_relax:
1552 * Iterate the hash table and call @func on each item without
1553 * holding any lock. This function cannot guarantee to finish the
1554 * iteration if these features are enabled:
1556 * a. if rehash_key is enabled, an item can be moved from
1557 * one bucket to another bucket
1558 * b. the user can remove a non-zero-ref item from the hash table,
1559 * so the item can be removed from the hash table; even worse,
1560 * it's possible that the user changed the key and inserted it into
1561 * another hash bucket.
1562 * There's no way for us to finish the iteration correctly in the
1563 * previous two cases, so the iteration has to be stopped on change.
1566 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1567 void *data, int start)
1569 struct hlist_node *hnode;
1570 struct hlist_node *next = NULL;
1571 struct cfs_hash_bd bd;
1580 stop_on_change = cfs_hash_with_rehash_key(hs) ||
1581 !cfs_hash_with_no_itemref(hs);
1582 has_put_locked = hs->hs_ops->hs_put_locked != NULL;
1583 cfs_hash_lock(hs, 0);
1585 LASSERT(!cfs_hash_is_rehashing(hs));
1587 cfs_hash_for_each_bucket(hs, &bd, i) {
1588 struct hlist_head *hhead;
1592 else if (end > 0 && i >= end)
1595 cfs_hash_bd_lock(hs, &bd, 0);
1596 version = cfs_hash_bd_version_get(&bd);
1598 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1599 hnode = hhead->first;
1602 cfs_hash_get(hs, hnode);
1603 for (; hnode != NULL; hnode = next) {
1604 cfs_hash_bucket_validate(hs, &bd, hnode);
1607 cfs_hash_get(hs, next);
1608 cfs_hash_bd_unlock(hs, &bd, 0);
1609 cfs_hash_unlock(hs, 0);
1611 rc = func(hs, &bd, hnode, data);
1612 if (stop_on_change || !has_put_locked)
1613 cfs_hash_put(hs, hnode);
1618 cfs_hash_lock(hs, 0);
1619 cfs_hash_bd_lock(hs, &bd, 0);
1620 if (stop_on_change) {
1622 cfs_hash_bd_version_get(&bd))
1624 } else if (has_put_locked) {
1625 cfs_hash_put_locked(hs, hnode);
1627 if (rc) /* callback wants to break iteration */
1631 if (has_put_locked) {
1632 cfs_hash_put_locked(hs, next);
1636 } else if (rc != 0) {
1640 cfs_hash_bd_unlock(hs, &bd, 0);
1641 if (next != NULL && !has_put_locked) {
1642 cfs_hash_put(hs, next);
1645 if (rc) /* callback wants to break iteration */
1649 if (start > 0 && rc == 0) {
1655 cfs_hash_unlock(hs, 0);
1660 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1661 cfs_hash_for_each_cb_t func, void *data, int start)
1665 if (cfs_hash_with_no_lock(hs) ||
1666 cfs_hash_with_rehash_key(hs) ||
1667 !cfs_hash_with_no_itemref(hs))
1668 RETURN(-EOPNOTSUPP);
1670 if (hs->hs_ops->hs_get == NULL ||
1671 (hs->hs_ops->hs_put == NULL &&
1672 hs->hs_ops->hs_put_locked == NULL))
1673 RETURN(-EOPNOTSUPP);
1675 cfs_hash_for_each_enter(hs);
1676 cfs_hash_for_each_relax(hs, func, data, start);
1677 cfs_hash_for_each_exit(hs);
1681 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
1684 * For each hash bucket in the libcfs hash @hs call the passed callback
1685 * @func until all the hash buckets are empty. The passed callback @func
1686 * or the previously registered callback hs->hs_put must remove the item
1687 * from the hash. You may either use the cfs_hash_del() or hlist_del()
1688 * functions. No rwlocks will be held during the callback @func, so it is
1689 * safe to sleep if needed. This function will not terminate until the
1690 * hash is empty. Note it is still possible to concurrently add new
1691 * items into the hash. It is the caller's responsibility to ensure
1692 * the required locking is in place to prevent concurrent insertions.
1695 cfs_hash_for_each_empty(struct cfs_hash *hs,
1696 cfs_hash_for_each_cb_t func, void *data)
1701 if (cfs_hash_with_no_lock(hs))
1704 if (hs->hs_ops->hs_get == NULL ||
1705 (hs->hs_ops->hs_put == NULL &&
1706 hs->hs_ops->hs_put_locked == NULL))
1709 cfs_hash_for_each_enter(hs);
1710 while (cfs_hash_for_each_relax(hs, func, data, 0)) {
1711 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1714 cfs_hash_for_each_exit(hs);
1717 EXPORT_SYMBOL(cfs_hash_for_each_empty);
1720 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1721 cfs_hash_for_each_cb_t func, void *data)
1723 struct hlist_head *hhead;
1724 struct hlist_node *hnode;
1725 struct cfs_hash_bd bd;
1727 cfs_hash_for_each_enter(hs);
1728 cfs_hash_lock(hs, 0);
1729 if (hindex >= CFS_HASH_NHLIST(hs))
1732 cfs_hash_bd_index_set(hs, hindex, &bd);
1734 cfs_hash_bd_lock(hs, &bd, 0);
1735 hhead = cfs_hash_bd_hhead(hs, &bd);
1736 hlist_for_each(hnode, hhead) {
1737 if (func(hs, &bd, hnode, data))
1740 cfs_hash_bd_unlock(hs, &bd, 0);
1742 cfs_hash_unlock(hs, 0);
1743 cfs_hash_for_each_exit(hs);
1746 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1749 * For each item in the libcfs hash @hs which matches the @key call
1750 * the passed callback @func and pass to it as an argument each hash
1751 * item and the private @data. During the callback the bucket lock
1752 * is held so the callback must never sleep.
1755 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1756 cfs_hash_for_each_cb_t func, void *data)
1758 struct hlist_node *hnode;
1759 struct cfs_hash_bd bds[2];
1762 cfs_hash_lock(hs, 0);
1764 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1766 cfs_hash_for_each_bd(bds, 2, i) {
1767 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1769 hlist_for_each(hnode, hlist) {
1770 cfs_hash_bucket_validate(hs, &bds[i], hnode);
1772 if (cfs_hash_keycmp(hs, key, hnode)) {
1773 if (func(hs, &bds[i], hnode, data))
1779 cfs_hash_dual_bd_unlock(hs, bds, 0);
1780 cfs_hash_unlock(hs, 0);
1782 EXPORT_SYMBOL(cfs_hash_for_each_key);
1785 * Rehash the libcfs hash @hs to the given @bits. This can be used
1786 * to grow the hash size when excessive chaining is detected, or to
1787 * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
1788 * flag is set in @hs the libcfs hash may be dynamically rehashed
1789 * during addition or removal if the hash's theta value falls outside
1790 * the range [hs->hs_min_theta, hs->hs_max_theta]. By default
1791 * these values are tuned to keep the chained hash depth small, and
1792 * this approach assumes a reasonably uniform hashing function. The
1793 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1796 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1800 /* need hold cfs_hash_lock(hs, 1) */
1801 LASSERT(cfs_hash_with_rehash(hs) &&
1802 !cfs_hash_with_no_lock(hs));
1804 if (!cfs_hash_is_rehashing(hs))
1807 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1808 hs->hs_rehash_bits = 0;
1812 for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1813 cfs_hash_unlock(hs, 1);
1814 /* raise a console warning if we have been waiting too long */
1815 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1816 "hash %s is still rehashing, rescheded %d\n",
1817 hs->hs_name, i - 1);
1819 cfs_hash_lock(hs, 1);
1824 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1826 cfs_hash_lock(hs, 1);
1827 cfs_hash_rehash_cancel_locked(hs);
1828 cfs_hash_unlock(hs, 1);
1832 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1836 LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1838 cfs_hash_lock(hs, 1);
1840 rc = cfs_hash_rehash_bits(hs);
1842 cfs_hash_unlock(hs, 1);
1846 hs->hs_rehash_bits = rc;
1848 /* launch and return */
1849 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1850 cfs_hash_unlock(hs, 1);
1854 /* rehash right now */
1855 cfs_hash_unlock(hs, 1);
1857 return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1861 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1863 struct cfs_hash_bd new;
1864 struct hlist_head *hhead;
1865 struct hlist_node *hnode;
1866 struct hlist_node *pos;
1870 /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
1871 cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1872 hlist_for_each_safe(hnode, pos, hhead) {
1873 key = cfs_hash_key(hs, hnode);
1874 LASSERT(key != NULL);
1875 /* Validate hnode is in the correct bucket. */
1876 cfs_hash_bucket_validate(hs, old, hnode);
1878 * Delete from old hash bucket; move to new bucket.
1879 * ops->hs_key must be defined.
1881 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1882 hs->hs_rehash_bits, key, &new);
1883 cfs_hash_bd_move_locked(hs, old, &new, hnode);
1891 cfs_hash_rehash_worker(struct cfs_workitem *wi)
1893 struct cfs_hash *hs =
1894 container_of(wi, struct cfs_hash, hs_rehash_wi);
1895 struct cfs_hash_bucket **bkts;
1896 struct cfs_hash_bd bd;
1897 unsigned int old_size;
1898 unsigned int new_size;
1904 LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1906 cfs_hash_lock(hs, 0);
1907 LASSERT(cfs_hash_is_rehashing(hs));
1909 old_size = CFS_HASH_NBKT(hs);
1910 new_size = CFS_HASH_RH_NBKT(hs);
1912 cfs_hash_unlock(hs, 0);
1915 * we don't need hs::hs_rwlock for hs::hs_buckets,
1916 * because nobody can change the bkt-table except this thread.
1918 bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1919 old_size, new_size);
1920 cfs_hash_lock(hs, 1);
1926 if (bkts == hs->hs_buckets) {
1927 bkts = NULL; /* do nothing */
1931 rc = __cfs_hash_theta(hs);
1932 if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1933 /* free the new allocated bkt-table */
1934 old_size = new_size;
1935 new_size = CFS_HASH_NBKT(hs);
1940 LASSERT(hs->hs_rehash_buckets == NULL);
1941 hs->hs_rehash_buckets = bkts;
1944 cfs_hash_for_each_bucket(hs, &bd, i) {
1945 if (cfs_hash_is_exiting(hs)) {
1947 /* someone wants to destroy the hash, abort now */
1948 if (old_size < new_size) /* OK to free old bkt-table */
1950 /* it's shrinking; need to free the new bkt-table */
1951 hs->hs_rehash_buckets = NULL;
1952 old_size = new_size;
1953 new_size = CFS_HASH_NBKT(hs);
1957 count += cfs_hash_rehash_bd(hs, &bd);
1958 if (count < CFS_HASH_LOOP_HOG ||
1959 cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1964 cfs_hash_unlock(hs, 1);
1966 cfs_hash_lock(hs, 1);
1969 hs->hs_rehash_count++;
1971 bkts = hs->hs_buckets;
1972 hs->hs_buckets = hs->hs_rehash_buckets;
1973 hs->hs_rehash_buckets = NULL;
1975 hs->hs_cur_bits = hs->hs_rehash_bits;
1977 hs->hs_rehash_bits = 0;
1978 if (rc == -ESRCH) /* never be scheduled again */
1979 cfs_wi_exit(cfs_sched_rehash, wi);
1980 bsize = cfs_hash_bkt_size(hs);
1981 cfs_hash_unlock(hs, 1);
1982 /* can't refer to @hs anymore because it could be destroyed */
1984 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1986 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1987 /* return 1 only if cfs_wi_exit is called */
1988 return rc == -ESRCH;
1992 * Rehash the object referenced by @hnode in the libcfs hash @hs. The
1993 * @old_key must be provided to locate the object's previous location
1994 * in the hash, and the @new_key will be used to reinsert the object.
1995 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1996 * combo when it is critical that there is no window in time where the
1997 * object is missing from the hash. When an object is being rehashed
1998 * the registered cfs_hash_get() and cfs_hash_put() functions will not be called.
2001 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
2002 void *new_key, struct hlist_node *hnode)
2004 struct cfs_hash_bd bds[3];
2005 struct cfs_hash_bd old_bds[2];
2006 struct cfs_hash_bd new_bd;
2008 LASSERT(!hlist_unhashed(hnode));
2010 cfs_hash_lock(hs, 0);
2012 cfs_hash_dual_bd_get(hs, old_key, old_bds);
2013 cfs_hash_bd_get(hs, new_key, &new_bd);
2015 bds[0] = old_bds[0];
2016 bds[1] = old_bds[1];
2019 /* NB: bds[0] and bds[1] are ordered already */
2020 cfs_hash_bd_order(&bds[1], &bds[2]);
2021 cfs_hash_bd_order(&bds[0], &bds[1]);
2023 cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2024 if (likely(old_bds[1].bd_bucket == NULL)) {
2025 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2027 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2028 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2030 /* overwrite the key inside the locks; otherwise this may race with
2031 * other operations, e.g. rehash */
2032 cfs_hash_keycpy(hs, hnode, new_key);
2034 cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2035 cfs_hash_unlock(hs, 0);
2037 EXPORT_SYMBOL(cfs_hash_rehash_key);
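/*
 * Usage sketch (illustrative): atomically move @obj from @old_key to
 * @new_key with no window where it is absent from the table. "obj" and
 * "mo_hnode" are assumed names; this requires the table to have been
 * created with CFS_HASH_REHASH_KEY so that an hs_keycpy callback is
 * registered.
 *
 *	cfs_hash_rehash_key(hs, &old_key, &new_key, &obj->mo_hnode);
 */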
2039 void cfs_hash_debug_header(struct seq_file *m)
2041 seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n",
2042 CFS_HASH_BIGNAME_LEN, "name");
2044 EXPORT_SYMBOL(cfs_hash_debug_header);
2046 static struct cfs_hash_bucket **
2047 cfs_hash_full_bkts(struct cfs_hash *hs)
2049 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2050 if (hs->hs_rehash_buckets == NULL)
2051 return hs->hs_buckets;
2053 LASSERT(hs->hs_rehash_bits != 0);
2054 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2055 hs->hs_rehash_buckets : hs->hs_buckets;
2059 cfs_hash_full_nbkt(struct cfs_hash *hs)
2061 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2062 if (hs->hs_rehash_buckets == NULL)
2063 return CFS_HASH_NBKT(hs);
2065 LASSERT(hs->hs_rehash_bits != 0);
2066 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2067 CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2070 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2072 int dist[8] = { 0, };
2079 cfs_hash_lock(hs, 0);
2080 theta = __cfs_hash_theta(hs);
2082 seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
2083 CFS_HASH_BIGNAME_LEN, hs->hs_name,
2084 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2085 1 << hs->hs_max_bits,
2086 __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2087 __cfs_hash_theta_int(hs->hs_min_theta),
2088 __cfs_hash_theta_frac(hs->hs_min_theta),
2089 __cfs_hash_theta_int(hs->hs_max_theta),
2090 __cfs_hash_theta_frac(hs->hs_max_theta),
2091 hs->hs_flags, hs->hs_rehash_count);
2094 * The distribution is a summary of the chained hash depth in
2095 * each of the libcfs hash buckets. Each bucket's hsb_count is
2096 * divided by the hash theta value and used to generate a
2097 * histogram of the hash distribution. A uniform hash will
2098 * result in all hash buckets being close to the average, thus
2099 * only the first few entries in the histogram will be non-zero.
2100 * If your hash function results in a non-uniform hash, this will
2101 * be observable as outlier buckets in the distribution histogram.
2103 * Uniform hash distribution: 128/128/0/0/0/0/0/0
2104 * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
2106 for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2107 struct cfs_hash_bd bd;
2109 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2110 cfs_hash_bd_lock(hs, &bd, 0);
2111 if (maxdep < bd.bd_bucket->hsb_depmax) {
2112 maxdep = bd.bd_bucket->hsb_depmax;
2113 maxdepb = ffz(~maxdep);
2115 total += bd.bd_bucket->hsb_count;
2116 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2117 cfs_hash_bd_unlock(hs, &bd, 0);
2120 seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2121 for (i = 0; i < 8; i++)
2122 seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');
2124 cfs_hash_unlock(hs, 0);
2126 EXPORT_SYMBOL(cfs_hash_debug_str);