4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * libcfs/libcfs/hash.c
33 * Implement a hash class for hash processing in the Lustre system.
35 * Author: YuZhangyong <yzy@clusterfs.com>
37 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
38 * - Simplified API and improved documentation
39 * - Added per-hash feature flags:
40 * * CFS_HASH_DEBUG additional validation
41 * * CFS_HASH_REHASH dynamic rehashing
42 * - Added per-hash statistics
43 * - General performance enhancements
45 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
46 * - moved everything to libcfs
47 * - don't allow cur_bits != max_bits unless CFS_HASH_REHASH is set
48 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
49 * - buckets are allocated one by one (instead of as contiguous memory)
50 * to avoid unnecessary cacheline conflicts
52 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
53 * - "bucket" is a group of hlist_head now, user can specify bucket size
54 * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
55 * one lock for reducing memory overhead.
57 * - support lockless hash, caller will take care of locks:
58 * avoid lock overhead for hash tables that are already protected
59 * by locking in the caller for another reason
61 * - support both spin_lock/rwlock for bucket:
62 * spinlock contention overhead is lower than rwlock read/write
63 * contention, so using a spinlock to serialize operations on a
64 * bucket is more reasonable for frequently modified hash tables
66 * - support single-lock mode:
67 * one lock protects all hash operations, avoiding the overhead of
68 * multiple locks when the hash table is always small
70 * - removed a lot of unnecessary addref & decref on hash element:
71 * addref & decref are expensive atomic operations in many use-cases
74 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
75 * some Lustre use-cases require these functions to be strictly
76 * non-blocking; we need to schedule the required rehash on a
77 * different thread in those cases.
79 * - safer rehash on large hash tables:
80 * in the old implementation, the rehash function would exclusively
81 * lock the hash table and finish the rehash in one batch; that is
82 * dangerous on an SMP system because rehashing millions of elements
83 * could take a long time. The new rehash implementation can release
84 * the lock and relax the CPU in the middle of a rehash, so it is safe
85 * for another thread to search/change the hash table even while it is rehashing.
87 * - support two different refcount modes
88 * . the hash table keeps a refcount on each element
89 * . the hash table doesn't change the refcount when adding/removing elements
91 * - support long name hash table (for param-tree)
93 * - fix a bug in cfs_hash_rehash_key:
94 * in the old implementation, cfs_hash_rehash_key could corrupt the
95 * hash table because @key was overwritten without any protection.
96 * Now the user must define hs_keycpy for rehash-enabled hash
97 * tables, and cfs_hash_rehash_key overwrites the hash key
98 * inside the lock by calling hs_keycpy.
100 * - better hash iteration:
101 * now we support both locked and lockless iteration of the hash
102 * table. Also, the user can break the iteration by returning 1 in the callback.
104 #include <linux/seq_file.h>
105 #include <linux/log2.h>
107 #include <libcfs/linux/linux-list.h>
108 #include <libcfs/libcfs.h>
110 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
111 static unsigned int warn_on_depth = 8;
112 module_param(warn_on_depth, uint, 0644);
113 MODULE_PARM_DESC(warn_on_depth, "warn when hash depth is high.");
116 struct workqueue_struct *cfs_rehash_wq;
119 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
122 cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
125 cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
126 __acquires(&lock->spin)
128 spin_lock(&lock->spin);
132 cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
133 __releases(&lock->spin)
135 spin_unlock(&lock->spin);
139 cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
140 __acquires(&lock->rw)
143 read_lock(&lock->rw);
145 write_lock(&lock->rw);
149 cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
150 __releases(&lock->rw)
153 read_unlock(&lock->rw);
155 write_unlock(&lock->rw);
159 cfs_hash_rw_sem_lock(union cfs_hash_lock *lock, int exclusive)
160 __acquires(&lock->rw_sem)
163 down_read(&lock->rw_sem);
165 down_write(&lock->rw_sem);
169 cfs_hash_rw_sem_unlock(union cfs_hash_lock *lock, int exclusive)
170 __releases(&lock->rw_sem)
173 up_read(&lock->rw_sem);
175 up_write(&lock->rw_sem);
179 static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
180 .hs_lock = cfs_hash_nl_lock,
181 .hs_unlock = cfs_hash_nl_unlock,
182 .hs_bkt_lock = cfs_hash_nl_lock,
183 .hs_bkt_unlock = cfs_hash_nl_unlock,
186 /** no bucket lock, one spinlock to protect everything */
187 static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
188 .hs_lock = cfs_hash_spin_lock,
189 .hs_unlock = cfs_hash_spin_unlock,
190 .hs_bkt_lock = cfs_hash_nl_lock,
191 .hs_bkt_unlock = cfs_hash_nl_unlock,
194 /** spin bucket lock, rehash is enabled */
195 static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
196 .hs_lock = cfs_hash_rw_lock,
197 .hs_unlock = cfs_hash_rw_unlock,
198 .hs_bkt_lock = cfs_hash_spin_lock,
199 .hs_bkt_unlock = cfs_hash_spin_unlock,
202 /** rw bucket lock, rehash is enabled */
203 static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
204 .hs_lock = cfs_hash_rw_lock,
205 .hs_unlock = cfs_hash_rw_unlock,
206 .hs_bkt_lock = cfs_hash_rw_lock,
207 .hs_bkt_unlock = cfs_hash_rw_unlock,
210 /** spin bucket lock, rehash is disabled */
211 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
212 .hs_lock = cfs_hash_nl_lock,
213 .hs_unlock = cfs_hash_nl_unlock,
214 .hs_bkt_lock = cfs_hash_spin_lock,
215 .hs_bkt_unlock = cfs_hash_spin_unlock,
218 /** rw bucket lock, rehash is disabled */
219 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
220 .hs_lock = cfs_hash_nl_lock,
221 .hs_unlock = cfs_hash_nl_unlock,
222 .hs_bkt_lock = cfs_hash_rw_lock,
223 .hs_bkt_unlock = cfs_hash_rw_unlock,
226 /** rw_sem bucket lock, rehash is disabled */
227 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_sem_lops = {
228 .hs_lock = cfs_hash_nl_lock,
229 .hs_unlock = cfs_hash_nl_unlock,
230 .hs_bkt_lock = cfs_hash_rw_sem_lock,
231 .hs_bkt_unlock = cfs_hash_rw_sem_unlock,
234 /** rw_sem bucket lock, rehash is enabled */
235 static struct cfs_hash_lock_ops cfs_hash_bkt_rw_sem_lops = {
236 .hs_lock = cfs_hash_rw_sem_lock,
237 .hs_unlock = cfs_hash_rw_sem_unlock,
238 .hs_bkt_lock = cfs_hash_rw_sem_lock,
239 .hs_bkt_unlock = cfs_hash_rw_sem_unlock,
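/*
 * For orientation, a summary of the flag -> lock-ops selection done by
 * cfs_hash_lock_setup() below:
 *
 *	CFS_HASH_NO_LOCK		-> cfs_hash_nl_lops
 *	CFS_HASH_NO_BKTLOCK		-> cfs_hash_nbl_lops
 *	rehash + spin bktlock		-> cfs_hash_bkt_spin_lops
 *	rehash + rw bktlock		-> cfs_hash_bkt_rw_lops
 *	rehash + rw_sem bktlock		-> cfs_hash_bkt_rw_sem_lops
 *	no rehash + spin bktlock	-> cfs_hash_nr_bkt_spin_lops
 *	no rehash + rw bktlock		-> cfs_hash_nr_bkt_rw_lops
 *	no rehash + rw_sem bktlock	-> cfs_hash_nr_bkt_rw_sem_lops
 */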
243 cfs_hash_lock_setup(struct cfs_hash *hs)
245 if (cfs_hash_with_no_lock(hs)) {
246 hs->hs_lops = &cfs_hash_nl_lops;
248 } else if (cfs_hash_with_no_bktlock(hs)) {
249 hs->hs_lops = &cfs_hash_nbl_lops;
250 spin_lock_init(&hs->hs_lock.spin);
252 } else if (cfs_hash_with_rehash(hs)) {
253 if (cfs_hash_with_rw_sem_bktlock(hs)) {
254 init_rwsem(&hs->hs_lock.rw_sem);
255 hs->hs_lops = &cfs_hash_bkt_rw_sem_lops;
257 rwlock_init(&hs->hs_lock.rw);
259 if (cfs_hash_with_rw_bktlock(hs))
260 hs->hs_lops = &cfs_hash_bkt_rw_lops;
261 else if (cfs_hash_with_spin_bktlock(hs))
262 hs->hs_lops = &cfs_hash_bkt_spin_lops;
267 if (cfs_hash_with_rw_bktlock(hs))
268 hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
269 else if (cfs_hash_with_spin_bktlock(hs))
270 hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
271 else if (cfs_hash_with_rw_sem_bktlock(hs))
272 hs->hs_lops = &cfs_hash_nr_bkt_rw_sem_lops;
279 * Simple hash head without depth tracking;
280 * a new element is always added to the head of the hlist
282 struct cfs_hash_head {
283 struct hlist_head hh_head; /**< entries list */
287 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
289 return sizeof(struct cfs_hash_head);
292 static struct hlist_head *
293 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
295 struct cfs_hash_head *head;
297 head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
298 return &head[bd->bd_offset].hh_head;
302 cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
303 struct hlist_node *hnode)
305 hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
306 return -1; /* unknown depth */
310 cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
311 struct hlist_node *hnode)
313 hlist_del_init(hnode);
314 return -1; /* unknown depth */
318 * Simple hash head with depth tracking;
319 * a new element is always added to the head of the hlist
321 struct cfs_hash_head_dep {
322 struct hlist_head hd_head; /**< entries list */
323 unsigned int hd_depth; /**< list length */
327 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
329 return sizeof(struct cfs_hash_head_dep);
332 static struct hlist_head *
333 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
335 struct cfs_hash_head_dep *head;
337 head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
338 return &head[bd->bd_offset].hd_head;
342 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
343 struct hlist_node *hnode)
345 struct cfs_hash_head_dep *hh;
347 hh = container_of(cfs_hash_hd_hhead(hs, bd),
348 struct cfs_hash_head_dep, hd_head);
349 hlist_add_head(hnode, &hh->hd_head);
350 return ++hh->hd_depth;
354 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
355 struct hlist_node *hnode)
357 struct cfs_hash_head_dep *hh;
359 hh = container_of(cfs_hash_hd_hhead(hs, bd),
360 struct cfs_hash_head_dep, hd_head);
361 hlist_del_init(hnode);
362 return --hh->hd_depth;
366 * hash head with a tail pointer (double links) and no depth tracking;
367 * a new element is always added to the tail of the hlist
369 struct cfs_hash_dhead {
370 struct hlist_head dh_head; /**< entries list */
371 struct hlist_node *dh_tail; /**< the last entry */
375 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
377 return sizeof(struct cfs_hash_dhead);
380 static struct hlist_head *
381 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
383 struct cfs_hash_dhead *head;
385 head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
386 return &head[bd->bd_offset].dh_head;
390 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
391 struct hlist_node *hnode)
393 struct cfs_hash_dhead *dh;
395 dh = container_of(cfs_hash_dh_hhead(hs, bd),
396 struct cfs_hash_dhead, dh_head);
397 if (dh->dh_tail != NULL) /* not empty */
398 hlist_add_behind(hnode, dh->dh_tail);
399 else /* empty list */
400 hlist_add_head(hnode, &dh->dh_head);
402 return -1; /* unknown depth */
406 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
407 struct hlist_node *hnd)
409 struct cfs_hash_dhead *dh;
411 dh = container_of(cfs_hash_dh_hhead(hs, bd),
412 struct cfs_hash_dhead, dh_head);
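	/*
	 * If @hnd is the tail, pull dh_tail back to the previous entry:
	 * hnd->pprev points at the previous node's ->next field (or at
	 * the list head's ->first pointer when @hnd is also the first
	 * entry), so container_of() recovers the previous hlist_node.
	 */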
413 if (hnd->next == NULL) { /* it's the tail */
414 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
415 container_of(hnd->pprev, struct hlist_node, next);
418 return -1; /* unknown depth */
422 * hash head with a tail pointer (double links) and depth tracking;
423 * a new element is always added to the tail of the hlist
425 struct cfs_hash_dhead_dep {
426 struct hlist_head dd_head; /**< entries list */
427 struct hlist_node *dd_tail; /**< the last entry */
428 unsigned int dd_depth; /**< list length */
432 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
434 return sizeof(struct cfs_hash_dhead_dep);
437 static struct hlist_head *
438 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
440 struct cfs_hash_dhead_dep *head;
442 head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
443 return &head[bd->bd_offset].dd_head;
447 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
448 struct hlist_node *hnode)
450 struct cfs_hash_dhead_dep *dh;
452 dh = container_of(cfs_hash_dd_hhead(hs, bd),
453 struct cfs_hash_dhead_dep, dd_head);
454 if (dh->dd_tail != NULL) /* not empty */
455 hlist_add_behind(hnode, dh->dd_tail);
456 else /* empty list */
457 hlist_add_head(hnode, &dh->dd_head);
459 return ++dh->dd_depth;
463 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
464 struct hlist_node *hnd)
466 struct cfs_hash_dhead_dep *dh;
468 dh = container_of(cfs_hash_dd_hhead(hs, bd),
469 struct cfs_hash_dhead_dep, dd_head);
470 if (hnd->next == NULL) { /* it's the tail */
471 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
472 container_of(hnd->pprev, struct hlist_node, next);
475 return --dh->dd_depth;
478 static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
479 .hop_hhead = cfs_hash_hh_hhead,
480 .hop_hhead_size = cfs_hash_hh_hhead_size,
481 .hop_hnode_add = cfs_hash_hh_hnode_add,
482 .hop_hnode_del = cfs_hash_hh_hnode_del,
485 static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
486 .hop_hhead = cfs_hash_hd_hhead,
487 .hop_hhead_size = cfs_hash_hd_hhead_size,
488 .hop_hnode_add = cfs_hash_hd_hnode_add,
489 .hop_hnode_del = cfs_hash_hd_hnode_del,
492 static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
493 .hop_hhead = cfs_hash_dh_hhead,
494 .hop_hhead_size = cfs_hash_dh_hhead_size,
495 .hop_hnode_add = cfs_hash_dh_hnode_add,
496 .hop_hnode_del = cfs_hash_dh_hnode_del,
499 static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
500 .hop_hhead = cfs_hash_dd_hhead,
501 .hop_hhead_size = cfs_hash_dd_hhead_size,
502 .hop_hnode_add = cfs_hash_dd_hnode_add,
503 .hop_hnode_del = cfs_hash_dd_hnode_del,
507 cfs_hash_hlist_setup(struct cfs_hash *hs)
509 if (cfs_hash_with_add_tail(hs)) {
510 hs->hs_hops = cfs_hash_with_depth(hs) ?
511 &cfs_hash_dd_hops : &cfs_hash_dh_hops;
513 hs->hs_hops = cfs_hash_with_depth(hs) ?
514 &cfs_hash_hd_hops : &cfs_hash_hh_hops;
519 cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
520 unsigned int bits, const void *key, struct cfs_hash_bd *bd)
522 unsigned int index = cfs_hash_id(hs, key, bits);
524 LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
526 bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
527 bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
531 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
533 /* NB: caller should hold hs->hs_lock if REHASH is set */
534 if (likely(hs->hs_rehash_buckets == NULL)) {
535 cfs_hash_bd_from_key(hs, hs->hs_buckets,
536 hs->hs_cur_bits, key, bd);
538 LASSERT(hs->hs_rehash_bits != 0);
539 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
540 hs->hs_rehash_bits, key, bd);
543 EXPORT_SYMBOL(cfs_hash_bd_get);
546 cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
548 if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
551 bd->bd_bucket->hsb_depmax = dep_cur;
552 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
553 if (likely(warn_on_depth == 0 ||
554 max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
557 spin_lock(&hs->hs_dep_lock);
558 hs->hs_dep_max = dep_cur;
559 hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
560 hs->hs_dep_off = bd->bd_offset;
561 hs->hs_dep_bits = hs->hs_cur_bits;
562 spin_unlock(&hs->hs_dep_lock);
564 queue_work(cfs_rehash_wq, &hs->hs_dep_work);
569 cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
570 struct hlist_node *hnode)
574 rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
575 cfs_hash_bd_dep_record(hs, bd, rc);
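	/*
	 * hsb_version counts modifications of this bucket so lockless
	 * iterators can detect changes; skip 0 on wraparound, since
	 * buckets start life at version 1 (see cfs_hash_buckets_realloc).
	 */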
576 bd->bd_bucket->hsb_version++;
577 if (unlikely(bd->bd_bucket->hsb_version == 0))
578 bd->bd_bucket->hsb_version++;
579 bd->bd_bucket->hsb_count++;
581 if (cfs_hash_with_counter(hs))
582 atomic_inc(&hs->hs_count);
583 if (!cfs_hash_with_no_itemref(hs))
584 cfs_hash_get(hs, hnode);
586 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
589 cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
590 struct hlist_node *hnode)
592 hs->hs_hops->hop_hnode_del(hs, bd, hnode);
594 LASSERT(bd->bd_bucket->hsb_count > 0);
595 bd->bd_bucket->hsb_count--;
596 bd->bd_bucket->hsb_version++;
597 if (unlikely(bd->bd_bucket->hsb_version == 0))
598 bd->bd_bucket->hsb_version++;
600 if (cfs_hash_with_counter(hs)) {
601 LASSERT(atomic_read(&hs->hs_count) > 0);
602 atomic_dec(&hs->hs_count);
604 if (!cfs_hash_with_no_itemref(hs))
605 cfs_hash_put_locked(hs, hnode);
607 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
610 cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
611 struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
613 struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
614 struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
617 if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
620 /* use hop_hnode_add/del directly to avoid the atomic & refcount ops
621 * done in cfs_hash_bd_del/add_locked */
622 hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
623 rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
624 cfs_hash_bd_dep_record(hs, bd_new, rc);
626 LASSERT(obkt->hsb_count > 0);
629 if (unlikely(obkt->hsb_version == 0))
633 if (unlikely(nbkt->hsb_version == 0))
638 /** always set, for sanity (avoid ZERO intent) */
639 CFS_HS_LOOKUP_MASK_FIND = BIT(0),
640 /** return entry with a ref */
641 CFS_HS_LOOKUP_MASK_REF = BIT(1),
642 /** add entry if not existing */
643 CFS_HS_LOOKUP_MASK_ADD = BIT(2),
644 /** delete entry, ignore other masks */
645 CFS_HS_LOOKUP_MASK_DEL = BIT(3),
648 enum cfs_hash_lookup_intent {
649 /** return item w/o refcount */
650 CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND,
651 /** return item with refcount */
652 CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND |
653 CFS_HS_LOOKUP_MASK_REF),
654 /** return item w/o refcount if existed, otherwise add */
655 CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND |
656 CFS_HS_LOOKUP_MASK_ADD),
657 /** return item with refcount if existed, otherwise add */
658 CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
659 CFS_HS_LOOKUP_MASK_ADD),
660 /** delete if existed */
661 CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
662 CFS_HS_LOOKUP_MASK_DEL)
665 static struct hlist_node *
666 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
667 const void *key, struct hlist_node *hnode,
668 enum cfs_hash_lookup_intent intent)
671 struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
672 struct hlist_node *ehnode;
673 struct hlist_node *match;
674 int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
676 /* with this function we can avoid a lot of useless refcount ops,
677 * which are expensive atomic operations most of the time. */
678 match = intent_add ? NULL : hnode;
679 hlist_for_each(ehnode, hhead) {
680 if (!cfs_hash_keycmp(hs, key, ehnode))
683 if (match != NULL && match != ehnode) /* can't match */
687 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
688 cfs_hash_bd_del_locked(hs, bd, ehnode);
692 /* caller wants refcount? */
693 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
694 cfs_hash_get(hs, ehnode);
701 LASSERT(hnode != NULL);
702 cfs_hash_bd_add_locked(hs, bd, hnode);
707 cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
710 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
711 CFS_HS_LOOKUP_IT_FIND);
713 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
716 cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
719 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
720 CFS_HS_LOOKUP_IT_PEEK);
722 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
725 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
726 unsigned n, int excl)
728 struct cfs_hash_bucket *prev = NULL;
732 * bds must be sorted in ascending order of bd->bd_bucket->hsb_index.
733 * NB: it's possible that several bds point to the same bucket but
734 * have different bd::bd_offset, so we must take care to avoid deadlock.
736 cfs_hash_for_each_bd(bds, n, i) {
737 if (prev == bds[i].bd_bucket)
740 LASSERT(prev == NULL ||
741 prev->hsb_index < bds[i].bd_bucket->hsb_index);
742 cfs_hash_bd_lock(hs, &bds[i], excl);
743 prev = bds[i].bd_bucket;
748 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
749 unsigned n, int excl)
751 struct cfs_hash_bucket *prev = NULL;
754 cfs_hash_for_each_bd(bds, n, i) {
755 if (prev != bds[i].bd_bucket) {
756 cfs_hash_bd_unlock(hs, &bds[i], excl);
757 prev = bds[i].bd_bucket;
762 static struct hlist_node *
763 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
764 unsigned n, const void *key)
766 struct hlist_node *ehnode;
769 cfs_hash_for_each_bd(bds, n, i) {
770 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
771 CFS_HS_LOOKUP_IT_FIND);
778 static struct hlist_node *
779 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
780 unsigned n, const void *key,
781 struct hlist_node *hnode, int noref)
783 struct hlist_node *ehnode;
787 LASSERT(hnode != NULL);
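	/*
	 * PEEK is FIND without taking a reference; multiplying the REF
	 * mask by !noref ORs in CFS_HS_LOOKUP_MASK_REF only when the
	 * caller asked for a referenced return (noref == 0).
	 */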
788 intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
790 cfs_hash_for_each_bd(bds, n, i) {
791 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
797 if (i == 1) { /* only one bucket */
798 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
800 struct cfs_hash_bd mybd;
802 cfs_hash_bd_get(hs, key, &mybd);
803 cfs_hash_bd_add_locked(hs, &mybd, hnode);
809 static struct hlist_node *
810 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
811 unsigned n, const void *key,
812 struct hlist_node *hnode)
814 struct hlist_node *ehnode;
817 cfs_hash_for_each_bd(bds, n, i) {
818 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
819 CFS_HS_LOOKUP_IT_FINDDEL);
827 cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
831 if (bd2->bd_bucket == NULL)
834 if (bd1->bd_bucket == NULL) {
836 bd2->bd_bucket = NULL;
840 rc = cfs_hash_bd_compare(bd1, bd2);
842 bd2->bd_bucket = NULL;
845 swap(*bd1, *bd2); /* swap bd1 and bd2 */
850 cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
851 struct cfs_hash_bd *bds)
853 /* NB: caller should hold hs_lock.rw if REHASH is set */
854 cfs_hash_bd_from_key(hs, hs->hs_buckets,
855 hs->hs_cur_bits, key, &bds[0]);
856 if (likely(hs->hs_rehash_buckets == NULL)) {
857 /* no rehash or not rehashing */
858 bds[1].bd_bucket = NULL;
862 LASSERT(hs->hs_rehash_bits != 0);
863 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
864 hs->hs_rehash_bits, key, &bds[1]);
866 cfs_hash_bd_order(&bds[0], &bds[1]);
870 cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
872 cfs_hash_multi_bd_lock(hs, bds, 2, excl);
876 cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
878 cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
882 cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
885 return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
889 cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
890 const void *key, struct hlist_node *hnode,
893 return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
898 cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
899 const void *key, struct hlist_node *hnode)
901 return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
905 cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
906 int bkt_size, int prev_size, int size)
910 for (i = prev_size; i < size; i++) {
911 if (buckets[i] != NULL)
912 LIBCFS_FREE(buckets[i], bkt_size);
915 LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
919 * Create or grow bucket memory. Return old_buckets if no allocation was
920 * needed, the newly allocated buckets if allocation was needed and
921 * successful, and NULL on error.
923 static struct cfs_hash_bucket **
924 cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
925 unsigned int old_size, unsigned int new_size)
927 struct cfs_hash_bucket **new_bkts;
930 LASSERT(old_size == 0 || old_bkts != NULL);
932 if (old_bkts != NULL && old_size == new_size)
935 LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
936 if (new_bkts == NULL)
939 if (old_bkts != NULL) {
940 memcpy(new_bkts, old_bkts,
941 min(old_size, new_size) * sizeof(*old_bkts));
944 for (i = old_size; i < new_size; i++) {
945 struct hlist_head *hhead;
946 struct cfs_hash_bd bd;
948 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
949 if (new_bkts[i] == NULL) {
950 cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
955 new_bkts[i]->hsb_index = i;
956 new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
957 new_bkts[i]->hsb_depmax = -1; /* unknown */
958 bd.bd_bucket = new_bkts[i];
959 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
960 INIT_HLIST_HEAD(hhead);
962 if (cfs_hash_with_no_lock(hs) ||
963 cfs_hash_with_no_bktlock(hs))
966 if (cfs_hash_with_rw_bktlock(hs))
967 rwlock_init(&new_bkts[i]->hsb_lock.rw);
968 else if (cfs_hash_with_spin_bktlock(hs))
969 spin_lock_init(&new_bkts[i]->hsb_lock.spin);
970 else if (cfs_hash_with_rw_sem_bktlock(hs))
971 init_rwsem(&new_bkts[i]->hsb_lock.rw_sem);
973 LBUG(); /* invalid use-case */
979 * Initialize new libcfs hash, where:
980 * @name - Descriptive hash name
981 * @cur_bits - Initial hash table size, in bits
982 * @max_bits - Maximum allowed hash table resize, in bits
983 * @ops - Registered hash table operations
984 * @flags - CFS_HASH_REHASH enable dynamic hash resizing
985 * - CFS_HASH_SORT enable chained hash sort
987 static void cfs_hash_rehash_worker(struct work_struct *work);
989 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
990 static void cfs_hash_dep_print(struct work_struct *work)
992 struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
998 spin_lock(&hs->hs_dep_lock);
999 dep = hs->hs_dep_max;
1000 bkt = hs->hs_dep_bkt;
1001 off = hs->hs_dep_off;
1002 bits = hs->hs_dep_bits;
1003 spin_unlock(&hs->hs_dep_lock);
1005 LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
1006 hs->hs_name, bits, dep, bkt, off);
1007 spin_lock(&hs->hs_dep_lock);
1008 hs->hs_dep_bits = 0; /* mark as workitem done */
1009 spin_unlock(&hs->hs_dep_lock);
1013 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
1015 spin_lock_init(&hs->hs_dep_lock);
1016 INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
1019 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
1021 cancel_work_sync(&hs->hs_dep_work);
1024 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1026 static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
1027 static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
1029 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1032 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1033 unsigned bkt_bits, unsigned extra_bytes,
1034 unsigned min_theta, unsigned max_theta,
1035 struct cfs_hash_ops *ops, unsigned flags)
1037 struct cfs_hash *hs;
1042 BUILD_BUG_ON(CFS_HASH_THETA_BITS >= 15);
1044 LASSERT(name != NULL);
1045 LASSERT(ops != NULL);
1046 LASSERT(ops->hs_key);
1047 LASSERT(ops->hs_hash);
1048 LASSERT(ops->hs_object);
1049 LASSERT(ops->hs_keycmp);
1050 LASSERT(ops->hs_get != NULL);
1051 LASSERT(ops->hs_put != NULL || ops->hs_put_locked != NULL);
1053 if ((flags & CFS_HASH_REHASH) != 0)
1054 flags |= CFS_HASH_COUNTER; /* must have counter */
1056 LASSERT(cur_bits > 0);
1057 LASSERT(cur_bits >= bkt_bits);
1058 LASSERT(max_bits >= cur_bits && max_bits < 31);
1059 LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1060 LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1061 (flags & CFS_HASH_NO_LOCK) == 0));
1062 LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1063 ops->hs_keycpy != NULL));
1065 len = (flags & CFS_HASH_BIGNAME) == 0 ?
1066 CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1067 LIBCFS_ALLOC(hs, sizeof(struct cfs_hash) + len);
1071 strscpy(hs->hs_name, name, len);
1072 hs->hs_flags = flags;
1074 kref_init(&hs->hs_refcount);
1075 atomic_set(&hs->hs_count, 0);
1077 cfs_hash_lock_setup(hs);
1078 cfs_hash_hlist_setup(hs);
1080 hs->hs_cur_bits = (__u8)cur_bits;
1081 hs->hs_min_bits = (__u8)cur_bits;
1082 hs->hs_max_bits = (__u8)max_bits;
1083 hs->hs_bkt_bits = (__u8)bkt_bits;
1086 hs->hs_extra_bytes = extra_bytes;
1087 hs->hs_rehash_bits = 0;
1088 INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
1089 cfs_hash_depth_wi_init(hs);
1091 if (cfs_hash_with_rehash(hs))
1092 __cfs_hash_set_theta(hs, min_theta, max_theta);
1094 hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1096 if (hs->hs_buckets != NULL)
1099 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1102 EXPORT_SYMBOL(cfs_hash_create);
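/*
 * Example usage (an illustrative sketch; "my_hash_ops" and the sizing
 * numbers are hypothetical, not part of this file):
 *
 *	struct cfs_hash *hs;
 *
 *	hs = cfs_hash_create("my_table", 5, 10, 3, 0,
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &my_hash_ops, CFS_HASH_DEFAULT);
 *	if (hs == NULL)
 *		return -ENOMEM;
 *
 * Assuming CFS_HASH_DEFAULT includes CFS_HASH_REHASH, the table may
 * grow from 2^5 to 2^10 hlist heads, grouped 2^3 heads per bucket.
 * Drop the last reference with cfs_hash_putref() when finished.
 */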
1105 * Cleanup libcfs hash @hs.
1108 cfs_hash_destroy(struct kref *kref)
1110 struct cfs_hash *hs = container_of(kref, struct cfs_hash, hs_refcount);
1111 struct hlist_node *hnode;
1112 struct hlist_node *pos;
1113 struct cfs_hash_bd bd;
1117 LASSERT(hs != NULL);
1118 LASSERT(!cfs_hash_is_exiting(hs) &&
1119 !cfs_hash_is_iterating(hs));
1122 * prohibit further rehashes; no lock is needed because
1123 * I'm the only (last) one who can change it.
1126 if (cfs_hash_with_rehash(hs))
1127 cfs_hash_rehash_cancel(hs);
1129 cfs_hash_depth_wi_cancel(hs);
1130 /* rehash should be done/canceled */
1131 LASSERT(hs->hs_buckets != NULL &&
1132 hs->hs_rehash_buckets == NULL);
1134 cfs_hash_for_each_bucket(hs, &bd, i) {
1135 struct hlist_head *hhead;
1137 LASSERT(bd.bd_bucket != NULL);
1138 /* no need to take this lock; done only for code consistency */
1139 cfs_hash_bd_lock(hs, &bd, 1);
1141 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1142 hlist_for_each_safe(hnode, pos, hhead) {
1143 LASSERTF(!cfs_hash_with_assert_empty(hs),
1144 "hash %s bucket %u(%u) is not "
1145 " empty: %u items left\n",
1146 hs->hs_name, bd.bd_bucket->hsb_index,
1147 bd.bd_offset, bd.bd_bucket->hsb_count);
1148 /* can't validate the key here because we
1149 * may have interrupted a rehash */
1150 cfs_hash_bd_del_locked(hs, &bd, hnode);
1151 cfs_hash_exit(hs, hnode);
1154 LASSERT(bd.bd_bucket->hsb_count == 0);
1155 cfs_hash_bd_unlock(hs, &bd, 1);
1159 LASSERT(atomic_read(&hs->hs_count) == 0);
1161 cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1162 0, CFS_HASH_NBKT(hs));
1163 i = cfs_hash_with_bigname(hs) ?
1164 CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1165 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1170 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1172 if (kref_get_unless_zero(&hs->hs_refcount))
1176 EXPORT_SYMBOL(cfs_hash_getref);
1178 void cfs_hash_putref(struct cfs_hash *hs)
1180 kref_put(&hs->hs_refcount, cfs_hash_destroy);
1182 EXPORT_SYMBOL(cfs_hash_putref);
1185 cfs_hash_rehash_bits(struct cfs_hash *hs)
1187 if (cfs_hash_with_no_lock(hs) ||
1188 !cfs_hash_with_rehash(hs))
1191 if (unlikely(cfs_hash_is_exiting(hs)))
1194 if (unlikely(cfs_hash_is_rehashing(hs)))
1197 if (unlikely(cfs_hash_is_iterating(hs)))
1200 /* XXX: need to handle case with max_theta != 2.0
1201 * and the case with min_theta != 0.5 */
1202 if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1203 (__cfs_hash_theta(hs) > hs->hs_max_theta))
1204 return hs->hs_cur_bits + 1;
1206 if (!cfs_hash_with_shrink(hs))
1209 if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1210 (__cfs_hash_theta(hs) < hs->hs_min_theta))
1211 return hs->hs_cur_bits - 1;
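/*
 * Theta is the hash load factor (average elements per hlist head)
 * kept in CFS_HASH_THETA_BITS fixed point. Illustrative example:
 * 3000 elements spread over 2^10 hlist heads give theta ~= 2.93;
 * with max_theta = 2.0 the test above asks to grow by one bit,
 * roughly halving the average chain length.
 */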
1217 * don't allow inline rehash if:
1218 * - user wants non-blocking change (add/del) on hash table
1219 * - there are too many elements
1222 cfs_hash_rehash_inline(struct cfs_hash *hs)
1224 return !cfs_hash_with_nblk_change(hs) &&
1225 atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
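/*
 * Callers below pass this to cfs_hash_rehash(): small tables that can
 * tolerate blocking are rehashed inline in the caller's context, while
 * non-blocking or very large tables defer the work to cfs_rehash_wq.
 */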
1229 * Add item @hnode to libcfs hash @hs using @key. The registered
1230 * ops->hs_get function will be called when the item is added.
1233 cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1235 struct cfs_hash_bd bd;
1238 LASSERT(hlist_unhashed(hnode));
1240 cfs_hash_lock(hs, 0);
1241 cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1243 cfs_hash_key_validate(hs, key, hnode);
1244 cfs_hash_bd_add_locked(hs, &bd, hnode);
1246 cfs_hash_bd_unlock(hs, &bd, 1);
1248 bits = cfs_hash_rehash_bits(hs);
1249 cfs_hash_unlock(hs, 0);
1251 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1253 EXPORT_SYMBOL(cfs_hash_add);
1255 static struct hlist_node *
1256 cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1257 struct hlist_node *hnode, int noref)
1259 struct hlist_node *ehnode;
1260 struct cfs_hash_bd bds[2];
1263 LASSERTF(hlist_unhashed(hnode), "hnode = %px\n", hnode);
1265 cfs_hash_lock(hs, 0);
1266 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1268 cfs_hash_key_validate(hs, key, hnode);
1269 ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1271 cfs_hash_dual_bd_unlock(hs, bds, 1);
1273 if (ehnode == hnode) /* new item added */
1274 bits = cfs_hash_rehash_bits(hs);
1275 cfs_hash_unlock(hs, 0);
1277 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1283 * Add item @hnode to libcfs hash @hs using @key. The registered
1284 * ops->hs_get function will be called if the item was added.
1285 * Returns 0 on success or -EALREADY on key collisions.
1288 cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
1289 struct hlist_node *hnode)
1291 return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1294 EXPORT_SYMBOL(cfs_hash_add_unique);
1297 * Add item @hnode to libcfs hash @hs using @key. If this @key
1298 * already exists in the hash then ops->hs_get will be called on the
1299 * conflicting entry and that entry will be returned to the caller.
1300 * Otherwise ops->hs_get is called on the item which was added.
1303 cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1304 struct hlist_node *hnode)
1306 hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1308 return cfs_hash_object(hs, hnode);
1310 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1313 * Delete item @hnode from the libcfs hash @hs using @key. The @key
1314 * is required to ensure the correct hash bucket is locked since there
1315 * is no direct linkage from the item to the bucket. The object
1316 * removed from the hash will be returned and ops->hs_put is called
1317 * on the removed object.
1320 cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1324 struct cfs_hash_bd bds[2];
1326 cfs_hash_lock(hs, 0);
1327 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1329 /* NB: do nothing if @hnode is not in hash table */
1330 if (hnode == NULL || !hlist_unhashed(hnode)) {
1331 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1332 cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1334 hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1339 if (hnode != NULL) {
1340 obj = cfs_hash_object(hs, hnode);
1341 bits = cfs_hash_rehash_bits(hs);
1344 cfs_hash_dual_bd_unlock(hs, bds, 1);
1345 cfs_hash_unlock(hs, 0);
1347 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1351 EXPORT_SYMBOL(cfs_hash_del);
1354 * Delete item given @key in libcfs hash @hs. The first @key found in
1355 * the hash will be removed, if the key exists multiple times in the hash
1356 * @hs this function must be called once per key. The removed object
1357 * will be returned and ops->hs_put is called on the removed object.
1360 cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1362 return cfs_hash_del(hs, key, NULL);
1364 EXPORT_SYMBOL(cfs_hash_del_key);
1367 * Lookup an item using @key in the libcfs hash @hs and return it.
1368 * If the @key is found in the hash hs->hs_get() is called and the
1369 * matching object is returned. It is the caller's responsibility
1370 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1371 * when finished with the object. If the @key was not found
1372 * in the hash @hs NULL is returned.
1375 cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1378 struct hlist_node *hnode;
1379 struct cfs_hash_bd bds[2];
1381 cfs_hash_lock(hs, 0);
1382 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1384 hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1386 obj = cfs_hash_object(hs, hnode);
1388 cfs_hash_dual_bd_unlock(hs, bds, 0);
1389 cfs_hash_unlock(hs, 0);
1393 EXPORT_SYMBOL(cfs_hash_lookup);
1396 cfs_hash_for_each_enter(struct cfs_hash *hs)
1398 LASSERT(!cfs_hash_is_exiting(hs));
1400 if (!cfs_hash_with_rehash(hs))
1403 * NB: there's a race on cfs_hash::hs_iterating, but it doesn't matter
1404 * because it's just an unreliable signal to the rehash thread;
1405 * the rehash thread will try to finish the rehash ASAP when it sees this.
1407 hs->hs_iterating = 1;
1409 cfs_hash_lock(hs, 1);
1411 cfs_hash_unlock(hs, 1);
1413 /* NB: iteration is mostly called by service threads;
1414 * we tend to cancel a pending rehash request instead of
1415 * blocking the service thread, and will relaunch the rehash request after iteration
1418 if (cfs_hash_is_rehashing(hs))
1419 cfs_hash_rehash_cancel(hs);
1423 cfs_hash_for_each_exit(struct cfs_hash *hs)
1428 if (!cfs_hash_with_rehash(hs))
1430 cfs_hash_lock(hs, 1);
1431 remained = --hs->hs_iterators;
1432 bits = cfs_hash_rehash_bits(hs);
1433 cfs_hash_unlock(hs, 1);
1434 /* NB: there's a race on cfs_hash::hs_iterating, see above */
1436 hs->hs_iterating = 0;
1438 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1444 * For each item in the libcfs hash @hs call the passed callback @func
1445 * and pass to it as an argument each hash item and the private @data.
1447 * a) the function may sleep!
1448 * b) during the callback:
1449 * . the bucket lock is held so the callback must never sleep.
1450 * . if @remove_safe is true, the user can remove the current item by
1451 * cfs_hash_bd_del_locked
1454 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1455 void *data, int remove_safe)
1457 struct hlist_node *hnode;
1458 struct hlist_node *pos;
1459 struct cfs_hash_bd bd;
1461 int excl = !!remove_safe;
1466 cfs_hash_for_each_enter(hs);
1468 cfs_hash_lock(hs, 0);
1469 LASSERT(!cfs_hash_is_rehashing(hs));
1471 cfs_hash_for_each_bucket(hs, &bd, i) {
1472 struct hlist_head *hhead;
1474 cfs_hash_bd_lock(hs, &bd, excl);
1475 if (func == NULL) { /* only glimpse size */
1476 count += bd.bd_bucket->hsb_count;
1477 cfs_hash_bd_unlock(hs, &bd, excl);
1481 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1482 hlist_for_each_safe(hnode, pos, hhead) {
1483 cfs_hash_bucket_validate(hs, &bd, hnode);
1486 if (func(hs, &bd, hnode, data)) {
1487 cfs_hash_bd_unlock(hs, &bd, excl);
1492 cfs_hash_bd_unlock(hs, &bd, excl);
1493 if (loop < CFS_HASH_LOOP_HOG)
1496 cfs_hash_unlock(hs, 0);
1498 cfs_hash_lock(hs, 0);
1501 cfs_hash_unlock(hs, 0);
1503 cfs_hash_for_each_exit(hs);
1507 struct cfs_hash_cond_arg {
1508 cfs_hash_cond_opt_cb_t func;
1513 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1514 struct hlist_node *hnode, void *data)
1516 struct cfs_hash_cond_arg *cond = data;
1518 if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1519 cfs_hash_bd_del_locked(hs, bd, hnode);
1524 * Delete items from the libcfs hash @hs when @func returns true.
1525 * The write lock is held during the loop over each bucket to
1526 * prevent any object from being referenced.
1529 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1531 struct cfs_hash_cond_arg arg = {
1536 cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1538 EXPORT_SYMBOL(cfs_hash_cond_del);
1541 cfs_hash_for_each(struct cfs_hash *hs,
1542 cfs_hash_for_each_cb_t func, void *data)
1544 cfs_hash_for_each_tight(hs, func, data, 0);
1546 EXPORT_SYMBOL(cfs_hash_for_each);
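/*
 * Example callback (a hypothetical sketch, not part of this file):
 * visit up to a fixed number of items, returning 1 to break the
 * iteration as described above.
 *
 *	static int my_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			       struct hlist_node *hnode, void *data)
 *	{
 *		unsigned int *left = data;
 *
 *		return --(*left) == 0;
 *	}
 *
 *	unsigned int left = 100;
 *
 *	cfs_hash_for_each(hs, my_count_cb, &left);
 */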
1549 cfs_hash_for_each_safe(struct cfs_hash *hs,
1550 cfs_hash_for_each_cb_t func, void *data)
1552 cfs_hash_for_each_tight(hs, func, data, 1);
1554 EXPORT_SYMBOL(cfs_hash_for_each_safe);
1557 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1558 struct hlist_node *hnode, void *data)
1561 return 1; /* return 1 to break the loop */
1565 cfs_hash_is_empty(struct cfs_hash *hs)
1569 cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1572 EXPORT_SYMBOL(cfs_hash_is_empty);
1575 cfs_hash_size_get(struct cfs_hash *hs)
1577 return cfs_hash_with_counter(hs) ?
1578 atomic_read(&hs->hs_count) :
1579 cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1581 EXPORT_SYMBOL(cfs_hash_size_get);
1584 * cfs_hash_for_each_relax:
1585 * Iterate the hash table and call @func on each item without
1586 * any lock. This function can't guarantee to finish iteration
1587 * if these features are enabled:
1589 * a. if rehash_key is enabled, an item can be moved from
1590 * one bucket to another bucket
1591 * b. the user can remove a non-zero-ref item from the hash-table,
1592 * so the item can be removed from the hash-table; even worse,
1593 * it's possible that the user changed the key and re-inserted it into
1595 * another bucket. There's no way for us to finish the iteration
1596 * correctly in the previous two cases, so iteration has to be stopped on change.
1599 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1600 void *data, int *pstart)
1602 struct hlist_node *hnode;
1603 struct hlist_node *next = NULL;
1604 struct cfs_hash_bd bd;
1613 stop_on_change = cfs_hash_with_rehash_key(hs) ||
1614 !cfs_hash_with_no_itemref(hs);
1615 has_put_locked = hs->hs_ops->hs_put_locked != NULL;
1616 cfs_hash_lock(hs, 0);
1618 LASSERT(!cfs_hash_is_rehashing(hs));
1620 cfs_hash_for_each_bucket(hs, &bd, i) {
1621 struct hlist_head *hhead;
1623 if (pstart && i < *pstart)
1625 else if (end > 0 && i >= end)
1628 cfs_hash_bd_lock(hs, &bd, 0);
1629 version = cfs_hash_bd_version_get(&bd);
1631 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1632 hnode = hhead->first;
1635 cfs_hash_get(hs, hnode);
1636 for (; hnode != NULL; hnode = next) {
1637 cfs_hash_bucket_validate(hs, &bd, hnode);
1640 cfs_hash_get(hs, next);
1641 cfs_hash_bd_unlock(hs, &bd, 0);
1642 cfs_hash_unlock(hs, 0);
1644 rc = func(hs, &bd, hnode, data);
1645 if (stop_on_change || !has_put_locked)
1646 cfs_hash_put(hs, hnode);
1651 cfs_hash_lock(hs, 0);
1652 cfs_hash_bd_lock(hs, &bd, 0);
1653 if (stop_on_change) {
1655 cfs_hash_bd_version_get(&bd))
1657 } else if (has_put_locked) {
1658 cfs_hash_put_locked(hs, hnode);
1660 if (rc) /* callback wants to break iteration */
1664 if (has_put_locked) {
1665 cfs_hash_put_locked(hs, next);
1669 } else if (rc != 0) {
1673 cfs_hash_bd_unlock(hs, &bd, 0);
1674 if (next != NULL && !has_put_locked) {
1675 cfs_hash_put(hs, next);
1678 if (rc) /* callback wants to break iteration */
1682 if (pstart && *pstart > 0 && rc == 0) {
1688 cfs_hash_unlock(hs, 0);
1695 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1696 cfs_hash_for_each_cb_t func, void *data, int start)
1700 if (cfs_hash_with_no_lock(hs) ||
1701 cfs_hash_with_rehash_key(hs) ||
1702 !cfs_hash_with_no_itemref(hs))
1703 RETURN(-EOPNOTSUPP);
1705 if (hs->hs_ops->hs_get == NULL ||
1706 (hs->hs_ops->hs_put == NULL &&
1707 hs->hs_ops->hs_put_locked == NULL))
1708 RETURN(-EOPNOTSUPP);
1710 cfs_hash_for_each_enter(hs);
1711 cfs_hash_for_each_relax(hs, func, data, &start);
1712 cfs_hash_for_each_exit(hs);
1716 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
1719 * For each hash bucket in the libcfs hash @hs call the passed callback
1720 * @func until all the hash buckets are empty. The passed callback @func
1721 * or the previously registered callback hs->hs_put must remove the item
1722 * from the hash. You may either use the cfs_hash_del() or hlist_del()
1723 * functions. No rwlocks will be held during the callback @func, so it
1724 * is safe to sleep if needed. This function will not terminate until the
1725 * hash is empty. Note it is still possible to concurrently add new
1726 * items into the hash. It is the caller's responsibility to ensure
1727 * the required locking is in place to prevent concurrent insertions.
1730 cfs_hash_for_each_empty(struct cfs_hash *hs,
1731 cfs_hash_for_each_cb_t func, void *data)
1737 if (cfs_hash_with_no_lock(hs))
1740 if (hs->hs_ops->hs_get == NULL ||
1741 (hs->hs_ops->hs_put == NULL &&
1742 hs->hs_ops->hs_put_locked == NULL))
1745 cfs_hash_for_each_enter(hs);
1746 while (cfs_hash_for_each_relax(hs, func, data, &start)) {
1747 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1750 cfs_hash_for_each_exit(hs);
1753 EXPORT_SYMBOL(cfs_hash_for_each_empty);
1756 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1757 cfs_hash_for_each_cb_t func, void *data)
1759 struct hlist_head *hhead;
1760 struct hlist_node *hnode;
1761 struct cfs_hash_bd bd;
1763 cfs_hash_for_each_enter(hs);
1764 cfs_hash_lock(hs, 0);
1765 if (hindex >= CFS_HASH_NHLIST(hs))
1768 cfs_hash_bd_index_set(hs, hindex, &bd);
1770 cfs_hash_bd_lock(hs, &bd, 0);
1771 hhead = cfs_hash_bd_hhead(hs, &bd);
1772 hlist_for_each(hnode, hhead) {
1773 if (func(hs, &bd, hnode, data))
1776 cfs_hash_bd_unlock(hs, &bd, 0);
1778 cfs_hash_unlock(hs, 0);
1779 cfs_hash_for_each_exit(hs);
1782 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1785 * For each item in the libcfs hash @hs which matches the @key call
1786 * the passed callback @func and pass to it as an argument each hash
1787 * item and the private @data. During the callback the bucket lock
1788 * is held so the callback must never sleep.
1791 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1792 cfs_hash_for_each_cb_t func, void *data)
1794 struct hlist_node *hnode;
1795 struct cfs_hash_bd bds[2];
1798 cfs_hash_lock(hs, 0);
1800 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1802 cfs_hash_for_each_bd(bds, 2, i) {
1803 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1805 hlist_for_each(hnode, hlist) {
1806 cfs_hash_bucket_validate(hs, &bds[i], hnode);
1808 if (cfs_hash_keycmp(hs, key, hnode)) {
1809 if (func(hs, &bds[i], hnode, data))
1815 cfs_hash_dual_bd_unlock(hs, bds, 0);
1816 cfs_hash_unlock(hs, 0);
1818 EXPORT_SYMBOL(cfs_hash_for_each_key);
1821 * Rehash the libcfs hash @hs to the given @bits. This can be used
1822 * to grow the hash size when excessive chaining is detected, or to
1823 * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
1824 * flag is set in @hs the libcfs hash may be dynamically rehashed
1825 * during addition or removal if the hash's theta value exceeds
1826 * either the hs->hs_min_theta or hs->hs_max_theta values. By default
1827 * these values are tuned to keep the chained hash depth small, and
1828 * this approach assumes a reasonably uniform hashing function. The
1829 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1832 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1834 LASSERT(hs->hs_iterators > 0 || hs->hs_exiting);
1835 while (cfs_hash_is_rehashing(hs)) {
1836 if (cancel_work_sync(&hs->hs_rehash_work)) {
1837 cfs_hash_lock(hs, 1);
1838 hs->hs_rehash_bits = 0;
1839 cfs_hash_unlock(hs, 1);
1846 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1850 LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1852 cfs_hash_lock(hs, 1);
1854 rc = cfs_hash_rehash_bits(hs);
1856 cfs_hash_unlock(hs, 1);
1860 hs->hs_rehash_bits = rc;
1862 /* launch and return */
1863 queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
1864 cfs_hash_unlock(hs, 1);
1868 /* rehash right now */
1869 cfs_hash_unlock(hs, 1);
1871 cfs_hash_rehash_worker(&hs->hs_rehash_work);
1875 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1877 struct cfs_hash_bd new;
1878 struct hlist_head *hhead;
1879 struct hlist_node *hnode;
1880 struct hlist_node *pos;
1884 /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
1885 cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1886 hlist_for_each_safe(hnode, pos, hhead) {
1887 key = cfs_hash_key(hs, hnode);
1888 LASSERT(key != NULL);
1889 /* Validate hnode is in the correct bucket. */
1890 cfs_hash_bucket_validate(hs, old, hnode);
1892 * Delete from old hash bucket; move to new bucket.
1893 * ops->hs_key must be defined.
1895 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1896 hs->hs_rehash_bits, key, &new);
1897 cfs_hash_bd_move_locked(hs, old, &new, hnode);
1905 cfs_hash_rehash_worker(struct work_struct *work)
1907 struct cfs_hash *hs = container_of(work, struct cfs_hash,
1909 struct cfs_hash_bucket **bkts;
1910 struct cfs_hash_bd bd;
1911 unsigned int old_size;
1912 unsigned int new_size;
1918 LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1920 cfs_hash_lock(hs, 0);
1921 LASSERT(cfs_hash_is_rehashing(hs));
1923 old_size = CFS_HASH_NBKT(hs);
1924 new_size = CFS_HASH_RH_NBKT(hs);
1926 cfs_hash_unlock(hs, 0);
1929 * don't need hs::hs_lock for hs::hs_buckets,
1930 * because nobody can change the bkt-table except me.
1932 bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1933 old_size, new_size);
1934 cfs_hash_lock(hs, 1);
1940 if (bkts == hs->hs_buckets) {
1941 bkts = NULL; /* do nothing */
1945 rc = __cfs_hash_theta(hs);
1946 if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1947 /* free the newly allocated bkt-table */
1948 old_size = new_size;
1949 new_size = CFS_HASH_NBKT(hs);
1954 LASSERT(hs->hs_rehash_buckets == NULL);
1955 hs->hs_rehash_buckets = bkts;
1958 cfs_hash_for_each_bucket(hs, &bd, i) {
1959 if (cfs_hash_is_exiting(hs)) {
1961 /* someone wants to destroy the hash, abort now */
1962 if (old_size < new_size) /* OK to free old bkt-table */
1964 /* it's shrinking; need to free the new bkt-table */
1965 hs->hs_rehash_buckets = NULL;
1966 old_size = new_size;
1967 new_size = CFS_HASH_NBKT(hs);
1971 count += cfs_hash_rehash_bd(hs, &bd);
1972 if (count < CFS_HASH_LOOP_HOG ||
1973 cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1978 cfs_hash_unlock(hs, 1);
1980 cfs_hash_lock(hs, 1);
1983 hs->hs_rehash_count++;
1985 bkts = hs->hs_buckets;
1986 hs->hs_buckets = hs->hs_rehash_buckets;
1987 hs->hs_rehash_buckets = NULL;
1989 hs->hs_cur_bits = hs->hs_rehash_bits;
1991 hs->hs_rehash_bits = 0;
1992 bsize = cfs_hash_bkt_size(hs);
1993 cfs_hash_unlock(hs, 1);
1994 /* can't refer to @hs anymore because it could be destroyed */
1996 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1998 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
2002 * Rehash the object referenced by @hnode in the libcfs hash @hs. The
2003 * @old_key must be provided to locate the object's previous location
2004 * in the hash, and the @new_key will be used to reinsert the object.
2005 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
2006 * combo when it is critical that there is no window in time where the
2007 * object is missing from the hash. When an object is being rehashed
2008 * the registered cfs_hash_get() and cfs_hash_put() functions will not be called.
2011 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
2012 void *new_key, struct hlist_node *hnode)
2014 struct cfs_hash_bd bds[3];
2015 struct cfs_hash_bd old_bds[2];
2016 struct cfs_hash_bd new_bd;
2018 LASSERT(!hlist_unhashed(hnode));
2020 cfs_hash_lock(hs, 0);
2022 cfs_hash_dual_bd_get(hs, old_key, old_bds);
2023 cfs_hash_bd_get(hs, new_key, &new_bd);
2025 bds[0] = old_bds[0];
2026 bds[1] = old_bds[1];
2029 /* NB: bds[0] and bds[1] are ordered already */
2030 cfs_hash_bd_order(&bds[1], &bds[2]);
2031 cfs_hash_bd_order(&bds[0], &bds[1]);
2033 cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2034 if (likely(old_bds[1].bd_bucket == NULL)) {
2035 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2037 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2038 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2040 /* overwrite the key inside the locks, otherwise this may race
2041 * with other operations, e.g. rehash */
2042 cfs_hash_keycpy(hs, hnode, new_key);
2044 cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2045 cfs_hash_unlock(hs, 0);
2047 EXPORT_SYMBOL(cfs_hash_rehash_key);
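/*
 * Example (a hypothetical sketch): atomically move @obj from @old_key
 * to @new_key with no window where a lookup on either key can miss a
 * live object:
 *
 *	cfs_hash_rehash_key(hs, &old_key, &obj->new_key, &obj->hnode);
 *
 * This assumes @hs was created with CFS_HASH_REHASH_KEY and an
 * ops->hs_keycpy implementation, as asserted in cfs_hash_create().
 */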
2049 void cfs_hash_debug_header(struct seq_file *m)
2051 seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n",
2052 CFS_HASH_BIGNAME_LEN, "name");
2054 EXPORT_SYMBOL(cfs_hash_debug_header);
2056 static struct cfs_hash_bucket **
2057 cfs_hash_full_bkts(struct cfs_hash *hs)
2059 /* NB: caller should hold hs->hs_lock if REHASH is set */
2060 if (hs->hs_rehash_buckets == NULL)
2061 return hs->hs_buckets;
2063 LASSERT(hs->hs_rehash_bits != 0);
2064 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2065 hs->hs_rehash_buckets : hs->hs_buckets;
2069 cfs_hash_full_nbkt(struct cfs_hash *hs)
2071 /* NB: caller should hold hs->hs_lock if REHASH is set */
2072 if (hs->hs_rehash_buckets == NULL)
2073 return CFS_HASH_NBKT(hs);
2075 LASSERT(hs->hs_rehash_bits != 0);
2076 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2077 CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2080 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2082 int dist[8] = { 0, };
2089 cfs_hash_lock(hs, 0);
2090 theta = __cfs_hash_theta(hs);
2092 seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
2093 CFS_HASH_BIGNAME_LEN, hs->hs_name,
2094 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2095 1 << hs->hs_max_bits,
2096 __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2097 __cfs_hash_theta_int(hs->hs_min_theta),
2098 __cfs_hash_theta_frac(hs->hs_min_theta),
2099 __cfs_hash_theta_int(hs->hs_max_theta),
2100 __cfs_hash_theta_frac(hs->hs_max_theta),
2101 hs->hs_flags, hs->hs_rehash_count);
2104 * The distribution is a summary of the chained hash depth in
2105 * each of the libcfs hash buckets. Each bucket's hsb_count is
2106 * divided by the hash theta value and used to generate a
2107 * histogram of the hash distribution. A uniform hash will
2108 * result in all hash buckets being close to the average, thus
2109 * only the first few entries in the histogram will be non-zero.
2110 * If your hash function results in a non-uniform hash, this will
2111 * be observable as outlier buckets in the distribution histogram.
2113 * Uniform hash distribution: 128/128/0/0/0/0/0/0
2114 * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
2116 for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2117 struct cfs_hash_bd bd;
2119 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2120 cfs_hash_bd_lock(hs, &bd, 0);
2121 if (maxdep < bd.bd_bucket->hsb_depmax) {
2122 maxdep = bd.bd_bucket->hsb_depmax;
2123 maxdepb = ffz(~maxdep);
2125 total += bd.bd_bucket->hsb_count;
2126 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2127 cfs_hash_bd_unlock(hs, &bd, 0);
2130 seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2131 for (i = 0; i < 8; i++)
2132 seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');
2134 cfs_hash_unlock(hs, 0);
2136 EXPORT_SYMBOL(cfs_hash_debug_str);