/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for the hashing needs of the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
 * - ignore hs_rwlock if without CFS_HASH_REHASH setting
 * - buckets are allocated one by one (instead of as contiguous memory),
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - a "bucket" is now a group of hlist_heads; the user can specify bucket
 *   size via the bkt_bits parameter of cfs_hash_create(), and all
 *   hlist_heads in a bucket share one lock to reduce memory overhead.
 *
 * - support lockless hash, caller will take care of locks:
 *   avoid lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock/rwlock for bucket:
 *   overhead of spinlock contention is lower than read/write
 *   contention of rwlock, so using a spinlock to serialize operations on
 *   a bucket is more reasonable for frequently changed hash tables
 *
 * - support single-lock mode:
 *   one lock to protect all hash operations to avoid the overhead of
 *   multiple locks if the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations in many use-cases, which
 *   are expensive.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking, so we schedule any required rehash on a different
 *   thread in those cases.
 *
 * - safer rehash on large hash tables
 *   In the old implementation, the rehash function would exclusively lock
 *   the hash table and finish the rehash in one batch; that is dangerous
 *   on an SMP system because rehashing millions of elements can take a
 *   long time.  The new rehash implementation can release the lock and
 *   relax the CPU in the middle of a rehash, so it is safe for another
 *   thread to search/change the hash table even while it is rehashing.
 *
 * - support two different refcount modes
 *   . hash table has refcount on element
 *   . hash table doesn't change refcount on adding/removing element
 *
 * - support long name hash table (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash
 *   tables; cfs_hash_rehash_key overwrites the hash key inside the
 *   lock by calling hs_keycpy.
 *
 * - better hash iteration:
 *   Now we support both locked iteration & lockless iteration of hash
 *   tables. Also, the user can break the iteration by returning 1 from
 *   the callback.
 */

#include <libcfs/libcfs.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 0;
CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
                "warning when hash depth is high.");
#endif

static inline void
cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
{
        cfs_spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
{
        cfs_spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
{
        if (!exclusive)
                cfs_read_lock(&lock->rw);
        else
                cfs_write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
{
        if (!exclusive)
                cfs_read_unlock(&lock->rw);
        else
                cfs_write_unlock(&lock->rw);
}

/** No lock hash */
static cfs_hash_lock_ops_t cfs_hash_nl_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
{
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
{
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
{
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

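/*
 * Pick the lock operations for this hash according to its feature flags.
 * Summary of the selection implemented below:
 *
 *   no locking at all            -> cfs_hash_nl_lops
 *   no bucket lock               -> cfs_hash_nbl_lops (one top-level spinlock)
 *   rehash + rw bucket lock      -> cfs_hash_bkt_rw_lops
 *   rehash + spin bucket lock    -> cfs_hash_bkt_spin_lops
 *   no rehash + rw bucket lock   -> cfs_hash_nr_bkt_rw_lops
 *   no rehash + spin bucket lock -> cfs_hash_nr_bkt_spin_lops
 *
 * The top-level rwlock is only initialized for rehash-enabled tables;
 * without rehash the per-bucket lock is sufficient and hs_lock is a no-op.
 */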
static void
cfs_hash_lock_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                cfs_spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                cfs_rwlock_init(&hs->hs_lock.rw);

                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_spin_lops;
                else
                        LBUG();
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else
                        LBUG();
        }
}

/**
 * Simple hash head without depth tracking;
 * a new element is always added to the head of the hlist.
 */
typedef struct {
        cfs_hlist_head_t        hh_head;        /**< entries list */
} cfs_hash_head_t;

static int
cfs_hash_hh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_t);
}

static cfs_hlist_head_t *
cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];

        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking;
 * a new element is always added to the head of the hlist.
 */
typedef struct {
        cfs_hlist_head_t        hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
} cfs_hash_head_dep_t;

static int
cfs_hash_hd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_dep_t);
}

static cfs_hlist_head_t *
cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_dep_t   *head;

        head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        cfs_hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        cfs_hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * Doubly linked hash head without depth tracking;
 * a new element is always added to the tail of the hlist.
 */
typedef struct {
        cfs_hlist_head_t        dh_head;        /**< entries list */
        cfs_hlist_node_t       *dh_tail;        /**< the last entry */
} cfs_hash_dhead_t;

static int
cfs_hash_dh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_t);
}

static cfs_hlist_head_t *
cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_t *head;

        head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

        if (dh->dh_tail != NULL) /* not empty */
                cfs_hlist_add_after(dh->dh_tail, hnode);
        else /* empty list */
                cfs_hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnd)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, cfs_hlist_node_t, next);
        }
        cfs_hlist_del_init(hnd);
        return -1; /* unknown depth */
}

/**
 * Doubly linked hash head with depth tracking;
 * a new element is always added to the tail of the hlist.
 */
typedef struct {
        cfs_hlist_head_t        dd_head;        /**< entries list */
        cfs_hlist_node_t       *dd_tail;        /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
} cfs_hash_dhead_dep_t;

static int
cfs_hash_dd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_dep_t);
}

static cfs_hlist_head_t *
cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_dep_t *head;

        head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

        if (dh->dd_tail != NULL) /* not empty */
                cfs_hlist_add_after(dh->dd_tail, hnode);
        else /* empty list */
                cfs_hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnd)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, cfs_hlist_node_t, next);
        }
        cfs_hlist_del_init(hnd);
        return --dh->dd_depth;
}

static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
       .hop_hhead      = cfs_hash_hh_hhead,
       .hop_hhead_size = cfs_hash_hh_hhead_size,
       .hop_hnode_add  = cfs_hash_hh_hnode_add,
       .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
       .hop_hhead      = cfs_hash_hd_hhead,
       .hop_hhead_size = cfs_hash_hd_hhead_size,
       .hop_hnode_add  = cfs_hash_hd_hnode_add,
       .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
       .hop_hhead      = cfs_hash_dh_hhead,
       .hop_hhead_size = cfs_hash_dh_hhead_size,
       .hop_hnode_add  = cfs_hash_dh_hnode_add,
       .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
       .hop_hhead      = cfs_hash_dd_hhead,
       .hop_hhead_size = cfs_hash_dd_hhead_size,
       .hop_hnode_add  = cfs_hash_dd_hnode_add,
       .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

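/*
 * Select the hlist-head operations for this hash: the two feature bits
 * checked below give the four head types defined above:
 *
 *   add-tail?  depth-tracking?  ops table
 *      no           no          cfs_hash_hh_hops
 *      no           yes         cfs_hash_hd_hops
 *      yes          no          cfs_hash_dh_hops
 *      yes          yes         cfs_hash_dd_hops
 */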
static void
cfs_hash_hlist_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}

static void
cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
                     unsigned int bits, const void *key, cfs_hash_bd_t *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}

void
cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_get);

static inline void
cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        cfs_spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        cfs_spin_unlock(&hs->hs_dep_lock);

        cfs_wi_schedule(&hs->hs_dep_wi);
# endif
}

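/*
 * NB: hsb_version is bumped below on every add/del (and on move) so that
 * lockless iterators can detect that a bucket changed under them; the
 * value 0 is skipped on wrap-around because buckets are created with
 * hsb_version == 1 and 0 would look "uninitialized".
 */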
void
cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       cfs_hlist_node_t *hnode)
{
        int                rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                cfs_atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       cfs_hlist_node_t *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(cfs_atomic_read(&hs->hs_count) > 0);
                cfs_atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
                        cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode)
{
        cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
        cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
        int                rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
         * in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_move_locked);

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
};

typedef enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete if existed */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
} cfs_hash_lookup_intent_t;

static cfs_hlist_node_t *
cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                          const void *key, cfs_hlist_node_t *hnode,
                          cfs_hash_lookup_intent_t intent)
{
        cfs_hlist_head_t  *hhead = cfs_hash_bd_hhead(hs, bd);
        cfs_hlist_node_t  *ehnode;
        cfs_hlist_node_t  *match;
        int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function, we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time. */
        match = intent_add ? NULL : hnode;
        cfs_hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

cfs_hlist_node_t *
cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_FIND);
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

cfs_hlist_node_t *
cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, cfs_hlist_node_t *hnode,
                           int noref)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                         CFS_HS_LOOKUP_IT_ADD |
                                         (!noref * CFS_HS_LOOKUP_MASK_REF));
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);

cfs_hlist_node_t *
cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, cfs_hlist_node_t *hnode)
{
        /* hnode can be NULL, we find the first item with @key */
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                         CFS_HS_LOOKUP_IT_FINDDEL);
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);

static void
cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                       unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int                i;

        /**
         * bds must be ordered by ascending bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket but
         * have different bd::bd_offset, so we must take care to avoid
         * deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}

static void
cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                         unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int                i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static cfs_hlist_node_t *
cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                unsigned n, const void *key)
{
        cfs_hlist_node_t  *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                   CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static cfs_hlist_node_t *
cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
                                 cfs_hash_bd_t *bds, unsigned n, const void *key,
                                 cfs_hlist_node_t *hnode, int noref)
{
        cfs_hlist_node_t  *ehnode;
        int                intent;
        unsigned           i;

        LASSERT(hnode != NULL);
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                cfs_hash_bd_t      mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static cfs_hlist_node_t *
cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                 unsigned n, const void *key,
                                 cfs_hlist_node_t *hnode)
{
        cfs_hlist_node_t  *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
{
        int     rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) { /* swap bd1 and bd2 */
                cfs_hash_bd_t tmp;

                tmp = *bd2;
                *bd2 = *bd1;
                *bd1 = tmp;
        }
}

void
cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_get);

void
cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_lock);

void
cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);

cfs_hlist_node_t *
cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);

cfs_hlist_node_t *
cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, cfs_hlist_node_t *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);

cfs_hlist_node_t *
cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, cfs_hlist_node_t *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);

static void
cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
                      int bkt_size, int prev_size, int size)
{
        int     i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static cfs_hash_bucket_t **
cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        cfs_hash_bucket_t **new_bkts;
        int                 i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                cfs_hlist_head_t *hhead;
                cfs_hash_bd_t     bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        CFS_INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        cfs_rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        cfs_spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize a new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
 *           - CFS_HASH_SORT enable chained hash sort
 */
static int cfs_hash_rehash_worker(cfs_workitem_t *wi);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
        cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
        int         dep;
        int         bkt;
        int         off;
        int         bits;

        cfs_spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        cfs_spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        cfs_spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark as workitem done */
        cfs_spin_unlock(&hs->hs_dep_lock);
        return 0;
}

static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
{
        cfs_spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs,
                    cfs_hash_dep_print, CFS_WI_SCHED_ANY);
}

static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
{
        if (cfs_wi_cancel(&hs->hs_dep_wi))
                return;

        cfs_spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                cfs_spin_unlock(&hs->hs_dep_lock);
                cfs_cond_resched();
                cfs_spin_lock(&hs->hs_dep_lock);
        }
        cfs_spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

cfs_hash_t *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                cfs_hash_ops_t *ops, unsigned flags)
{
        cfs_hash_t *hs;
        int         len;

        ENTRY;

        CLASSERT(CFS_HASH_THETA_BITS < 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                      ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
        if (hs == NULL)
                RETURN(NULL);

        strncpy(hs->hs_name, name, len);
        hs->hs_name[len - 1] = '\0';
        hs->hs_flags = flags;

        cfs_atomic_set(&hs->hs_refcount, 1);
        cfs_atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        cfs_wi_init(&hs->hs_rehash_wi, hs,
                    cfs_hash_rehash_worker, CFS_WI_SCHED_ANY);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
        RETURN(NULL);
}
CFS_EXPORT_SYMBOL(cfs_hash_create);
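
/*
 * Usage sketch (illustrative comment only, not compiled): the structure
 * and callback names below (my_ops, my_hash, my_key, ...) are
 * hypothetical; they only show which cfs_hash_ops_t members
 * cfs_hash_create() asserts on.  hs_key, hs_hash, hs_object, hs_keycmp,
 * hs_get and hs_put_locked are mandatory, and hs_keycpy is also required
 * when CFS_HASH_REHASH_KEY is set:
 *
 *      static cfs_hash_ops_t my_ops = {
 *              .hs_hash        = my_hash,       // key -> hash value
 *              .hs_key         = my_key,        // hnode -> key
 *              .hs_keycmp      = my_keycmp,     // compare key with hnode
 *              .hs_object      = my_object,     // hnode -> object
 *              .hs_get         = my_get,        // take a ref on the object
 *              .hs_put_locked  = my_put_locked, // drop a ref, bucket locked
 *      };
 *
 *      hs = cfs_hash_create("my_hash",
 *                           5,           // cur_bits: 2^5 hlist heads
 *                           10,          // max_bits: grow up to 2^10 heads
 *                           3,           // bkt_bits: 2^3 heads per bucket lock
 *                           0,           // extra_bytes
 *                           min_theta, max_theta, // load-factor bounds
 *                           &my_ops, CFS_HASH_REHASH);
 *      if (hs == NULL)
 *              return -ENOMEM;
 */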

/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(cfs_hash_t *hs)
{
        cfs_hlist_node_t     *hnode;
        cfs_hlist_node_t     *pos;
        cfs_hash_bd_t         bd;
        int                   i;
        ENTRY;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * prohibit further rehashes; no lock is needed because
         * I'm the only (last) one who can change it.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                cfs_hlist_head_t *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for consistent code */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        cfs_hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not "
                                         "empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't assert key validity, because we
                                 * can interrupt rehash */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cfs_cond_resched();
        }

        LASSERT(cfs_atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));

        EXIT;
}

cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
{
        if (cfs_atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
CFS_EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(cfs_hash_t *hs)
{
        if (cfs_atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
CFS_EXPORT_SYMBOL(cfs_hash_putref);

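/*
 * Decide whether this hash should be grown or shrunk: returns the new
 * number of bits, 0 when no resize is needed, or a negative errno when
 * resizing is not possible right now.  "theta" is the load factor
 * (elements per hlist head, kept as a fixed-point value; see
 * CFS_HASH_THETA_BITS): grow one bit while theta > hs_max_theta and
 * cur_bits < max_bits; shrink one bit while theta < hs_min_theta and
 * cur_bits > min_bits, and only if shrinking is enabled.
 */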
static inline int
cfs_hash_rehash_bits(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}

/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(cfs_hash_t *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               cfs_atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
{
        cfs_hash_bd_t   bd;
        int             bits;

        LASSERT(cfs_hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
CFS_EXPORT_SYMBOL(cfs_hash_add);

static cfs_hlist_node_t *
cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
                     cfs_hlist_node_t *hnode, int noref)
{
        cfs_hlist_node_t *ehnode;
        cfs_hash_bd_t     bds[2];
        int               bits = 0;

        LASSERT(cfs_hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
CFS_EXPORT_SYMBOL(cfs_hash_add_unique);
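
/*
 * Usage sketch (illustrative comment, hypothetical names): unlike
 * cfs_hash_add(), which inserts unconditionally, cfs_hash_add_unique()
 * reports a key collision instead of adding a duplicate:
 *
 *      rc = cfs_hash_add_unique(hs, &obj->o_id, &obj->o_hnode);
 *      if (rc == -EALREADY) {
 *              // another thread inserted this key first; obj wasn't added
 *              my_obj_free(obj);
 *      }
 */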

/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
                        cfs_hlist_node_t *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
CFS_EXPORT_SYMBOL(cfs_hash_findadd_unique);

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
{
        void           *obj  = NULL;
        int             bits = 0;
        cfs_hash_bd_t   bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        if (bds[1].bd_bucket == NULL && hnode != NULL)
                cfs_hash_bd_del_locked(hs, &bds[0], hnode);
        else
                hnode = cfs_hash_dual_bd_finddel_locked(hs, bds, key, hnode);

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
CFS_EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete the item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs, this function must be called once per key.  The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(cfs_hash_t *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
CFS_EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash, hs->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs, NULL is returned.
 */
void *
cfs_hash_lookup(cfs_hash_t *hs, const void *key)
{
        void                 *obj = NULL;
        cfs_hlist_node_t     *hnode;
        cfs_hash_bd_t         bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
CFS_EXPORT_SYMBOL(cfs_hash_lookup);
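
/*
 * Usage sketch (illustrative comment, hypothetical names): a successful
 * cfs_hash_lookup() takes a reference through ops->hs_get, so the
 * caller must drop it with cfs_hash_put() when done with the object:
 *
 *      obj = cfs_hash_lookup(hs, &id);
 *      if (obj != NULL) {
 *              my_obj_use(obj);
 *              cfs_hash_put(hs, &obj->o_hnode);
 *      }
 */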

static void
cfs_hash_for_each_enter(cfs_hash_t *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there is a race on cfs_hash_t::hs_iterating, but it doesn't
         * matter because it's just an unreliable signal to the
         * rehash-thread; the rehash-thread will try to finish the rehash
         * ASAP when it sees this.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;

        /* NB: iteration is mostly called by service threads; we tend to
         * cancel a pending rehash-request instead of blocking the service
         * thread, and we will relaunch the rehash request after the
         * iteration */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(cfs_hash_t *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: there is a race on cfs_hash_t::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, cfs_atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function as a whole may sleep (between buckets)!
 * b) during the callback:
 *    . the bucket lock is held, so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item via
 *      cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        cfs_hlist_node_t     *hnode;
        cfs_hlist_node_t     *pos;
        cfs_hash_bd_t         bd;
        __u64                 count = 0;
        int                   excl  = !!remove_safe;
        int                   loop  = 0;
        int                   i;
        ENTRY;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                cfs_hlist_head_t *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        cfs_hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cfs_cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        RETURN(count);
}

typedef struct {
        cfs_hash_cond_opt_cb_t  func;
        void                   *arg;
} cfs_hash_cond_arg_t;

static int
cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                         cfs_hlist_node_t *hnode, void *data)
{
        cfs_hash_cond_arg_t *cond = data;

        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}

/**
 * Delete items from the libcfs hash @hs for which @func returns true.
 * The write lock is held during the loop over each bucket so that no
 * object can be referenced while it is being removed.
 */
void
cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
        cfs_hash_cond_arg_t arg = {
                .func   = func,
                .arg    = data,
        };

        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
CFS_EXPORT_SYMBOL(cfs_hash_cond_del);

void
cfs_hash_for_each(cfs_hash_t *hs,
                  cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 0);
}
CFS_EXPORT_SYMBOL(cfs_hash_for_each);

void
cfs_hash_for_each_safe(cfs_hash_t *hs,
                       cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 1);
}
CFS_EXPORT_SYMBOL(cfs_hash_for_each_safe);
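
/*
 * Usage sketch (illustrative comment, hypothetical names): a tight
 * iteration callback runs with the bucket lock held and must not sleep;
 * returning nonzero breaks the walk.  With cfs_hash_for_each_safe() the
 * callback may also delete the current item via cfs_hash_bd_del_locked():
 *
 *      static int my_flush_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *                             cfs_hlist_node_t *hnode, void *data)
 *      {
 *              struct my_obj *obj = cfs_hash_object(hs, hnode);
 *
 *              if (my_obj_is_stale(obj))
 *                      cfs_hash_bd_del_locked(hs, bd, hnode);
 *              return 0; // return 1 to stop the iteration early
 *      }
 *
 *      cfs_hash_for_each_safe(hs, my_flush_cb, NULL);
 */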

static int
cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
              cfs_hlist_node_t *hnode, void *data)
{
        *(int *)data = 0;
        return 1; /* return 1 to break the loop */
}

int
cfs_hash_is_empty(cfs_hash_t *hs)
{
        int empty = 1;

        cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
        return empty;
}
CFS_EXPORT_SYMBOL(cfs_hash_is_empty);

__u64
cfs_hash_size_get(cfs_hash_t *hs)
{
        return cfs_hash_with_counter(hs) ?
               cfs_atomic_read(&hs->hs_count) :
               cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
CFS_EXPORT_SYMBOL(cfs_hash_size_get);


/*
 * cfs_hash_for_each_relax:
 * Iterate over the hash table and call @func on each item without
 * holding any lock across the callback.  This function cannot
 * guarantee that the iteration completes if either of these features
 * is enabled:
 *
 *  a. if rehash_key is enabled, an item can be moved from one bucket
 *     to another while we iterate
 *  b. the user can remove an item with a non-zero refcount from the
 *     hash table; worse, the user may change its key and reinsert it
 *     into another hash bucket
 * There is no way to complete the iteration correctly in either of
 * these cases, so iteration has to stop as soon as a change is seen.
 */
static int
cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hlist_node_t *hnode;
        cfs_hlist_node_t *tmp;
        cfs_hash_bd_t     bd;
        __u32             version;
        int               count = 0;
        int               stop_on_change;
        int               rc;
        int               i;
        ENTRY;

        stop_on_change = cfs_hash_with_rehash_key(hs) ||
                         !cfs_hash_with_no_itemref(hs) ||
                         CFS_HOP(hs, put_locked) == NULL;
        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                cfs_hlist_head_t *hhead;

                cfs_hash_bd_lock(hs, &bd, 0);
                version = cfs_hash_bd_version_get(&bd);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        for (hnode = hhead->first; hnode != NULL;) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                cfs_hash_get(hs, hnode);
                                cfs_hash_bd_unlock(hs, &bd, 0);
                                cfs_hash_unlock(hs, 0);

                                rc = func(hs, &bd, hnode, data);
                                if (stop_on_change)
                                        cfs_hash_put(hs, hnode);
                                cfs_cond_resched();
                                count++;

                                cfs_hash_lock(hs, 0);
                                cfs_hash_bd_lock(hs, &bd, 0);
                                if (!stop_on_change) {
                                        tmp = hnode->next;
                                        cfs_hash_put_locked(hs, hnode);
                                        hnode = tmp;
                                } else { /* bucket changed? */
                                        if (version !=
                                            cfs_hash_bd_version_get(&bd))
                                                break;
                                        /* safe to continue because no change */
                                        hnode = hnode->next;
                                }
                                if (rc) /* callback wants to break iteration */
                                        break;
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, 0);
        }
        cfs_hash_unlock(hs, 0);

        return count;
}

int
cfs_hash_for_each_nolock(cfs_hash_t *hs,
                         cfs_hash_for_each_cb_t func, void *data)
{
        ENTRY;

        if (cfs_hash_with_no_lock(hs) ||
            cfs_hash_with_rehash_key(hs) ||
            !cfs_hash_with_no_itemref(hs))
                RETURN(-EOPNOTSUPP);

        if (CFS_HOP(hs, get) == NULL ||
            (CFS_HOP(hs, put) == NULL &&
             CFS_HOP(hs, put_locked) == NULL))
                RETURN(-EOPNOTSUPP);

        cfs_hash_for_each_enter(hs);
        cfs_hash_for_each_relax(hs, func, data);
        cfs_hash_for_each_exit(hs);

        RETURN(0);
}
CFS_EXPORT_SYMBOL(cfs_hash_for_each_nolock);
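
/*
 * Usage sketch (editorial illustration, compiled out): a walk whose
 * callback may block, e.g. to take a mutex or do I/O.  my_obj_flush()
 * is hypothetical.  cfs_hash_for_each_nolock() fails with -EOPNOTSUPP
 * unless the table satisfies the checks above: it is not lockless, its
 * keys cannot be rehashed, it was created with the no-itemref
 * behaviour, and its hs_ops provide get() and put()/put_locked().
 */
#if 0
static int my_obj_flush_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           cfs_hlist_node_t *hnode, void *data)
{
        /* no hash or bucket lock is held here; sleeping is allowed */
        my_obj_flush(cfs_hash_object(hs, hnode));
        return 0;
}

static int my_obj_flush_all(cfs_hash_t *hs)
{
        return cfs_hash_for_each_nolock(hs, my_obj_flush_cb, NULL);
}
#endif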

/**
 * For each hash bucket in the libcfs hash @hs call the passed callback
 * @func until all the hash buckets are empty.  The passed callback @func
 * or the previously registered callback hs->hs_put must remove the item
 * from the hash.  You may either use the cfs_hash_del() or hlist_del()
 * functions.  No rwlocks will be held during the callback @func, so it
 * is safe to sleep if needed.  This function will not terminate until
 * the hash is empty.  Note it is still possible to concurrently add new
 * items to the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
int
cfs_hash_for_each_empty(cfs_hash_t *hs,
                        cfs_hash_for_each_cb_t func, void *data)
{
        unsigned  i = 0;
        ENTRY;

        if (cfs_hash_with_no_lock(hs))
                return -EOPNOTSUPP;

        if (CFS_HOP(hs, get) == NULL ||
            (CFS_HOP(hs, put) == NULL &&
             CFS_HOP(hs, put_locked) == NULL))
                return -EOPNOTSUPP;

        cfs_hash_for_each_enter(hs);
        while (cfs_hash_for_each_relax(hs, func, data)) {
                CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
                       hs->hs_name, i++);
        }
        cfs_hash_for_each_exit(hs);
        RETURN(0);
}
CFS_EXPORT_SYMBOL(cfs_hash_for_each_empty);
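
/*
 * Usage sketch (editorial illustration, compiled out): drain the table
 * at shutdown.  my_obj_key(), my_obj_free() and the cfs_hash_putref()
 * call are assumptions made for the example; the essential rule is
 * that the callback (or hs_put) must unlink the item, otherwise the
 * loop above never terminates.
 */
#if 0
static int my_obj_release_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                             cfs_hlist_node_t *hnode, void *data)
{
        struct my_obj *obj = cfs_hash_object(hs, hnode);

        cfs_hash_del(hs, my_obj_key(obj), hnode);       /* unlink from @hs */
        my_obj_free(obj);                               /* safe to sleep here */
        return 0;
}

static void my_hash_fini(cfs_hash_t *hs)
{
        cfs_hash_for_each_empty(hs, my_obj_release_cb, NULL);
        cfs_hash_putref(hs);
}
#endif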

void
cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
                        cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hlist_head_t   *hhead;
        cfs_hlist_node_t   *hnode;
        cfs_hash_bd_t       bd;

        cfs_hash_for_each_enter(hs);
        cfs_hash_lock(hs, 0);
        if (hindex >= CFS_HASH_NHLIST(hs))
                goto out;

        cfs_hash_bd_index_set(hs, hindex, &bd);

        cfs_hash_bd_lock(hs, &bd, 0);
        hhead = cfs_hash_bd_hhead(hs, &bd);
        cfs_hlist_for_each(hnode, hhead) {
                if (func(hs, &bd, hnode, data))
                        break;
        }
        cfs_hash_bd_unlock(hs, &bd, 0);
 out:
        cfs_hash_unlock(hs, 0);
        cfs_hash_for_each_exit(hs);
}
CFS_EXPORT_SYMBOL(cfs_hash_hlist_for_each);

/**
 * For each item in the libcfs hash @hs which matches the @key call
 * the passed callback @func and pass to it as an argument each hash
 * item and the private @data.  During the callback the bucket lock
 * is held, so the callback must never sleep.
 */
void
cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
                      cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hlist_node_t   *hnode;
        cfs_hash_bd_t       bds[2];
        unsigned            i;

        cfs_hash_lock(hs, 0);

        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        cfs_hash_for_each_bd(bds, 2, i) {
                cfs_hlist_head_t *hlist = cfs_hash_bd_hhead(hs, &bds[i]);

                cfs_hlist_for_each(hnode, hlist) {
                        cfs_hash_bucket_validate(hs, &bds[i], hnode);

                        if (cfs_hash_keycmp(hs, key, hnode)) {
                                if (func(hs, &bds[i], hnode, data))
                                        break;
                        }
                }
        }

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);
}
CFS_EXPORT_SYMBOL(cfs_hash_for_each_key);
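
/*
 * Usage sketch (editorial illustration, compiled out): count how many
 * items share @key, e.g. in a table that tolerates duplicate keys.
 * The bucket lock is held across the callback, so it must not sleep.
 */
#if 0
static int my_obj_match_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           cfs_hlist_node_t *hnode, void *data)
{
        (*(int *)data)++;
        return 0;       /* 0: keep visiting items that match the key */
}

static int my_obj_count_key(cfs_hash_t *hs, const void *key)
{
        int nr = 0;

        cfs_hash_for_each_key(hs, key, my_obj_match_cb, &nr);
        return nr;
}
#endif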

/**
 * Rehash the libcfs hash @hs to the given @bits.  This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
 * flag is set in @hs the libcfs hash may be dynamically rehashed
 * during addition or removal if the hash's theta value exceeds
 * either the hs->hs_min_theta or hs->hs_max_theta values.  By default
 * these values are tuned to keep the chained hash depth small, and
 * this approach assumes a reasonably uniform hashing function.  The
 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
 */
void
cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
{
        int     i;

        /* caller must hold cfs_hash_lock(hs, 1) */
        LASSERT(cfs_hash_with_rehash(hs) &&
                !cfs_hash_with_no_lock(hs));

        if (!cfs_hash_is_rehashing(hs))
                return;

        if (cfs_wi_cancel(&hs->hs_rehash_wi)) {
                hs->hs_rehash_bits = 0;
                return;
        }

        for (i = 2; cfs_hash_is_rehashing(hs); i++) {
                cfs_hash_unlock(hs, 1);
                /* escalate to a console warning if we wait too long */
                CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
                       "hash %s is still rehashing, rescheduled %d\n",
                       hs->hs_name, i - 1);
                cfs_cond_resched();
                cfs_hash_lock(hs, 1);
        }
}
CFS_EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);

void
cfs_hash_rehash_cancel(cfs_hash_t *hs)
{
        cfs_hash_lock(hs, 1);
        cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}
CFS_EXPORT_SYMBOL(cfs_hash_rehash_cancel);

int
cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
{
        int     rc;

        LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));

        cfs_hash_lock(hs, 1);

        rc = cfs_hash_rehash_bits(hs);
        if (rc <= 0) {
                cfs_hash_unlock(hs, 1);
                return rc;
        }

        hs->hs_rehash_bits = rc;
        if (!do_rehash) {
                /* launch and return */
                cfs_wi_schedule(&hs->hs_rehash_wi);
                cfs_hash_unlock(hs, 1);
                return 0;
        }

        /* rehash right now */
        cfs_hash_unlock(hs, 1);

        return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
}
CFS_EXPORT_SYMBOL(cfs_hash_rehash);
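
/*
 * Usage sketch (editorial illustration, compiled out): both ways of
 * driving a rehash.  Only legal on a table created with CFS_HASH_REHASH
 * and a real lock mode, as the LASSERT above enforces.
 */
#if 0
static void my_hash_resize(cfs_hash_t *hs)
{
        /* do_rehash == 0: pick the new size and hand the actual work
         * to the rehash workitem, returning immediately */
        (void)cfs_hash_rehash(hs, 0);

        /* do_rehash != 0: perform the rehash synchronously; returns
         * once the table has been resized (or no resize was needed) */
        (void)cfs_hash_rehash(hs, 1);
}
#endif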

static int
cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
{
        cfs_hash_bd_t      new;
        cfs_hlist_head_t  *hhead;
        cfs_hlist_node_t  *hnode;
        cfs_hlist_node_t  *pos;
        void              *key;
        int                c = 0;

        /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
        cfs_hash_bd_for_each_hlist(hs, old, hhead) {
                cfs_hlist_for_each_safe(hnode, pos, hhead) {
                        key = cfs_hash_key(hs, hnode);
                        LASSERT(key != NULL);
                        /* Validate hnode is in the correct bucket. */
                        cfs_hash_bucket_validate(hs, old, hnode);
                        /*
                         * Delete from old hash bucket; move to new bucket.
                         * ops->hs_key must be defined.
                         */
                        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                             hs->hs_rehash_bits, key, &new);
                        cfs_hash_bd_move_locked(hs, old, &new, hnode);
                        c++;
                }
        }

        return c;
}

static int
cfs_hash_rehash_worker(cfs_workitem_t *wi)
{
        cfs_hash_t         *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
        cfs_hash_bucket_t **bkts;
        cfs_hash_bd_t       bd;
        unsigned int        old_size;
        unsigned int        new_size;
        int                 bsize;
        int                 count = 0;
        int                 rc = 0;
        int                 i;

        LASSERT(hs != NULL && cfs_hash_with_rehash(hs));

        cfs_hash_lock(hs, 0);
        LASSERT(cfs_hash_is_rehashing(hs));

        old_size = CFS_HASH_NBKT(hs);
        new_size = CFS_HASH_RH_NBKT(hs);

        cfs_hash_unlock(hs, 0);

        /*
         * hs::hs_rwlock is not needed for hs::hs_buckets here, because
         * nobody except this workitem can change the bucket table.
         */
        bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
                                        old_size, new_size);
        cfs_hash_lock(hs, 1);
        if (bkts == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        if (bkts == hs->hs_buckets) {
                bkts = NULL; /* do nothing */
                goto out;
        }

        rc = __cfs_hash_theta(hs);
        if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
                /* free the newly allocated bkt-table */
                old_size = new_size;
                new_size = CFS_HASH_NBKT(hs);
                rc = -EALREADY;
                goto out;
        }

        LASSERT(hs->hs_rehash_buckets == NULL);
        hs->hs_rehash_buckets = bkts;

        rc = 0;
        cfs_hash_for_each_bucket(hs, &bd, i) {
                if (cfs_hash_is_exiting(hs)) {
                        rc = -ESRCH;
                        /* someone wants to destroy the hash, abort now */
                        if (old_size < new_size) /* OK to free old bkt-table */
                                break;
                        /* it's shrinking; need to free the new bkt-table */
                        hs->hs_rehash_buckets = NULL;
                        old_size = new_size;
                        new_size = CFS_HASH_NBKT(hs);
                        goto out;
                }

                count += cfs_hash_rehash_bd(hs, &bd);
                if (count < CFS_HASH_LOOP_HOG ||
                    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
                        continue;
                }

                count = 0;
                cfs_hash_unlock(hs, 1);
                cfs_cond_resched();
                cfs_hash_lock(hs, 1);
        }

        hs->hs_rehash_count++;

        bkts = hs->hs_buckets;
        hs->hs_buckets = hs->hs_rehash_buckets;
        hs->hs_rehash_buckets = NULL;

        hs->hs_cur_bits = hs->hs_rehash_bits;
 out:
        hs->hs_rehash_bits = 0;
        if (rc == -ESRCH)
                cfs_wi_exit(wi); /* never to be scheduled again */
        bsize = cfs_hash_bkt_size(hs);
        cfs_hash_unlock(hs, 1);
        /* can't refer to @hs anymore because it could be destroyed */
        if (bkts != NULL)
                cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
        if (rc != 0)
                CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
        /* cfs_workitem requires us to always return 0 */
        return 0;
}

/**
 * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered cfs_hash_get() and cfs_hash_put() functions will
 * not be called.
 */
void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
                         void *new_key, cfs_hlist_node_t *hnode)
{
        cfs_hash_bd_t        bds[3];
        cfs_hash_bd_t        old_bds[2];
        cfs_hash_bd_t        new_bd;

        LASSERT(!cfs_hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);

        cfs_hash_dual_bd_get(hs, old_key, old_bds);
        cfs_hash_bd_get(hs, new_key, &new_bd);

        bds[0] = old_bds[0];
        bds[1] = old_bds[1];
        bds[2] = new_bd;

        /* NB: bds[0] and bds[1] are ordered already */
        cfs_hash_bd_order(&bds[1], &bds[2]);
        cfs_hash_bd_order(&bds[0], &bds[1]);

        cfs_hash_multi_bd_lock(hs, bds, 3, 1);
        if (likely(old_bds[1].bd_bucket == NULL)) {
                cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
        } else {
                cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
                cfs_hash_bd_add_locked(hs, &new_bd, hnode);
        }
        /* overwrite the key inside the locks, otherwise it may conflict
         * with other operations, e.g. rehash */
        cfs_hash_keycpy(hs, new_key, hnode);

        cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
        cfs_hash_unlock(hs, 0);
}
CFS_EXPORT_SYMBOL(cfs_hash_rehash_key);
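
/*
 * Usage sketch (editorial illustration, compiled out): atomically move
 * an object to the bucket chosen by its new key.  "struct my_obj" and
 * my_obj_key() are hypothetical; no lookup can miss the object during
 * the move, and hs_get()/hs_put() are not invoked for it.
 */
#if 0
static void my_obj_change_key(cfs_hash_t *hs, struct my_obj *obj,
                              void *new_key)
{
        cfs_hash_rehash_key(hs, my_obj_key(obj), new_key,
                            &obj->mo_hnode);
}
#endif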

int cfs_hash_debug_header(char *str, int size)
{
        return snprintf(str, size, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
                 CFS_HASH_BIGNAME_LEN,
                 "name", "cur", "min", "max", "theta", "t-min", "t-max",
                 "flags", "rehash", "count", "maxdep", "maxdepb",
                 " distribution");
}
CFS_EXPORT_SYMBOL(cfs_hash_debug_header);

static cfs_hash_bucket_t **
cfs_hash_full_bkts(cfs_hash_t *hs)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
                return hs->hs_buckets;

        LASSERT(hs->hs_rehash_bits != 0);
        return hs->hs_rehash_bits > hs->hs_cur_bits ?
               hs->hs_rehash_buckets : hs->hs_buckets;
}

static unsigned int
cfs_hash_full_nbkt(cfs_hash_t *hs)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
                return CFS_HASH_NBKT(hs);

        LASSERT(hs->hs_rehash_bits != 0);
        return hs->hs_rehash_bits > hs->hs_cur_bits ?
               CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}

int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)
{
        int                    dist[8] = { 0, };
        int                    maxdep  = -1;
        int                    maxdepb = -1;
        int                    total   = 0;
        int                    c       = 0;
        int                    theta;
        int                    i;

        if (str == NULL || size == 0)
                return 0;

        cfs_hash_lock(hs, 0);
        theta = __cfs_hash_theta(hs);

        c += snprintf(str + c, size - c, "%-*s ",
                      CFS_HASH_BIGNAME_LEN, hs->hs_name);
        c += snprintf(str + c, size - c, "%5d ",  1 << hs->hs_cur_bits);
        c += snprintf(str + c, size - c, "%5d ",  1 << hs->hs_min_bits);
        c += snprintf(str + c, size - c, "%5d ",  1 << hs->hs_max_bits);
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __cfs_hash_theta_int(theta),
                      __cfs_hash_theta_frac(theta));
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __cfs_hash_theta_int(hs->hs_min_theta),
                      __cfs_hash_theta_frac(hs->hs_min_theta));
        c += snprintf(str + c, size - c, "%d.%03d ",
                      __cfs_hash_theta_int(hs->hs_max_theta),
                      __cfs_hash_theta_frac(hs->hs_max_theta));
        c += snprintf(str + c, size - c, " 0x%02x ", hs->hs_flags);
        c += snprintf(str + c, size - c, "%6d ", hs->hs_rehash_count);

        /*
         * The distribution is a summary of the chained hash depth in
         * each of the libcfs hash buckets.  Each bucket's hsb_count is
         * divided by the hash theta value and used to generate a
         * histogram of the hash distribution.  A uniform hash will
         * result in all hash buckets being close to the average, thus
         * only the first few entries in the histogram will be non-zero.
         * If your hash function is non-uniform, this will show up as
         * outlier buckets in the distribution histogram.
         *
         * Uniform hash distribution:      128/128/0/0/0/0/0/0
         * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
         */
        for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
                cfs_hash_bd_t  bd;

                bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
                cfs_hash_bd_lock(hs, &bd, 0);
                if (maxdep < bd.bd_bucket->hsb_depmax) {
                        maxdep  = bd.bd_bucket->hsb_depmax;
#ifdef __KERNEL__
                        maxdepb = cfs_ffz(~maxdep);
#endif
                }
                total += bd.bd_bucket->hsb_count;
                dist[min(__cfs_fls(bd.bd_bucket->hsb_count / max(theta, 1)),
                         7)]++;
                cfs_hash_bd_unlock(hs, &bd, 0);
        }

        c += snprintf(str + c, size - c, "%7d ", total);
        c += snprintf(str + c, size - c, "%7d ", maxdep);
        c += snprintf(str + c, size - c, "%7d ", maxdepb);
        for (i = 0; i < 8; i++)
                c += snprintf(str + c, size - c, "%d%c",  dist[i],
                              (i == 7) ? '\n' : '/');

        cfs_hash_unlock(hs, 0);

        return c;
}
CFS_EXPORT_SYMBOL(cfs_hash_debug_str);
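
/*
 * Usage sketch (editorial illustration, compiled out): dump one header
 * line and one summary line for @hs to the debug log; the buffer size
 * here is an arbitrary choice for the example.
 */
#if 0
static void my_hash_dump(cfs_hash_t *hs)
{
        char str[1024];

        cfs_hash_debug_header(str, sizeof(str));
        CDEBUG(D_INFO, "%s", str);
        cfs_hash_debug_str(hs, str, sizeof(str));
        CDEBUG(D_INFO, "%s", str);
}
#endif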