/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for hashing in the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - moved all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting CFS_HASH_REHASH
 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
 * - buckets are allocated one by one (instead of as contiguous memory),
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - a "bucket" is now a group of hlist_heads; the user can specify the
 *   bucket size via bkt_bits of cfs_hash_create(), and all hlist_heads
 *   in a bucket share one lock to reduce memory overhead.
 *
 * - support lockless hash; the caller takes care of locks:
 *   avoids lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock and rwlock for buckets:
 *   the overhead of spinlock contention is lower than the read/write
 *   contention of an rwlock, so using a spinlock to serialize operations
 *   on a bucket is more reasonable for frequently changed hash tables
 *
 * - support single-lock mode:
 *   one lock protects all hash operations, avoiding the overhead of
 *   multiple locks when the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations in many use-cases, which
 *   are expensive.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking; in those cases any required rehash is scheduled
 *   on a different thread.
 *
 * - safer rehash on large hash tables:
 *   In the old implementation, the rehash function would exclusively
 *   lock the hash table and finish the rehash in one batch; that is
 *   dangerous on SMP systems because rehashing millions of elements
 *   can take a long time. The new rehash implementation can release
 *   the lock and relax the CPU in the middle of a rehash, so it is
 *   safe for another thread to search/change the hash table even
 *   while it is rehashing.
 *
 * - support two different refcount modes:
 *   . the hash table holds a refcount on each element
 *   . the hash table doesn't change refcounts on adding/removing elements
 *
 * - support long-name hash tables (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash tables;
 *   cfs_hash_rehash_key overwrites the hash key inside the lock by
 *   calling hs_keycpy.
 *
 * - better hash iteration:
 *   Both locked and lockless iteration of the hash table are now
 *   supported. The user can also break the iteration by returning 1
 *   from the callback.
 */

#include <libcfs/libcfs.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
                "warning when hash depth is high.");
#endif

static inline void
cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
{
        cfs_spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
{
        cfs_spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
{
        if (!exclusive)
                cfs_read_lock(&lock->rw);
        else
                cfs_write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
{
        if (!exclusive)
                cfs_read_unlock(&lock->rw);
        else
                cfs_write_unlock(&lock->rw);
}

/** No lock hash */
static cfs_hash_lock_ops_t cfs_hash_nl_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
{
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
{
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
{
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

static void
cfs_hash_lock_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                cfs_spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                cfs_rwlock_init(&hs->hs_lock.rw);

                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_spin_lops;
                else
                        LBUG();
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else
                        LBUG();
        }
}

/**
 * Simple hash head without depth tracking
 * new element is always added to head of hlist
 */
typedef struct {
        cfs_hlist_head_t        hh_head;        /**< entries list */
} cfs_hash_head_t;

static int
cfs_hash_hh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_t);
}

static cfs_hlist_head_t *
cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];

        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking
 * new element is always added to head of hlist
 */
typedef struct {
        cfs_hlist_head_t        hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
} cfs_hash_head_dep_t;

static int
cfs_hash_hd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_dep_t);
}

static cfs_hlist_head_t *
cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_dep_t   *head;

        head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        cfs_hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        cfs_hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * double links hash head without depth tracking
 * new element is always added to tail of hlist
 */
typedef struct {
        cfs_hlist_head_t        dh_head;        /**< entries list */
        cfs_hlist_node_t       *dh_tail;        /**< the last entry */
} cfs_hash_dhead_t;

static int
cfs_hash_dh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_t);
}

static cfs_hlist_head_t *
cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_t *head;

        head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

        if (dh->dh_tail != NULL) /* not empty */
                cfs_hlist_add_after(dh->dh_tail, hnode);
        else /* empty list */
                cfs_hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

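/*
 * Tail maintenance for the double-link heads below: when the node
 * being removed is the current tail (hnd->next == NULL), the new tail
 * is recovered from hnd->pprev.  If pprev points back at the list
 * head itself the list becomes empty (tail = NULL); otherwise pprev
 * points at the 'next' field embedded in the previous node, and
 * container_of() converts it back into that cfs_hlist_node_t.
 */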
static int
cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnd)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, cfs_hlist_node_t, next);
        }
        cfs_hlist_del_init(hnd);
        return -1; /* unknown depth */
}

/**
 * double links hash head with depth tracking
 * new element is always added to tail of hlist
 */
typedef struct {
        cfs_hlist_head_t        dd_head;        /**< entries list */
        cfs_hlist_node_t       *dd_tail;        /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
} cfs_hash_dhead_dep_t;

static int
cfs_hash_dd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_dep_t);
}

static cfs_hlist_head_t *
cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_dep_t *head;

        head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnode)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

        if (dh->dd_tail != NULL) /* not empty */
                cfs_hlist_add_after(dh->dd_tail, hnode);
        else /* empty list */
                cfs_hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      cfs_hlist_node_t *hnd)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, cfs_hlist_node_t, next);
        }
        cfs_hlist_del_init(hnd);
        return --dh->dd_depth;
}

static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
       .hop_hhead      = cfs_hash_hh_hhead,
       .hop_hhead_size = cfs_hash_hh_hhead_size,
       .hop_hnode_add  = cfs_hash_hh_hnode_add,
       .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
       .hop_hhead      = cfs_hash_hd_hhead,
       .hop_hhead_size = cfs_hash_hd_hhead_size,
       .hop_hnode_add  = cfs_hash_hd_hnode_add,
       .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
       .hop_hhead      = cfs_hash_dh_hhead,
       .hop_hhead_size = cfs_hash_dh_hhead_size,
       .hop_hnode_add  = cfs_hash_dh_hnode_add,
       .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
       .hop_hhead      = cfs_hash_dd_hhead,
       .hop_hhead_size = cfs_hash_dd_hhead_size,
       .hop_hnode_add  = cfs_hash_dd_hnode_add,
       .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}

static void
cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
                     unsigned int bits, const void *key, cfs_hash_bd_t *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}
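
/*
 * Worked example of the split above (illustrative numbers only): with
 * bits = 10 and hs_bkt_bits = 3, the hashed index lies in [0, 1023];
 * the low (bits - bkt_bits) = 7 bits select one of 128 buckets and the
 * remaining high bits select one of the 8 hlist heads in that bucket:
 *
 *	index = cfs_hash_id(hs, key, (1U << 10) - 1);
 *	bd->bd_bucket = bkts[index & ((1U << 7) - 1)];	// index % 128
 *	bd->bd_offset = index >> 7;			// 0..7
 */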

void
cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_get);

static inline void
cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        cfs_spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        cfs_spin_unlock(&hs->hs_dep_lock);

        cfs_wi_schedule(&hs->hs_dep_wi);
# endif
}

void
cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       cfs_hlist_node_t *hnode)
{
        int                rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                cfs_atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       cfs_hlist_node_t *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(cfs_atomic_read(&hs->hs_count) > 0);
                cfs_atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
                        cfs_hash_bd_t *bd_new, cfs_hlist_node_t *hnode)
{
        cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
        cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
        int                rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
         * in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_move_locked);

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
};

typedef enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete if existed */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
} cfs_hash_lookup_intent_t;

static cfs_hlist_node_t *
cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                          const void *key, cfs_hlist_node_t *hnode,
                          cfs_hash_lookup_intent_t intent)
{
        cfs_hlist_head_t  *hhead = cfs_hash_bd_hhead(hs, bd);
        cfs_hlist_node_t  *ehnode;
        cfs_hlist_node_t  *match;
        int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function, we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time. */
        match = intent_add ? NULL : hnode;
        cfs_hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

cfs_hlist_node_t *
cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_FIND);
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

cfs_hlist_node_t *
cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, cfs_hlist_node_t *hnode,
                           int noref)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                         CFS_HS_LOOKUP_IT_ADD |
                                         (!noref * CFS_HS_LOOKUP_MASK_REF));
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
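
/*
 * The "!noref * CFS_HS_LOOKUP_MASK_REF" above is a branchless way to
 * request a reference only when the caller wants one: !noref evaluates
 * to 1 when noref == 0 and to 0 otherwise, so the product is either
 * CFS_HS_LOOKUP_MASK_REF or 0.  For example:
 *
 *	noref == 0: intent = CFS_HS_LOOKUP_IT_ADD | CFS_HS_LOOKUP_MASK_REF
 *	            (equivalent to CFS_HS_LOOKUP_IT_FINDADD)
 *	noref == 1: intent = CFS_HS_LOOKUP_IT_ADD
 */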

cfs_hlist_node_t *
cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, cfs_hlist_node_t *hnode)
{
        /* hnode can be NULL, we find the first item with @key */
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                         CFS_HS_LOOKUP_IT_FINDDEL);
}
CFS_EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);

static void
cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                       unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int                i;

        /**
         * bds must be ordered by ascending bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket
         * but with different bd::bd_offset, so we must take care to
         * avoid deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}
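
/*
 * The ascending order asserted above is what makes multi-bucket
 * locking deadlock-free: if thread A needs buckets {3, 7} while
 * thread B needs buckets {7, 3}, both lock bucket 3 first and
 * bucket 7 second, so the cycle "A holds 3 and wants 7, B holds 7
 * and wants 3" cannot occur.  Duplicate buckets are skipped via the
 * 'prev' check, so the same lock is never taken twice.
 */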

static void
cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                         unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int                i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static cfs_hlist_node_t *
cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                unsigned n, const void *key)
{
        cfs_hlist_node_t  *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                   CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static cfs_hlist_node_t *
cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
                                 cfs_hash_bd_t *bds, unsigned n, const void *key,
                                 cfs_hlist_node_t *hnode, int noref)
{
        cfs_hlist_node_t  *ehnode;
        int                intent;
        unsigned           i;

        LASSERT(hnode != NULL);
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                cfs_hash_bd_t      mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static cfs_hlist_node_t *
cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                 unsigned n, const void *key,
                                 cfs_hlist_node_t *hnode)
{
        cfs_hlist_node_t  *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
{
        int     rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) { /* swap bd1 and bd2 */
                cfs_hash_bd_t tmp;

                tmp = *bd2;
                *bd2 = *bd1;
                *bd1 = tmp;
        }
}

void
cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_get);

void
cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_lock);

void
cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);

cfs_hlist_node_t *
cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);

cfs_hlist_node_t *
cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, cfs_hlist_node_t *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);

cfs_hlist_node_t *
cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, cfs_hlist_node_t *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
CFS_EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);

static void
cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
                      int bkt_size, int prev_size, int size)
{
        int     i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static cfs_hash_bucket_t **
cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        cfs_hash_bucket_t **new_bkts;
        int                 i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                cfs_hlist_head_t *hhead;
                cfs_hash_bd_t     bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        CFS_INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        cfs_rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        cfs_spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize a new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enables dynamic hash resizing
 *           - CFS_HASH_SORT enables chained hash sort
 */
static int cfs_hash_rehash_worker(cfs_workitem_t *wi);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
        cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
        int         dep;
        int         bkt;
        int         off;
        int         bits;

        cfs_spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        cfs_spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        cfs_spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark as workitem done */
        cfs_spin_unlock(&hs->hs_dep_lock);
        return 0;
}

static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
{
        cfs_spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs,
                    cfs_hash_dep_print, CFS_WI_SCHED_ANY);
}

static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
{
        if (cfs_wi_cancel(&hs->hs_dep_wi))
                return;

        cfs_spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                cfs_spin_unlock(&hs->hs_dep_lock);
                cfs_cond_resched();
                cfs_spin_lock(&hs->hs_dep_lock);
        }
        cfs_spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

cfs_hash_t *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                cfs_hash_ops_t *ops, unsigned flags)
{
        cfs_hash_t *hs;
        int         len;

        ENTRY;

        CLASSERT(CFS_HASH_THETA_BITS < 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                      ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
        if (hs == NULL)
                RETURN(NULL);

        strncpy(hs->hs_name, name, len);
        hs->hs_name[len - 1] = '\0';
        hs->hs_flags = flags;

        cfs_atomic_set(&hs->hs_refcount, 1);
        cfs_atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        cfs_wi_init(&hs->hs_rehash_wi, hs,
                    cfs_hash_rehash_worker, CFS_WI_SCHED_ANY);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
        RETURN(NULL);
}
CFS_EXPORT_SYMBOL(cfs_hash_create);
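
/*
 * Example usage (a minimal sketch; "my_obj_hash_ops" and the theta
 * arguments are illustrative assumptions, not definitions from this
 * file).  This creates a table that starts with 2^10 hlist heads, may
 * grow up to 2^14, groups 2^3 heads per bucket, and rehashes
 * dynamically:
 *
 *	cfs_hash_t *hs;
 *
 *	hs = cfs_hash_create("my_objs", 10, 14, 3, 0,
 *			     my_min_theta, my_max_theta,
 *			     &my_obj_hash_ops, CFS_HASH_REHASH);
 *	if (hs == NULL)
 *		return -ENOMEM;
 *
 * my_obj_hash_ops must provide at least the callbacks asserted above:
 * hs_key, hs_hash, hs_object, hs_keycmp, hs_get and hs_put_locked.
 */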

/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(cfs_hash_t *hs)
{
        cfs_hlist_node_t     *hnode;
        cfs_hlist_node_t     *pos;
        cfs_hash_bd_t         bd;
        int                   i;
        ENTRY;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * prohibit further rehashes; no lock is needed because
         * I'm the only (last) one who can change it.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                cfs_hlist_head_t *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for consistent code */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        cfs_hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not "
                                         "empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't validate the key here, because
                                 * rehash can be interrupted */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cfs_cond_resched();
        }

        LASSERT(cfs_atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));

        EXIT;
}

cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
{
        if (cfs_atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
CFS_EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(cfs_hash_t *hs)
{
        if (cfs_atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
CFS_EXPORT_SYMBOL(cfs_hash_putref);

static inline int
cfs_hash_rehash_bits(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}
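
/*
 * Worked example of the grow test above (illustrative numbers): theta
 * is the load factor, i.e. item count / (1 << cur_bits), kept in fixed
 * point with CFS_HASH_THETA_BITS fractional bits.  With cur_bits = 10
 * (1024 hlist heads) and 3000 items, theta is about 2.93; if max_theta
 * corresponds to 2.0 the table is over its target load, so the
 * function returns cur_bits + 1 = 11, which doubles the number of
 * heads and roughly halves theta to about 1.46.
 */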

/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(cfs_hash_t *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               cfs_atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
{
        cfs_hash_bd_t   bd;
        int             bits;

        LASSERT(cfs_hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
CFS_EXPORT_SYMBOL(cfs_hash_add);

static cfs_hlist_node_t *
cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
                     cfs_hlist_node_t *hnode, int noref)
{
        cfs_hlist_node_t *ehnode;
        cfs_hash_bd_t     bds[2];
        int               bits = 0;

        LASSERT(cfs_hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
CFS_EXPORT_SYMBOL(cfs_hash_add_unique);
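
/*
 * Example (sketch; "obj", its key and its embedded hnode linkage are
 * assumed for illustration): insert only when the key is absent, and
 * handle the collision case explicitly:
 *
 *	rc = cfs_hash_add_unique(hs, &obj->key, &obj->hnode);
 *	if (rc == -EALREADY) {
 *		// somebody inserted an item with this key first
 *	}
 */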

/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
                        cfs_hlist_node_t *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
CFS_EXPORT_SYMBOL(cfs_hash_findadd_unique);

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(cfs_hash_t *hs, const void *key, cfs_hlist_node_t *hnode)
{
        void           *obj  = NULL;
        int             bits = 0;
        cfs_hash_bd_t   bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        if (bds[1].bd_bucket == NULL && hnode != NULL)
                cfs_hash_bd_del_locked(hs, &bds[0], hnode);
        else
                hnode = cfs_hash_dual_bd_finddel_locked(hs, bds, key, hnode);

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
CFS_EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs this function must be called once per key.  The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(cfs_hash_t *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
CFS_EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash hs->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs NULL is returned.
 */
void *
cfs_hash_lookup(cfs_hash_t *hs, const void *key)
{
        void                 *obj = NULL;
        cfs_hlist_node_t     *hnode;
        cfs_hash_bd_t         bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
CFS_EXPORT_SYMBOL(cfs_hash_lookup);
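
/*
 * Example (sketch; the object layout is assumed for illustration): a
 * successful lookup returns the object with a reference taken through
 * ops->hs_get, so the caller must drop it with the counterpart
 * cfs_hash_put() when done:
 *
 *	obj = cfs_hash_lookup(hs, &key);
 *	if (obj != NULL) {
 *		// ... use obj ...
 *		cfs_hash_put(hs, &obj->hnode);
 *	}
 */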

static void
cfs_hash_for_each_enter(cfs_hash_t *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there is a race on cfs_hash_t::hs_iterating, but it doesn't
         * matter because it's just an unreliable signal to the rehash
         * thread, which will try to finish the rehash ASAP when seeing it.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;

        /* NB: iteration is mostly called by service threads; we tend to
         * cancel a pending rehash request instead of blocking the service
         * thread, and will relaunch the rehash request after iteration */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(cfs_hash_t *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: there is a race on cfs_hash_t::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, cfs_atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item
 *      with cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        cfs_hlist_node_t     *hnode;
        cfs_hlist_node_t     *pos;
        cfs_hash_bd_t         bd;
        __u64                 count = 0;
        int                   excl  = !!remove_safe;
        int                   loop  = 0;
        int                   i;
        ENTRY;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                cfs_hlist_head_t *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        cfs_hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cfs_cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        RETURN(count);
}

typedef struct {
        cfs_hash_cond_opt_cb_t  func;
        void                   *arg;
} cfs_hash_cond_arg_t;

static int
cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                         cfs_hlist_node_t *hnode, void *data)
{
        cfs_hash_cond_arg_t *cond = data;

        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}

/**
 * Delete items from the libcfs hash @hs when @func returns true.
 * The write lock is held during the loop over each bucket to prevent
 * any object from being referenced.
 */
void
cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
        cfs_hash_cond_arg_t arg = {
                .func   = func,
                .arg    = data,
        };

        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
CFS_EXPORT_SYMBOL(cfs_hash_cond_del);

void
cfs_hash_for_each(cfs_hash_t *hs,
                  cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 0);
}
CFS_EXPORT_SYMBOL(cfs_hash_for_each);
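
/*
 * Example callback (sketch; "my_match" is assumed for illustration):
 * count the items satisfying a predicate.  The signature matches
 * cfs_hash_for_each_cb_t as also used by cfs_hash_peek() below, and
 * returning 0 keeps the iteration going (returning 1 would break it):
 *
 *	static int
 *	my_count_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *		    cfs_hlist_node_t *hnode, void *data)
 *	{
 *		if (my_match(cfs_hash_object(hs, hnode)))
 *			(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	cfs_hash_for_each(hs, my_count_cb, &n);
 */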
1523
1524 void
1525 cfs_hash_for_each_safe(cfs_hash_t *hs,
1526                        cfs_hash_for_each_cb_t func, void *data)
1527 {
1528         cfs_hash_for_each_tight(hs, func, data, 1);
1529 }
1530 CFS_EXPORT_SYMBOL(cfs_hash_for_each_safe);

static int
cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
              cfs_hlist_node_t *hnode, void *data)
{
        *(int *)data = 0;
        return 1; /* return 1 to break the loop */
}

int
cfs_hash_is_empty(cfs_hash_t *hs)
{
        int empty = 1;

        cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
        return empty;
}
CFS_EXPORT_SYMBOL(cfs_hash_is_empty);

__u64
cfs_hash_size_get(cfs_hash_t *hs)
{
        return cfs_hash_with_counter(hs) ?
               cfs_atomic_read(&hs->hs_count) :
               cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
CFS_EXPORT_SYMBOL(cfs_hash_size_get);
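
/*
 * Usage sketch (illustrative only): cheap size/emptiness queries.  With
 * a per-hash counter cfs_hash_size_get() reads the atomic hs_count;
 * without one it falls back to the locked bucket walk above.
 */
#if 0
static void
my_hash_report(cfs_hash_t *hs)
{
        if (cfs_hash_is_empty(hs))
                CDEBUG(D_INFO, "hash %s is empty\n", hs->hs_name);
        else
                CDEBUG(D_INFO, "hash %s holds "LPU64" items\n",
                       hs->hs_name, cfs_hash_size_get(hs));
}
#endif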

/*
 * cfs_hash_for_each_relax:
 * Iterate the hash table and call @func on each item without
 * holding any lock.  This function cannot guarantee that the
 * iteration will finish if these features are enabled:
 *
 *  a. if rehash_key is enabled, an item can be moved from
 *     one bucket to another bucket
 *  b. user can remove a non-zero-ref item from the hash-table,
 *     so the item can disappear from the hash-table; even worse,
 *     it's possible that the user changed the key and inserted
 *     the item into another hash bucket.
 * there's no way for us to finish the iteration correctly in the
 * previous two cases, so the iteration has to be stopped on change.
 */
static int
cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hlist_node_t *hnode;
        cfs_hlist_node_t *tmp;
        cfs_hash_bd_t     bd;
        __u32             version;
        int               count = 0;
        int               stop_on_change;
        int               rc;
        int               i;
        ENTRY;

        stop_on_change = cfs_hash_with_rehash_key(hs) ||
                         !cfs_hash_with_no_itemref(hs) ||
                         CFS_HOP(hs, put_locked) == NULL;
        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                cfs_hlist_head_t *hhead;

                cfs_hash_bd_lock(hs, &bd, 0);
                version = cfs_hash_bd_version_get(&bd);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        for (hnode = hhead->first; hnode != NULL;) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                cfs_hash_get(hs, hnode);
                                cfs_hash_bd_unlock(hs, &bd, 0);
                                cfs_hash_unlock(hs, 0);

                                rc = func(hs, &bd, hnode, data);
                                if (stop_on_change)
                                        cfs_hash_put(hs, hnode);
                                cfs_cond_resched();
                                count++;

                                cfs_hash_lock(hs, 0);
                                cfs_hash_bd_lock(hs, &bd, 0);
                                if (!stop_on_change) {
                                        tmp = hnode->next;
                                        cfs_hash_put_locked(hs, hnode);
                                        hnode = tmp;
                                } else { /* bucket changed? */
                                        if (version !=
                                            cfs_hash_bd_version_get(&bd))
                                                break;
                                        /* safe to continue because no change */
                                        hnode = hnode->next;
                                }
                                if (rc) /* callback wants to break iteration */
                                        break;
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, 0);
        }
        cfs_hash_unlock(hs, 0);

        return count;
}

int
cfs_hash_for_each_nolock(cfs_hash_t *hs,
                         cfs_hash_for_each_cb_t func, void *data)
{
        ENTRY;

        if (cfs_hash_with_no_lock(hs) ||
            cfs_hash_with_rehash_key(hs) ||
            !cfs_hash_with_no_itemref(hs))
                RETURN(-EOPNOTSUPP);

        if (CFS_HOP(hs, get) == NULL ||
            (CFS_HOP(hs, put) == NULL &&
             CFS_HOP(hs, put_locked) == NULL))
                RETURN(-EOPNOTSUPP);

        cfs_hash_for_each_enter(hs);
        cfs_hash_for_each_relax(hs, func, data);
        cfs_hash_for_each_exit(hs);

        RETURN(0);
}
CFS_EXPORT_SYMBOL(cfs_hash_for_each_nolock);
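
/*
 * Usage sketch (illustrative only): iteration with a sleeping callback.
 * As the checks above show, cfs_hash_for_each_nolock() requires a hash
 * that is not lockless, has no rehash-key, has itemref disabled, and
 * has get/put (or put_locked) operations registered; otherwise it
 * returns -EOPNOTSUPP.  No lock is held around @func.
 * my_obj_write_back() is hypothetical.
 */
#if 0
static int
my_obj_flush_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                cfs_hlist_node_t *hnode, void *data)
{
        struct my_obj *mo = cfs_hash_object(hs, hnode);

        my_obj_write_back(mo);  /* may sleep: no lock is held here */
        return 0;
}

static int
my_hash_flush(cfs_hash_t *hs)
{
        return cfs_hash_for_each_nolock(hs, my_obj_flush_cb, NULL);
}
#endif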

/**
 * For each hash bucket in the libcfs hash @hs call the passed callback
 * @func until all the hash buckets are empty.  The passed callback @func
 * or the previously registered callback hs->hs_put must remove the item
 * from the hash.  You may either use the cfs_hash_del() or hlist_del()
 * functions.  No rwlocks will be held during the callback @func, so it
 * is safe to sleep if needed.  This function will not terminate until
 * the hash is empty.  Note it is still possible to concurrently add new
 * items in to the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
int
cfs_hash_for_each_empty(cfs_hash_t *hs,
                        cfs_hash_for_each_cb_t func, void *data)
{
        unsigned  i = 0;
        ENTRY;

        if (cfs_hash_with_no_lock(hs))
                return -EOPNOTSUPP;

        if (CFS_HOP(hs, get) == NULL ||
            (CFS_HOP(hs, put) == NULL &&
             CFS_HOP(hs, put_locked) == NULL))
                return -EOPNOTSUPP;

        cfs_hash_for_each_enter(hs);
        while (cfs_hash_for_each_relax(hs, func, data)) {
                CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
                       hs->hs_name, i++);
        }
        cfs_hash_for_each_exit(hs);
        RETURN(0);
}
CFS_EXPORT_SYMBOL(cfs_hash_for_each_empty);
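
/*
 * Usage sketch (illustrative only): drain a hash at shutdown.  As the
 * comment above requires, the callback must unlink each item (here via
 * cfs_hash_del()); cfs_hash_for_each_empty() keeps re-walking until the
 * table is observed empty.  The my_obj fields and my_obj_free() are
 * hypothetical.
 */
#if 0
static int
my_obj_release_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  cfs_hlist_node_t *hnode, void *data)
{
        struct my_obj *mo = cfs_hash_object(hs, hnode);

        cfs_hash_del(hs, &mo->mo_key, hnode);  /* unlink from the hash */
        my_obj_free(mo);                       /* may sleep: no lock held */
        return 0;
}

static void
my_hash_shutdown(cfs_hash_t *hs)
{
        cfs_hash_for_each_empty(hs, my_obj_release_cb, NULL);
}
#endif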

void
cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
                        cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hlist_head_t   *hhead;
        cfs_hlist_node_t   *hnode;
        cfs_hash_bd_t       bd;

        cfs_hash_for_each_enter(hs);
        cfs_hash_lock(hs, 0);
        if (hindex >= CFS_HASH_NHLIST(hs))
                goto out;

        cfs_hash_bd_index_set(hs, hindex, &bd);

        cfs_hash_bd_lock(hs, &bd, 0);
        hhead = cfs_hash_bd_hhead(hs, &bd);
        cfs_hlist_for_each(hnode, hhead) {
                if (func(hs, &bd, hnode, data))
                        break;
        }
        cfs_hash_bd_unlock(hs, &bd, 0);
 out:
        cfs_hash_unlock(hs, 0);
        cfs_hash_for_each_exit(hs);
}
CFS_EXPORT_SYMBOL(cfs_hash_hlist_for_each);

/*
 * For each item in the libcfs hash @hs which matches the @key, call
 * the passed callback @func, passing it each matching hash item and
 * the private @data.  The bucket lock is held during the callback, so
 * the callback must never sleep.
 */
void
cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
                      cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hlist_node_t   *hnode;
        cfs_hash_bd_t       bds[2];
        unsigned            i;

        cfs_hash_lock(hs, 0);

        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        cfs_hash_for_each_bd(bds, 2, i) {
                cfs_hlist_head_t *hlist = cfs_hash_bd_hhead(hs, &bds[i]);

                cfs_hlist_for_each(hnode, hlist) {
                        cfs_hash_bucket_validate(hs, &bds[i], hnode);

                        if (cfs_hash_keycmp(hs, key, hnode)) {
                                if (func(hs, &bds[i], hnode, data))
                                        break;
                        }
                }
        }

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);
}
CFS_EXPORT_SYMBOL(cfs_hash_for_each_key);
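
/*
 * Usage sketch (illustrative only): visit every object stored under one
 * @key, which is useful when several objects may share a key.  Per the
 * comment above, the bucket lock is held across the callback, so it
 * must not sleep; returning non-zero stops the walk of that bucket.
 */
#if 0
static int
my_key_count_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                cfs_hlist_node_t *hnode, void *data)
{
        int *nr = data;

        (*nr)++;
        return 0;
}

static int
my_key_population(cfs_hash_t *hs, const void *key)
{
        int nr = 0;

        cfs_hash_for_each_key(hs, key, my_key_count_cb, &nr);
        return nr;
}
#endif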

/**
 * Rehash the libcfs hash @hs to the given @bits.  This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
 * flag is set in @hs the libcfs hash may be dynamically rehashed
 * during addition or removal if the hash's theta value falls outside
 * the hs->hs_min_theta..hs->hs_max_theta range.  By default these
 * values are tuned to keep the chained hash depth small, and this
 * approach assumes a reasonably uniform hashing function.  The theta
 * thresholds for @hs are tunable via cfs_hash_set_theta().
 */
void
cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
{
        int     i;

        /* caller must hold cfs_hash_lock(hs, 1) */
        LASSERT(cfs_hash_with_rehash(hs) &&
                !cfs_hash_with_no_lock(hs));

        if (!cfs_hash_is_rehashing(hs))
                return;

        if (cfs_wi_cancel(&hs->hs_rehash_wi)) {
                hs->hs_rehash_bits = 0;
                return;
        }

        for (i = 2; cfs_hash_is_rehashing(hs); i++) {
                cfs_hash_unlock(hs, 1);
                /* raise a console warning if we have waited too long */
                CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
                       "hash %s is still rehashing, rescheduled %d\n",
                       hs->hs_name, i - 1);
                cfs_cond_resched();
                cfs_hash_lock(hs, 1);
        }
}
CFS_EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);

void
cfs_hash_rehash_cancel(cfs_hash_t *hs)
{
        cfs_hash_lock(hs, 1);
        cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}
CFS_EXPORT_SYMBOL(cfs_hash_rehash_cancel);

int
cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
{
        int     rc;

        LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));

        cfs_hash_lock(hs, 1);

        rc = cfs_hash_rehash_bits(hs);
        if (rc <= 0) {
                cfs_hash_unlock(hs, 1);
                return rc;
        }

        hs->hs_rehash_bits = rc;
        if (!do_rehash) {
                /* launch and return */
                cfs_wi_schedule(&hs->hs_rehash_wi);
                cfs_hash_unlock(hs, 1);
                return 0;
        }

        /* rehash right now */
        cfs_hash_unlock(hs, 1);

        return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
}
CFS_EXPORT_SYMBOL(cfs_hash_rehash);
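
/*
 * Usage sketch (illustrative only): trigger a resize check explicitly,
 * e.g. from a debug hook.  With do_rehash == 0 the work is queued on
 * the rehash workitem and cfs_hash_rehash() returns immediately; with
 * do_rehash != 0 the table is rehashed synchronously in the caller.
 */
#if 0
static int
my_hash_resize_now(cfs_hash_t *hs)
{
        /* <= 0 means cfs_hash_rehash_bits() saw no resize was needed */
        return cfs_hash_rehash(hs, 1);
}
#endif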

static int
cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
{
        cfs_hash_bd_t      new;
        cfs_hlist_head_t  *hhead;
        cfs_hlist_node_t  *hnode;
        cfs_hlist_node_t  *pos;
        void              *key;
        int                c = 0;

        /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
        cfs_hash_bd_for_each_hlist(hs, old, hhead) {
                cfs_hlist_for_each_safe(hnode, pos, hhead) {
                        key = cfs_hash_key(hs, hnode);
                        LASSERT(key != NULL);
                        /* Validate hnode is in the correct bucket. */
                        cfs_hash_bucket_validate(hs, old, hnode);
                        /*
                         * Delete from old hash bucket; move to new bucket.
                         * ops->hs_key must be defined.
                         */
                        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                             hs->hs_rehash_bits, key, &new);
                        cfs_hash_bd_move_locked(hs, old, &new, hnode);
                        c++;
                }
        }

        return c;
}

static int
cfs_hash_rehash_worker(cfs_workitem_t *wi)
{
        cfs_hash_t         *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
        cfs_hash_bucket_t **bkts;
        cfs_hash_bd_t       bd;
        unsigned int        old_size;
        unsigned int        new_size;
        int                 bsize;
        int                 count = 0;
        int                 rc = 0;
        int                 i;

        LASSERT(hs != NULL && cfs_hash_with_rehash(hs));

        cfs_hash_lock(hs, 0);
        LASSERT(cfs_hash_is_rehashing(hs));

        old_size = CFS_HASH_NBKT(hs);
        new_size = CFS_HASH_RH_NBKT(hs);

        cfs_hash_unlock(hs, 0);

        /*
         * don't need hs::hs_rwlock for hs::hs_buckets,
         * because nobody else can change the bucket table.
         */
        bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
                                        old_size, new_size);
        cfs_hash_lock(hs, 1);
        if (bkts == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        if (bkts == hs->hs_buckets) {
                bkts = NULL; /* do nothing */
                goto out;
        }

        rc = __cfs_hash_theta(hs);
        if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
                /* free the newly allocated bkt-table */
                old_size = new_size;
                new_size = CFS_HASH_NBKT(hs);
                rc = -EALREADY;
                goto out;
        }

        LASSERT(hs->hs_rehash_buckets == NULL);
        hs->hs_rehash_buckets = bkts;

        rc = 0;
        cfs_hash_for_each_bucket(hs, &bd, i) {
                if (cfs_hash_is_exiting(hs)) {
                        rc = -ESRCH;
                        /* someone wants to destroy the hash, abort now */
                        if (old_size < new_size) /* OK to free old bkt-table */
                                break;
                        /* it's shrinking, need to free the new bkt-table */
                        hs->hs_rehash_buckets = NULL;
                        old_size = new_size;
                        new_size = CFS_HASH_NBKT(hs);
                        goto out;
                }

                count += cfs_hash_rehash_bd(hs, &bd);
                if (count < CFS_HASH_LOOP_HOG ||
                    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
                        continue;
                }

                count = 0;
                cfs_hash_unlock(hs, 1);
                cfs_cond_resched();
                cfs_hash_lock(hs, 1);
        }

        hs->hs_rehash_count++;

        bkts = hs->hs_buckets;
        hs->hs_buckets = hs->hs_rehash_buckets;
        hs->hs_rehash_buckets = NULL;

        hs->hs_cur_bits = hs->hs_rehash_bits;
 out:
        hs->hs_rehash_bits = 0;
        if (rc == -ESRCH)
                cfs_wi_exit(wi); /* never be scheduled again */
        bsize = cfs_hash_bkt_size(hs);
        cfs_hash_unlock(hs, 1);
        /* can't refer to @hs anymore because it could be destroyed */
        if (bkts != NULL)
                cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
        if (rc != 0)
                CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
        /* cfs_workitem requires us to always return 0 */
        return 0;
}

/**
 * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered cfs_hash_get() and cfs_hash_put() functions will
 * not be called.
 */
void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
                         void *new_key, cfs_hlist_node_t *hnode)
{
        cfs_hash_bd_t        bds[3];
        cfs_hash_bd_t        old_bds[2];
        cfs_hash_bd_t        new_bd;

        LASSERT(!cfs_hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);

        cfs_hash_dual_bd_get(hs, old_key, old_bds);
        cfs_hash_bd_get(hs, new_key, &new_bd);

        bds[0] = old_bds[0];
        bds[1] = old_bds[1];
        bds[2] = new_bd;

        /* NB: bds[0] and bds[1] are ordered already */
        cfs_hash_bd_order(&bds[1], &bds[2]);
        cfs_hash_bd_order(&bds[0], &bds[1]);

        cfs_hash_multi_bd_lock(hs, bds, 3, 1);
        if (likely(old_bds[1].bd_bucket == NULL)) {
                cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
        } else {
                cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
                cfs_hash_bd_add_locked(hs, &new_bd, hnode);
        }
        /* overwrite the key inside the locks, otherwise concurrent
         * operations (e.g. rehash) may observe a stale key */
        cfs_hash_keycpy(hs, new_key, hnode);

        cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
        cfs_hash_unlock(hs, 0);
}
CFS_EXPORT_SYMBOL(cfs_hash_rehash_key);
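
/*
 * Usage sketch (illustrative only): change an object's key with no
 * window where a lookup under either key can miss it.  my_obj and
 * mo_key are hypothetical; the new key is copied into the object by
 * cfs_hash_keycpy() while all affected bucket locks are held.
 */
#if 0
static void
my_obj_change_key(cfs_hash_t *hs, struct my_obj *mo, __u64 new_key)
{
        __u64 old_key = mo->mo_key;

        cfs_hash_rehash_key(hs, &old_key, &new_key, &mo->mo_hnode);
}
#endif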

int cfs_hash_debug_header(char *str, int size)
{
        return snprintf(str, size, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
                        CFS_HASH_BIGNAME_LEN,
                        "name", "cur", "min", "max", "theta", "t-min", "t-max",
                        "flags", "rehash", "count", "maxdep", "maxdepb",
                        " distribution");
}
CFS_EXPORT_SYMBOL(cfs_hash_debug_header);

static cfs_hash_bucket_t **
cfs_hash_full_bkts(cfs_hash_t *hs)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
                return hs->hs_buckets;

        LASSERT(hs->hs_rehash_bits != 0);
        return hs->hs_rehash_bits > hs->hs_cur_bits ?
               hs->hs_rehash_buckets : hs->hs_buckets;
}

static unsigned int
cfs_hash_full_nbkt(cfs_hash_t *hs)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
                return CFS_HASH_NBKT(hs);

        LASSERT(hs->hs_rehash_bits != 0);
        return hs->hs_rehash_bits > hs->hs_cur_bits ?
               CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}
2053 int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)
2054 {
2055         int                    dist[8] = { 0, };
2056         int                    maxdep  = -1;
2057         int                    maxdepb = -1;
2058         int                    total   = 0;
2059         int                    c       = 0;
2060         int                    theta;
2061         int                    i;
2062
2063         if (str == NULL || size == 0)
2064                 return 0;
2065
2066         cfs_hash_lock(hs, 0);
2067         theta = __cfs_hash_theta(hs);
2068
2069         c += snprintf(str + c, size - c, "%-*s ",
2070                       CFS_HASH_BIGNAME_LEN, hs->hs_name);
2071         c += snprintf(str + c, size - c, "%5d ",  1 << hs->hs_cur_bits);
2072         c += snprintf(str + c, size - c, "%5d ",  1 << hs->hs_min_bits);
2073         c += snprintf(str + c, size - c, "%5d ",  1 << hs->hs_max_bits);
2074         c += snprintf(str + c, size - c, "%d.%03d ",
2075                       __cfs_hash_theta_int(theta),
2076                       __cfs_hash_theta_frac(theta));
2077         c += snprintf(str + c, size - c, "%d.%03d ",
2078                       __cfs_hash_theta_int(hs->hs_min_theta),
2079                       __cfs_hash_theta_frac(hs->hs_min_theta));
2080         c += snprintf(str + c, size - c, "%d.%03d ",
2081                       __cfs_hash_theta_int(hs->hs_max_theta),
2082                       __cfs_hash_theta_frac(hs->hs_max_theta));
2083         c += snprintf(str + c, size - c, " 0x%02x ", hs->hs_flags);
2084         c += snprintf(str + c, size - c, "%6d ", hs->hs_rehash_count);
2085
        /*
         * The distribution is a summary of the chained hash depth in
         * each of the libcfs hash buckets.  Each bucket's hsb_count is
         * divided by the hash theta value and used to generate a
         * histogram of the hash distribution.  A uniform hash will
         * result in all hash buckets being close to the average, thus
         * only the first few entries in the histogram will be non-zero.
         * If your hash function results in a non-uniform hash, this will
         * be observable as outlier buckets in the distribution histogram.
         *
         * Uniform hash distribution:      128/128/0/0/0/0/0/0
         * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
         */
        for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
                cfs_hash_bd_t  bd;

                bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
                cfs_hash_bd_lock(hs, &bd, 0);
                if (maxdep < bd.bd_bucket->hsb_depmax) {
                        maxdep  = bd.bd_bucket->hsb_depmax;
#ifdef __KERNEL__
                        maxdepb = cfs_ffz(~maxdep);
#endif
                }
                total += bd.bd_bucket->hsb_count;
                dist[min(__cfs_fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
                cfs_hash_bd_unlock(hs, &bd, 0);
        }

        c += snprintf(str + c, size - c, "%7d ", total);
        c += snprintf(str + c, size - c, "%7d ", maxdep);
        c += snprintf(str + c, size - c, "%7d ", maxdepb);
        for (i = 0; i < 8; i++)
                c += snprintf(str + c, size - c, "%d%c",  dist[i],
                              (i == 7) ? '\n' : '/');

        cfs_hash_unlock(hs, 0);

        return c;
}
CFS_EXPORT_SYMBOL(cfs_hash_debug_str);
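
/*
 * Usage sketch (illustrative only): emit the statistics table for one
 * hash, e.g. from a proc read handler.  The buffer size and my_print()
 * are placeholders.
 */
#if 0
static void
my_hash_dump_stats(cfs_hash_t *hs)
{
        char str[1024];

        cfs_hash_debug_header(str, sizeof(str));
        my_print(str);                  /* column headers */

        cfs_hash_debug_str(hs, str, sizeof(str));
        my_print(str);                  /* one stats line for @hs */
}
#endif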