1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2015, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * libcfs/libcfs/hash.c
37  *
38  * Implement a hash table class for general hashing use in the Lustre system.
39  *
40  * Author: YuZhangyong <yzy@clusterfs.com>
41  *
42  * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
43  * - Simplified API and improved documentation
44  * - Added per-hash feature flags:
45  *   * CFS_HASH_DEBUG additional validation
46  *   * CFS_HASH_REHASH dynamic rehashing
47  * - Added per-hash statistics
48  * - General performance enhancements
49  *
50  * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
51  * - moved everything to libcfs
52  * - don't allow cur_bits != max_bits unless CFS_HASH_REHASH is set
53  * - ignore hs_rwlock when CFS_HASH_REHASH is not set
54  * - buckets are allocated one by one (instead of as contiguous memory),
55  *   to avoid unnecessary cache-line conflicts
56  *
57  * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
58  * - a "bucket" is now a group of hlist_heads; the user can specify the
59  *   bucket size via the bkt_bits parameter of cfs_hash_create(), and all
60  *   hlist_heads in a bucket share one lock to reduce memory overhead.
61  *
62  * - support lockless hash, caller will take care of locks:
63  *   avoid lock overhead for hash tables that are already protected
64  *   by locking in the caller for another reason
65  *
66  * - support both spinlock and rwlock for the bucket:
67  *   the overhead of spinlock contention is lower than the read/write
68  *   contention of an rwlock, so serializing operations on a bucket with a
69  *   spinlock is more reasonable for frequently changed hash tables
70  *
71  * - support single-lock mode:
72  *   one lock protects all hash operations, to avoid the overhead of
73  *   multiple locks when the hash table is always small
74  *
75  * - removed a lot of unnecessary addref & decref on hash elements:
76  *   addref & decref are atomic operations in many use-cases, which
77  *   is expensive.
78  *
79  * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
80  *   some Lustre use-cases require these functions to be strictly
81  *   non-blocking, so in those cases we schedule any required rehash on a
82  *   different thread.
83  *
84  * - safer rehash on large hash tables
85  *   In the old implementation, the rehash function exclusively locked the
86  *   hash table and finished the rehash in one batch; this is dangerous on
87  *   an SMP system because rehashing millions of elements can take a long
88  *   time.  The new rehash can release the lock and relax the CPU in the
89  *   middle of a rehash, so it is safe for another thread to search/change
90  *   the hash table even while it is rehashing.
91  *
92  * - support two different refcount modes
93  *   . hash table has refcount on element
94  *   . hash table doesn't change refcount on adding/removing element
95  *
96  * - support long name hash table (for param-tree)
97  *
98  * - fix a bug in cfs_hash_rehash_key:
99  *   in the old implementation, cfs_hash_rehash_key could corrupt the
100  *   hash table because @key was overwritten without any protection.
101  *   Now the user must define hs_keycpy for rehash-enabled hash tables,
102  *   and cfs_hash_rehash_key overwrites the hash key under the lock by
103  *   calling hs_keycpy.
104  *
105  * - better hash iteration:
106  *   Now we support both locked and lockless iteration of the hash table.
107  *   Also, the user can stop iteration by returning 1 from the callback.
108  */
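
/*
 * Illustrative sketch of how a caller wires the features above together.
 * The operation names (hs_hash, hs_key, hs_keycmp, hs_object, hs_get,
 * hs_put) are the ones cfs_hash_create() below insists on; the exact
 * prototypes should be taken from libcfs_hash.h.  "struct example_obj",
 * the example_* helpers, and cfs_hash_u64_hash() being available from
 * libcfs_hash.h are assumptions made for this sketch only.
 *
 *	struct example_obj {
 *		struct hlist_node	eo_hnode;	// linkage into the hash
 *		atomic_t		eo_refcount;	// object refcount
 *		__u64			eo_key;		// lookup key
 *	};
 *
 *	static unsigned
 *	example_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 *	{
 *		return cfs_hash_u64_hash(*(const __u64 *)key, mask);
 *	}
 *
 *	static void *example_key(struct hlist_node *hnode)
 *	{
 *		return &hlist_entry(hnode, struct example_obj, eo_hnode)->eo_key;
 *	}
 *
 *	static int example_keycmp(const void *key, struct hlist_node *hnode)
 *	{
 *		return *(const __u64 *)key ==
 *		       hlist_entry(hnode, struct example_obj, eo_hnode)->eo_key;
 *	}
 *
 *	static void *example_object(struct hlist_node *hnode)
 *	{
 *		return hlist_entry(hnode, struct example_obj, eo_hnode);
 *	}
 *
 *	static void example_get(struct cfs_hash *hs, struct hlist_node *hnode)
 *	{
 *		atomic_inc(&hlist_entry(hnode, struct example_obj,
 *					eo_hnode)->eo_refcount);
 *	}
 *
 *	static void example_put(struct cfs_hash *hs, struct hlist_node *hnode)
 *	{
 *		struct example_obj *obj;
 *
 *		obj = hlist_entry(hnode, struct example_obj, eo_hnode);
 *		if (atomic_dec_and_test(&obj->eo_refcount))
 *			kfree(obj);
 *	}
 *
 *	static struct cfs_hash_ops example_hash_ops = {
 *		.hs_hash	= example_hash,
 *		.hs_key		= example_key,
 *		.hs_keycmp	= example_keycmp,
 *		.hs_object	= example_object,
 *		.hs_get		= example_get,
 *		.hs_put		= example_put,
 *	};
 */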
109 #include <linux/seq_file.h>
110
111 #include <libcfs/libcfs.h>
112
113 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
114 static unsigned int warn_on_depth = 8;
115 module_param(warn_on_depth, uint, 0644);
116 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
117 #endif
118
119 struct cfs_wi_sched *cfs_sched_rehash;
120
121 static inline void
122 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
123
124 static inline void
125 cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
126
127 static inline void
128 cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
129         __acquires(&lock->spin)
130 {
131         spin_lock(&lock->spin);
132 }
133
134 static inline void
135 cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
136         __releases(&lock->spin)
137 {
138         spin_unlock(&lock->spin);
139 }
140
141 static inline void
142 cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
143         __acquires(&lock->rw)
144 {
145         if (!exclusive)
146                 read_lock(&lock->rw);
147         else
148                 write_lock(&lock->rw);
149 }
150
151 static inline void
152 cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
153         __releases(&lock->rw)
154 {
155         if (!exclusive)
156                 read_unlock(&lock->rw);
157         else
158                 write_unlock(&lock->rw);
159 }
160
161 /** No lock hash */
162 static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
163         .hs_lock        = cfs_hash_nl_lock,
164         .hs_unlock      = cfs_hash_nl_unlock,
165         .hs_bkt_lock    = cfs_hash_nl_lock,
166         .hs_bkt_unlock  = cfs_hash_nl_unlock,
167 };
168
169 /** no bucket lock, one spinlock to protect everything */
170 static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
171         .hs_lock        = cfs_hash_spin_lock,
172         .hs_unlock      = cfs_hash_spin_unlock,
173         .hs_bkt_lock    = cfs_hash_nl_lock,
174         .hs_bkt_unlock  = cfs_hash_nl_unlock,
175 };
176
177 /** spin bucket lock, rehash is enabled */
178 static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
179         .hs_lock        = cfs_hash_rw_lock,
180         .hs_unlock      = cfs_hash_rw_unlock,
181         .hs_bkt_lock    = cfs_hash_spin_lock,
182         .hs_bkt_unlock  = cfs_hash_spin_unlock,
183 };
184
185 /** rw bucket lock, rehash is enabled */
186 static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
187         .hs_lock        = cfs_hash_rw_lock,
188         .hs_unlock      = cfs_hash_rw_unlock,
189         .hs_bkt_lock    = cfs_hash_rw_lock,
190         .hs_bkt_unlock  = cfs_hash_rw_unlock,
191 };
192
193 /** spin bucket lock, rehash is disabled */
194 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
195         .hs_lock        = cfs_hash_nl_lock,
196         .hs_unlock      = cfs_hash_nl_unlock,
197         .hs_bkt_lock    = cfs_hash_spin_lock,
198         .hs_bkt_unlock  = cfs_hash_spin_unlock,
199 };
200
201 /** rw bucket lock, rehash is disabled */
202 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
203         .hs_lock        = cfs_hash_nl_lock,
204         .hs_unlock      = cfs_hash_nl_unlock,
205         .hs_bkt_lock    = cfs_hash_rw_lock,
206         .hs_bkt_unlock  = cfs_hash_rw_unlock,
207 };
208
209 static void
210 cfs_hash_lock_setup(struct cfs_hash *hs)
211 {
212         if (cfs_hash_with_no_lock(hs)) {
213                 hs->hs_lops = &cfs_hash_nl_lops;
214
215         } else if (cfs_hash_with_no_bktlock(hs)) {
216                 hs->hs_lops = &cfs_hash_nbl_lops;
217                 spin_lock_init(&hs->hs_lock.spin);
218
219         } else if (cfs_hash_with_rehash(hs)) {
220                 rwlock_init(&hs->hs_lock.rw);
221
222                 if (cfs_hash_with_rw_bktlock(hs))
223                         hs->hs_lops = &cfs_hash_bkt_rw_lops;
224                 else if (cfs_hash_with_spin_bktlock(hs))
225                         hs->hs_lops = &cfs_hash_bkt_spin_lops;
226                 else
227                         LBUG();
228         } else {
229                 if (cfs_hash_with_rw_bktlock(hs))
230                         hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
231                 else if (cfs_hash_with_spin_bktlock(hs))
232                         hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
233                 else
234                         LBUG();
235         }
236 }
237
238 /**
239  * Simple hash head without depth tracking
240  * new element is always added to head of hlist
241  */
242 struct cfs_hash_head {
243         struct hlist_head       hh_head;        /**< entries list */
244 };
245
246 static int
247 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
248 {
249         return sizeof(struct cfs_hash_head);
250 }
251
252 static struct hlist_head *
253 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
254 {
255         struct cfs_hash_head *head;
256
257         head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
258         return &head[bd->bd_offset].hh_head;
259 }
260
261 static int
262 cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
263                       struct hlist_node *hnode)
264 {
265         hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
266         return -1; /* unknown depth */
267 }
268
269 static int
270 cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
271                       struct hlist_node *hnode)
272 {
273         hlist_del_init(hnode);
274         return -1; /* unknown depth */
275 }
276
277 /**
278  * Simple hash head with depth tracking
279  * new element is always added to head of hlist
280  */
281 struct cfs_hash_head_dep {
282         struct hlist_head       hd_head;        /**< entries list */
283         unsigned int            hd_depth;       /**< list length */
284 };
285
286 static int
287 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
288 {
289         return sizeof(struct cfs_hash_head_dep);
290 }
291
292 static struct hlist_head *
293 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
294 {
295         struct cfs_hash_head_dep   *head;
296
297         head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
298         return &head[bd->bd_offset].hd_head;
299 }
300
301 static int
302 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
303                       struct hlist_node *hnode)
304 {
305         struct cfs_hash_head_dep *hh;
306
307         hh = container_of(cfs_hash_hd_hhead(hs, bd),
308                           struct cfs_hash_head_dep, hd_head);
309         hlist_add_head(hnode, &hh->hd_head);
310         return ++hh->hd_depth;
311 }
312
313 static int
314 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
315                       struct hlist_node *hnode)
316 {
317         struct cfs_hash_head_dep *hh;
318
319         hh = container_of(cfs_hash_hd_hhead(hs, bd),
320                           struct cfs_hash_head_dep, hd_head);
321         hlist_del_init(hnode);
322         return --hh->hd_depth;
323 }
324
325 /**
326  * double links hash head without depth tracking
327  * new element is always added to tail of hlist
328  */
329 struct cfs_hash_dhead {
330         struct hlist_head       dh_head;        /**< entries list */
331         struct hlist_node       *dh_tail;       /**< the last entry */
332 };
333
334 static int
335 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
336 {
337         return sizeof(struct cfs_hash_dhead);
338 }
339
340 static struct hlist_head *
341 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
342 {
343         struct cfs_hash_dhead *head;
344
345         head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
346         return &head[bd->bd_offset].dh_head;
347 }
348
349 static int
350 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
351                       struct hlist_node *hnode)
352 {
353         struct cfs_hash_dhead *dh;
354
355         dh = container_of(cfs_hash_dh_hhead(hs, bd),
356                           struct cfs_hash_dhead, dh_head);
357         if (dh->dh_tail != NULL) /* not empty */
358                 hlist_add_behind(hnode, dh->dh_tail);
359         else /* empty list */
360                 hlist_add_head(hnode, &dh->dh_head);
361         dh->dh_tail = hnode;
362         return -1; /* unknown depth */
363 }
364
365 static int
366 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
367                       struct hlist_node *hnd)
368 {
369         struct cfs_hash_dhead *dh;
370
371         dh = container_of(cfs_hash_dh_hhead(hs, bd),
372                           struct cfs_hash_dhead, dh_head);
373         if (hnd->next == NULL) { /* it's the tail */
374                 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
375                               container_of(hnd->pprev, struct hlist_node, next);
376         }
377         hlist_del_init(hnd);
378         return -1; /* unknown depth */
379 }
380
381 /**
382  * double links hash head with depth tracking
383  * new element is always added to tail of hlist
384  */
385 struct cfs_hash_dhead_dep {
386         struct hlist_head       dd_head;        /**< entries list */
387         struct hlist_node       *dd_tail;       /**< the last entry */
388         unsigned int            dd_depth;       /**< list length */
389 };
390
391 static int
392 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
393 {
394         return sizeof(struct cfs_hash_dhead_dep);
395 }
396
397 static struct hlist_head *
398 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
399 {
400         struct cfs_hash_dhead_dep *head;
401
402         head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
403         return &head[bd->bd_offset].dd_head;
404 }
405
406 static int
407 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
408                       struct hlist_node *hnode)
409 {
410         struct cfs_hash_dhead_dep *dh;
411
412         dh = container_of(cfs_hash_dd_hhead(hs, bd),
413                           struct cfs_hash_dhead_dep, dd_head);
414         if (dh->dd_tail != NULL) /* not empty */
415                 hlist_add_behind(hnode, dh->dd_tail);
416         else /* empty list */
417                 hlist_add_head(hnode, &dh->dd_head);
418         dh->dd_tail = hnode;
419         return ++dh->dd_depth;
420 }
421
422 static int
423 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
424                       struct hlist_node *hnd)
425 {
426         struct cfs_hash_dhead_dep *dh;
427
428         dh = container_of(cfs_hash_dd_hhead(hs, bd),
429                           struct cfs_hash_dhead_dep, dd_head);
430         if (hnd->next == NULL) { /* it's the tail */
431                 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
432                               container_of(hnd->pprev, struct hlist_node, next);
433         }
434         hlist_del_init(hnd);
435         return --dh->dd_depth;
436 }
437
438 static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
439        .hop_hhead      = cfs_hash_hh_hhead,
440        .hop_hhead_size = cfs_hash_hh_hhead_size,
441        .hop_hnode_add  = cfs_hash_hh_hnode_add,
442        .hop_hnode_del  = cfs_hash_hh_hnode_del,
443 };
444
445 static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
446        .hop_hhead      = cfs_hash_hd_hhead,
447        .hop_hhead_size = cfs_hash_hd_hhead_size,
448        .hop_hnode_add  = cfs_hash_hd_hnode_add,
449        .hop_hnode_del  = cfs_hash_hd_hnode_del,
450 };
451
452 static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
453        .hop_hhead      = cfs_hash_dh_hhead,
454        .hop_hhead_size = cfs_hash_dh_hhead_size,
455        .hop_hnode_add  = cfs_hash_dh_hnode_add,
456        .hop_hnode_del  = cfs_hash_dh_hnode_del,
457 };
458
459 static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
460        .hop_hhead      = cfs_hash_dd_hhead,
461        .hop_hhead_size = cfs_hash_dd_hhead_size,
462        .hop_hnode_add  = cfs_hash_dd_hnode_add,
463        .hop_hnode_del  = cfs_hash_dd_hnode_del,
464 };
465
466 static void
467 cfs_hash_hlist_setup(struct cfs_hash *hs)
468 {
469         if (cfs_hash_with_add_tail(hs)) {
470                 hs->hs_hops = cfs_hash_with_depth(hs) ?
471                               &cfs_hash_dd_hops : &cfs_hash_dh_hops;
472         } else {
473                 hs->hs_hops = cfs_hash_with_depth(hs) ?
474                               &cfs_hash_hd_hops : &cfs_hash_hh_hops;
475         }
476 }
477
478 static void
479 cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
480                      unsigned int bits, const void *key, struct cfs_hash_bd *bd)
481 {
482         unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
483
484         LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
485
486         bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
487         bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
488 }
489
490 void
491 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
492 {
493         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
494         if (likely(hs->hs_rehash_buckets == NULL)) {
495                 cfs_hash_bd_from_key(hs, hs->hs_buckets,
496                                      hs->hs_cur_bits, key, bd);
497         } else {
498                 LASSERT(hs->hs_rehash_bits != 0);
499                 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
500                                      hs->hs_rehash_bits, key, bd);
501         }
502 }
503 EXPORT_SYMBOL(cfs_hash_bd_get);
504
505 static inline void
506 cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
507 {
508         if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
509                 return;
510
511         bd->bd_bucket->hsb_depmax = dep_cur;
512 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
513         if (likely(warn_on_depth == 0 ||
514                    max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
515                 return;
516
517         spin_lock(&hs->hs_dep_lock);
518         hs->hs_dep_max  = dep_cur;
519         hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
520         hs->hs_dep_off  = bd->bd_offset;
521         hs->hs_dep_bits = hs->hs_cur_bits;
522         spin_unlock(&hs->hs_dep_lock);
523
524         cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
525 # endif
526 }
527
528 void
529 cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
530                         struct hlist_node *hnode)
531 {
532         int rc;
533
534         rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
535         cfs_hash_bd_dep_record(hs, bd, rc);
536         bd->bd_bucket->hsb_version++;
537         if (unlikely(bd->bd_bucket->hsb_version == 0))
538                 bd->bd_bucket->hsb_version++;
539         bd->bd_bucket->hsb_count++;
540
541         if (cfs_hash_with_counter(hs))
542                 atomic_inc(&hs->hs_count);
543         if (!cfs_hash_with_no_itemref(hs))
544                 cfs_hash_get(hs, hnode);
545 }
546 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
547
548 void
549 cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
550                        struct hlist_node *hnode)
551 {
552         hs->hs_hops->hop_hnode_del(hs, bd, hnode);
553
554         LASSERT(bd->bd_bucket->hsb_count > 0);
555         bd->bd_bucket->hsb_count--;
556         bd->bd_bucket->hsb_version++;
557         if (unlikely(bd->bd_bucket->hsb_version == 0))
558                 bd->bd_bucket->hsb_version++;
559
560         if (cfs_hash_with_counter(hs)) {
561                 LASSERT(atomic_read(&hs->hs_count) > 0);
562                 atomic_dec(&hs->hs_count);
563         }
564         if (!cfs_hash_with_no_itemref(hs))
565                 cfs_hash_put_locked(hs, hnode);
566 }
567 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
568
569 void
570 cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
571                         struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
572 {
573         struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
574         struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
575         int                rc;
576
577         if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
578                 return;
579
580         /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
581          * in cfs_hash_bd_del/add_locked */
582         hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
583         rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
584         cfs_hash_bd_dep_record(hs, bd_new, rc);
585
586         LASSERT(obkt->hsb_count > 0);
587         obkt->hsb_count--;
588         obkt->hsb_version++;
589         if (unlikely(obkt->hsb_version == 0))
590                 obkt->hsb_version++;
591         nbkt->hsb_count++;
592         nbkt->hsb_version++;
593         if (unlikely(nbkt->hsb_version == 0))
594                 nbkt->hsb_version++;
595 }
596
597 enum {
598         /** always set, for sanity (avoid ZERO intent) */
599         CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
600         /** return entry with a ref */
601         CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
602         /** add entry if not existing */
603         CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
604         /** delete entry, ignore other masks */
605         CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
606 };
607
608 enum cfs_hash_lookup_intent {
609         /** return item w/o refcount */
610         CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
611         /** return item with refcount */
612         CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
613                                        CFS_HS_LOOKUP_MASK_REF),
614         /** return item w/o refcount if existed, otherwise add */
615         CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
616                                        CFS_HS_LOOKUP_MASK_ADD),
617         /** return item with refcount if existed, otherwise add */
618         CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
619                                        CFS_HS_LOOKUP_MASK_ADD),
620         /** delete if existed */
621         CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
622                                        CFS_HS_LOOKUP_MASK_DEL)
623 };
624
625 static struct hlist_node *
626 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
627                           const void *key, struct hlist_node *hnode,
628                           enum cfs_hash_lookup_intent intent)
629
630 {
631         struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
632         struct hlist_node  *ehnode;
633         struct hlist_node  *match;
634         int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
635
636         /* with this function, we can avoid a lot of useless refcount ops,
637          * which are expensive atomic operations most of the time. */
638         match = intent_add ? NULL : hnode;
639         hlist_for_each(ehnode, hhead) {
640                 if (!cfs_hash_keycmp(hs, key, ehnode))
641                         continue;
642
643                 if (match != NULL && match != ehnode) /* can't match */
644                         continue;
645
646                 /* match and ... */
647                 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
648                         cfs_hash_bd_del_locked(hs, bd, ehnode);
649                         return ehnode;
650                 }
651
652                 /* caller wants refcount? */
653                 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
654                         cfs_hash_get(hs, ehnode);
655                 return ehnode;
656         }
657         /* no match item */
658         if (!intent_add)
659                 return NULL;
660
661         LASSERT(hnode != NULL);
662         cfs_hash_bd_add_locked(hs, bd, hnode);
663         return hnode;
664 }
665
666 struct hlist_node *
667 cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
668                           const void *key)
669 {
670         return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
671                                         CFS_HS_LOOKUP_IT_FIND);
672 }
673 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
674
675 struct hlist_node *
676 cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
677                         const void *key)
678 {
679         return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
680                                         CFS_HS_LOOKUP_IT_PEEK);
681 }
682 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
683
684 struct hlist_node *
685 cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
686                            const void *key, struct hlist_node *hnode,
687                            int noref)
688 {
689         return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
690                                         CFS_HS_LOOKUP_IT_ADD |
691                                         (!noref * CFS_HS_LOOKUP_MASK_REF));
692 }
693
694 struct hlist_node *
695 cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
696                            const void *key, struct hlist_node *hnode)
697 {
698         /* hnode can be NULL, we find the first item with @key */
699         return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
700                                         CFS_HS_LOOKUP_IT_FINDDEL);
701 }
702
703 static void
704 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
705                        unsigned n, int excl)
706 {
707         struct cfs_hash_bucket *prev = NULL;
708         int                i;
709
710         /**
711          * bds must be ordered by ascending bd->bd_bucket->hsb_index.
712          * NB: it's possible that several bds point to the same bucket but
713          * have different bd::bd_offset, so we must take care to avoid deadlock.
714          */
715         cfs_hash_for_each_bd(bds, n, i) {
716                 if (prev == bds[i].bd_bucket)
717                         continue;
718
719                 LASSERT(prev == NULL ||
720                         prev->hsb_index < bds[i].bd_bucket->hsb_index);
721                 cfs_hash_bd_lock(hs, &bds[i], excl);
722                 prev = bds[i].bd_bucket;
723         }
724 }
725
726 static void
727 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
728                          unsigned n, int excl)
729 {
730         struct cfs_hash_bucket *prev = NULL;
731         int                i;
732
733         cfs_hash_for_each_bd(bds, n, i) {
734                 if (prev != bds[i].bd_bucket) {
735                         cfs_hash_bd_unlock(hs, &bds[i], excl);
736                         prev = bds[i].bd_bucket;
737                 }
738         }
739 }
740
741 static struct hlist_node *
742 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
743                                 unsigned n, const void *key)
744 {
745         struct hlist_node *ehnode;
746         unsigned          i;
747
748         cfs_hash_for_each_bd(bds, n, i) {
749                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
750                                                         CFS_HS_LOOKUP_IT_FIND);
751                 if (ehnode != NULL)
752                         return ehnode;
753         }
754         return NULL;
755 }
756
757 static struct hlist_node *
758 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
759                                  unsigned n, const void *key,
760                                  struct hlist_node *hnode, int noref)
761 {
762         struct hlist_node *ehnode;
763         int               intent;
764         unsigned          i;
765
766         LASSERT(hnode != NULL);
767         intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
768
769         cfs_hash_for_each_bd(bds, n, i) {
770                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
771                                                    NULL, intent);
772                 if (ehnode != NULL)
773                         return ehnode;
774         }
775
776         if (i == 1) { /* only one bucket */
777                 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
778         } else {
779                 struct cfs_hash_bd      mybd;
780
781                 cfs_hash_bd_get(hs, key, &mybd);
782                 cfs_hash_bd_add_locked(hs, &mybd, hnode);
783         }
784
785         return hnode;
786 }
787
788 static struct hlist_node *
789 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
790                                  unsigned n, const void *key,
791                                  struct hlist_node *hnode)
792 {
793         struct hlist_node *ehnode;
794         unsigned           i;
795
796         cfs_hash_for_each_bd(bds, n, i) {
797                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
798                                                    CFS_HS_LOOKUP_IT_FINDDEL);
799                 if (ehnode != NULL)
800                         return ehnode;
801         }
802         return NULL;
803 }
804
805 static void
806 cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
807 {
808         int     rc;
809
810         if (bd2->bd_bucket == NULL)
811                 return;
812
813         if (bd1->bd_bucket == NULL) {
814                 *bd1 = *bd2;
815                 bd2->bd_bucket = NULL;
816                 return;
817         }
818
819         rc = cfs_hash_bd_compare(bd1, bd2);
820         if (rc == 0) {
821                 bd2->bd_bucket = NULL;
822
823         } else if (rc > 0) { /* swap bd1 and bd2 */
824                 struct cfs_hash_bd tmp;
825
826                 tmp = *bd2;
827                 *bd2 = *bd1;
828                 *bd1 = tmp;
829         }
830 }
831
832 void
833 cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
834                      struct cfs_hash_bd *bds)
835 {
836         /* NB: caller should hold hs_lock.rw if REHASH is set */
837         cfs_hash_bd_from_key(hs, hs->hs_buckets,
838                              hs->hs_cur_bits, key, &bds[0]);
839         if (likely(hs->hs_rehash_buckets == NULL)) {
840                 /* no rehash or not rehashing */
841                 bds[1].bd_bucket = NULL;
842                 return;
843         }
844
845         LASSERT(hs->hs_rehash_bits != 0);
846         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
847                              hs->hs_rehash_bits, key, &bds[1]);
848
849         cfs_hash_bd_order(&bds[0], &bds[1]);
850 }
851
852 void
853 cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
854 {
855         cfs_hash_multi_bd_lock(hs, bds, 2, excl);
856 }
857
858 void
859 cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
860 {
861         cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
862 }
863
864 struct hlist_node *
865 cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
866                                const void *key)
867 {
868         return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
869 }
870
871 struct hlist_node *
872 cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
873                                 const void *key, struct hlist_node *hnode,
874                                 int noref)
875 {
876         return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
877                                                 hnode, noref);
878 }
879
880 struct hlist_node *
881 cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
882                                 const void *key, struct hlist_node *hnode)
883 {
884         return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
885 }
886
887 static void
888 cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
889                       int bkt_size, int prev_size, int size)
890 {
891         int     i;
892
893         for (i = prev_size; i < size; i++) {
894                 if (buckets[i] != NULL)
895                         LIBCFS_FREE(buckets[i], bkt_size);
896         }
897
898         LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
899 }
900
901 /*
902  * Create or grow bucket memory. Return old_buckets if no allocation was
903  * needed, the newly allocated buckets if allocation was needed and
904  * successful, and NULL on error.
905  */
906 static struct cfs_hash_bucket **
907 cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
908                          unsigned int old_size, unsigned int new_size)
909 {
910         struct cfs_hash_bucket **new_bkts;
911         int                 i;
912
913         LASSERT(old_size == 0 || old_bkts != NULL);
914
915         if (old_bkts != NULL && old_size == new_size)
916                 return old_bkts;
917
918         LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
919         if (new_bkts == NULL)
920                 return NULL;
921
922         if (old_bkts != NULL) {
923                 memcpy(new_bkts, old_bkts,
924                        min(old_size, new_size) * sizeof(*old_bkts));
925         }
926
927         for (i = old_size; i < new_size; i++) {
928                 struct hlist_head *hhead;
929                 struct cfs_hash_bd     bd;
930
931                 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
932                 if (new_bkts[i] == NULL) {
933                         cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
934                                               old_size, new_size);
935                         return NULL;
936                 }
937
938                 new_bkts[i]->hsb_index   = i;
939                 new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
940                 new_bkts[i]->hsb_depmax  = -1; /* unknown */
941                 bd.bd_bucket = new_bkts[i];
942                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
943                         INIT_HLIST_HEAD(hhead);
944
945                 if (cfs_hash_with_no_lock(hs) ||
946                     cfs_hash_with_no_bktlock(hs))
947                         continue;
948
949                 if (cfs_hash_with_rw_bktlock(hs))
950                         rwlock_init(&new_bkts[i]->hsb_lock.rw);
951                 else if (cfs_hash_with_spin_bktlock(hs))
952                         spin_lock_init(&new_bkts[i]->hsb_lock.spin);
953                 else
954                         LBUG(); /* invalid use-case */
955         }
956         return new_bkts;
957 }
958
959 /**
960  * Initialize new libcfs hash, where:
961  * @name     - Descriptive hash name
962  * @cur_bits - Initial hash table size, in bits
963  * @max_bits - Maximum allowed hash table resize, in bits
964  * @ops      - Registered hash table operations
965  * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
966  *           - CFS_HASH_SORT enable chained hash sort
967  */
968 static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
969
970 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
971 static int cfs_hash_dep_print(struct cfs_workitem *wi)
972 {
973         struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
974         int         dep;
975         int         bkt;
976         int         off;
977         int         bits;
978
979         spin_lock(&hs->hs_dep_lock);
980         dep  = hs->hs_dep_max;
981         bkt  = hs->hs_dep_bkt;
982         off  = hs->hs_dep_off;
983         bits = hs->hs_dep_bits;
984         spin_unlock(&hs->hs_dep_lock);
985
986         LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
987                       hs->hs_name, bits, dep, bkt, off);
988         spin_lock(&hs->hs_dep_lock);
989         hs->hs_dep_bits = 0; /* mark as workitem done */
990         spin_unlock(&hs->hs_dep_lock);
991         return 0;
992 }
993
994 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
995 {
996         spin_lock_init(&hs->hs_dep_lock);
997         cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
998 }
999
1000 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
1001 {
1002         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
1003                 return;
1004
1005         spin_lock(&hs->hs_dep_lock);
1006         while (hs->hs_dep_bits != 0) {
1007                 spin_unlock(&hs->hs_dep_lock);
1008                 cond_resched();
1009                 spin_lock(&hs->hs_dep_lock);
1010         }
1011         spin_unlock(&hs->hs_dep_lock);
1012 }
1013
1014 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1015
1016 static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
1017 static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
1018
1019 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1020
1021 struct cfs_hash *
1022 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1023                 unsigned bkt_bits, unsigned extra_bytes,
1024                 unsigned min_theta, unsigned max_theta,
1025                 struct cfs_hash_ops *ops, unsigned flags)
1026 {
1027         struct cfs_hash *hs;
1028         int         len;
1029
1030         ENTRY;
1031
1032         CLASSERT(CFS_HASH_THETA_BITS < 15);
1033
1034         LASSERT(name != NULL);
1035         LASSERT(ops != NULL);
1036         LASSERT(ops->hs_key);
1037         LASSERT(ops->hs_hash);
1038         LASSERT(ops->hs_object);
1039         LASSERT(ops->hs_keycmp);
1040         LASSERT(ops->hs_get != NULL);
1041         LASSERT(ops->hs_put != NULL || ops->hs_put_locked != NULL);
1042
1043         if ((flags & CFS_HASH_REHASH) != 0)
1044                 flags |= CFS_HASH_COUNTER; /* must have counter */
1045
1046         LASSERT(cur_bits > 0);
1047         LASSERT(cur_bits >= bkt_bits);
1048         LASSERT(max_bits >= cur_bits && max_bits < 31);
1049         LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1050         LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1051                      (flags & CFS_HASH_NO_LOCK) == 0));
1052         LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1053                       ops->hs_keycpy != NULL));
1054
1055         len = (flags & CFS_HASH_BIGNAME) == 0 ?
1056               CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1057         LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1058         if (hs == NULL)
1059                 RETURN(NULL);
1060
1061         strlcpy(hs->hs_name, name, len);
1062         hs->hs_flags = flags;
1063
1064         atomic_set(&hs->hs_refcount, 1);
1065         atomic_set(&hs->hs_count, 0);
1066
1067         cfs_hash_lock_setup(hs);
1068         cfs_hash_hlist_setup(hs);
1069
1070         hs->hs_cur_bits = (__u8)cur_bits;
1071         hs->hs_min_bits = (__u8)cur_bits;
1072         hs->hs_max_bits = (__u8)max_bits;
1073         hs->hs_bkt_bits = (__u8)bkt_bits;
1074
1075         hs->hs_ops         = ops;
1076         hs->hs_extra_bytes = extra_bytes;
1077         hs->hs_rehash_bits = 0;
1078         cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1079         cfs_hash_depth_wi_init(hs);
1080
1081         if (cfs_hash_with_rehash(hs))
1082                 __cfs_hash_set_theta(hs, min_theta, max_theta);
1083
1084         hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1085                                                   CFS_HASH_NBKT(hs));
1086         if (hs->hs_buckets != NULL)
1087                 return hs;
1088
1089         LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1090         RETURN(NULL);
1091 }
1092 EXPORT_SYMBOL(cfs_hash_create);
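
/*
 * A minimal creation sketch using the hypothetical example_hash_ops from the
 * header of this file.  CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA and
 * CFS_HASH_DEFAULT are assumed to come from libcfs_hash.h; check that header
 * for the authoritative flag and theta definitions.
 *
 *	struct cfs_hash *hash;
 *
 *	hash = cfs_hash_create("example", 5,	// start with 2^5 hash heads
 *			       10,		// allow growth up to 2^10
 *			       3,		// 2^3 hlist_heads per bucket
 *			       0,		// no extra bytes per bucket
 *			       CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			       &example_hash_ops, CFS_HASH_DEFAULT);
 *	if (hash == NULL)
 *		return -ENOMEM;
 *	...
 *	cfs_hash_putref(hash);	// releases the initial reference
 */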
1093
1094 /**
1095  * Cleanup libcfs hash @hs.
1096  */
1097 static void
1098 cfs_hash_destroy(struct cfs_hash *hs)
1099 {
1100         struct hlist_node     *hnode;
1101         struct hlist_node     *pos;
1102         struct cfs_hash_bd         bd;
1103         int                   i;
1104         ENTRY;
1105
1106         LASSERT(hs != NULL);
1107         LASSERT(!cfs_hash_is_exiting(hs) &&
1108                 !cfs_hash_is_iterating(hs));
1109
1110         /**
1111          * prohibit further rehashes; no lock is needed because we are
1112          * the only (last) thread that can change it.
1113          */
1114         hs->hs_exiting = 1;
1115         if (cfs_hash_with_rehash(hs))
1116                 cfs_hash_rehash_cancel(hs);
1117
1118         cfs_hash_depth_wi_cancel(hs);
1119         /* rehash should be done/canceled */
1120         LASSERT(hs->hs_buckets != NULL &&
1121                 hs->hs_rehash_buckets == NULL);
1122
1123         cfs_hash_for_each_bucket(hs, &bd, i) {
1124                 struct hlist_head *hhead;
1125
1126                 LASSERT(bd.bd_bucket != NULL);
1127                 /* no need to take this lock, just for consistent code */
1128                 cfs_hash_bd_lock(hs, &bd, 1);
1129
1130                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1131                         hlist_for_each_safe(hnode, pos, hhead) {
1132                                 LASSERTF(!cfs_hash_with_assert_empty(hs),
1133                                          "hash %s bucket %u(%u) is not "
1134                                          "empty: %u items left\n",
1135                                          hs->hs_name, bd.bd_bucket->hsb_index,
1136                                          bd.bd_offset, bd.bd_bucket->hsb_count);
1137                                 /* can't validate the key here, because we
1138                                  * may have interrupted a rehash */
1139                                 cfs_hash_bd_del_locked(hs, &bd, hnode);
1140                                 cfs_hash_exit(hs, hnode);
1141                         }
1142                 }
1143                 LASSERT(bd.bd_bucket->hsb_count == 0);
1144                 cfs_hash_bd_unlock(hs, &bd, 1);
1145                 cond_resched();
1146         }
1147
1148         LASSERT(atomic_read(&hs->hs_count) == 0);
1149
1150         cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1151                               0, CFS_HASH_NBKT(hs));
1152         i = cfs_hash_with_bigname(hs) ?
1153             CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1154         LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1155
1156         EXIT;
1157 }
1158
1159 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1160 {
1161         if (atomic_inc_not_zero(&hs->hs_refcount))
1162                 return hs;
1163         return NULL;
1164 }
1165 EXPORT_SYMBOL(cfs_hash_getref);
1166
1167 void cfs_hash_putref(struct cfs_hash *hs)
1168 {
1169         if (atomic_dec_and_test(&hs->hs_refcount))
1170                 cfs_hash_destroy(hs);
1171 }
1172 EXPORT_SYMBOL(cfs_hash_putref);
1173
1174 static inline int
1175 cfs_hash_rehash_bits(struct cfs_hash *hs)
1176 {
1177         if (cfs_hash_with_no_lock(hs) ||
1178             !cfs_hash_with_rehash(hs))
1179                 return -EOPNOTSUPP;
1180
1181         if (unlikely(cfs_hash_is_exiting(hs)))
1182                 return -ESRCH;
1183
1184         if (unlikely(cfs_hash_is_rehashing(hs)))
1185                 return -EALREADY;
1186
1187         if (unlikely(cfs_hash_is_iterating(hs)))
1188                 return -EAGAIN;
1189
1190         /* XXX: need to handle case with max_theta != 2.0
1191          *      and the case with min_theta != 0.5 */
1192         if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1193             (__cfs_hash_theta(hs) > hs->hs_max_theta))
1194                 return hs->hs_cur_bits + 1;
1195
1196         if (!cfs_hash_with_shrink(hs))
1197                 return 0;
1198
1199         if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1200             (__cfs_hash_theta(hs) < hs->hs_min_theta))
1201                 return hs->hs_cur_bits - 1;
1202
1203         return 0;
1204 }
1205
1206 /**
1207  * don't allow inline rehash if:
1208  * - user wants non-blocking change (add/del) on hash table
1209  * - too many elements
1210  */
1211 static inline int
1212 cfs_hash_rehash_inline(struct cfs_hash *hs)
1213 {
1214         return !cfs_hash_with_nblk_change(hs) &&
1215                atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1216 }
1217
1218 /**
1219  * Add item @hnode to libcfs hash @hs using @key.  The registered
1220  * ops->hs_get function will be called when the item is added.
1221  */
1222 void
1223 cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1224 {
1225         struct cfs_hash_bd   bd;
1226         int             bits;
1227
1228         LASSERT(hlist_unhashed(hnode));
1229
1230         cfs_hash_lock(hs, 0);
1231         cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1232
1233         cfs_hash_key_validate(hs, key, hnode);
1234         cfs_hash_bd_add_locked(hs, &bd, hnode);
1235
1236         cfs_hash_bd_unlock(hs, &bd, 1);
1237
1238         bits = cfs_hash_rehash_bits(hs);
1239         cfs_hash_unlock(hs, 0);
1240         if (bits > 0)
1241                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1242 }
1243 EXPORT_SYMBOL(cfs_hash_add);
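
/*
 * Usage sketch with the hypothetical example_obj from the header of this
 * file: the key is stored in the object itself so it stays valid for later
 * lookups, and ops->hs_get is called on insertion unless the table was
 * created without item references (see cfs_hash_with_no_itemref()).
 *
 *	obj->eo_key = key;
 *	cfs_hash_add(hash, &obj->eo_key, &obj->eo_hnode);
 */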
1244
1245 static struct hlist_node *
1246 cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1247                      struct hlist_node *hnode, int noref)
1248 {
1249         struct hlist_node *ehnode;
1250         struct cfs_hash_bd     bds[2];
1251         int               bits = 0;
1252
1253         LASSERT(hlist_unhashed(hnode));
1254
1255         cfs_hash_lock(hs, 0);
1256         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1257
1258         cfs_hash_key_validate(hs, key, hnode);
1259         ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1260                                                  hnode, noref);
1261         cfs_hash_dual_bd_unlock(hs, bds, 1);
1262
1263         if (ehnode == hnode) /* new item added */
1264                 bits = cfs_hash_rehash_bits(hs);
1265         cfs_hash_unlock(hs, 0);
1266         if (bits > 0)
1267                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1268
1269         return ehnode;
1270 }
1271
1272 /**
1273  * Add item @hnode to libcfs hash @hs using @key.  The registered
1274  * ops->hs_get function will be called if the item was added.
1275  * Returns 0 on success or -EALREADY on key collisions.
1276  */
1277 int
1278 cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
1279                     struct hlist_node *hnode)
1280 {
1281         return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1282                -EALREADY : 0;
1283 }
1284 EXPORT_SYMBOL(cfs_hash_add_unique);
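
/*
 * Usage sketch (hypothetical example_obj): -EALREADY means another item with
 * the same key is already hashed and @hnode was not inserted, so the caller
 * still owns the object.
 *
 *	rc = cfs_hash_add_unique(hash, &obj->eo_key, &obj->eo_hnode);
 *	if (rc == -EALREADY)
 *		kfree(obj);	// duplicate key, object was not inserted
 */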
1285
1286 /**
1287  * Add item @hnode to libcfs hash @hs using @key.  If this @key
1288  * already exists in the hash then ops->hs_get will be called on the
1289  * conflicting entry and that entry will be returned to the caller.
1290  * Otherwise ops->hs_get is called on the item which was added.
1291  */
1292 void *
1293 cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1294                         struct hlist_node *hnode)
1295 {
1296         hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1297
1298         return cfs_hash_object(hs, hnode);
1299 }
1300 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1301
1302 /**
1303  * Delete item @hnode from the libcfs hash @hs using @key.  The @key
1304  * is required to ensure the correct hash bucket is locked since there
1305  * is no direct linkage from the item to the bucket.  The object
1306  * removed from the hash will be returned and ops->hs_put is called
1307  * on the removed object.
1308  */
1309 void *
1310 cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1311 {
1312         void           *obj  = NULL;
1313         int             bits = 0;
1314         struct cfs_hash_bd   bds[2];
1315
1316         cfs_hash_lock(hs, 0);
1317         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1318
1319         /* NB: do nothing if @hnode is not in hash table */
1320         if (hnode == NULL || !hlist_unhashed(hnode)) {
1321                 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1322                         cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1323                 } else {
1324                         hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1325                                                                 key, hnode);
1326                 }
1327         }
1328
1329         if (hnode != NULL) {
1330                 obj  = cfs_hash_object(hs, hnode);
1331                 bits = cfs_hash_rehash_bits(hs);
1332         }
1333
1334         cfs_hash_dual_bd_unlock(hs, bds, 1);
1335         cfs_hash_unlock(hs, 0);
1336         if (bits > 0)
1337                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1338
1339         return obj;
1340 }
1341 EXPORT_SYMBOL(cfs_hash_del);
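
/*
 * Usage sketch (hypothetical example_obj): delete a specific node when the
 * caller already holds it, or pass a NULL hnode (as cfs_hash_del_key() below
 * does) to remove the first item matching @key.  ops->hs_put runs on the
 * removed object, so only use the return value while holding your own
 * reference.
 *
 *	obj = cfs_hash_del(hash, &obj->eo_key, &obj->eo_hnode);
 */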
1342
1343 /**
1344  * Delete item given @key in libcfs hash @hs.  The first item found with
1345  * @key will be removed; if the key exists multiple times in the hash
1346  * @hs, this function must be called once per key.  The removed object
1347  * will be returned and ops->hs_put is called on the removed object.
1348  */
1349 void *
1350 cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1351 {
1352         return cfs_hash_del(hs, key, NULL);
1353 }
1354 EXPORT_SYMBOL(cfs_hash_del_key);
1355
1356 /**
1357  * Lookup an item using @key in the libcfs hash @hs and return it.
1358  * If @key is found in the hash, ops->hs_get() is called and the
1359  * matching object is returned.  It is the caller's responsibility
1360  * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1361  * when finished with the object.  If @key was not found
1362  * in the hash @hs, NULL is returned.
1363  */
1364 void *
1365 cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1366 {
1367         void                 *obj = NULL;
1368         struct hlist_node     *hnode;
1369         struct cfs_hash_bd         bds[2];
1370
1371         cfs_hash_lock(hs, 0);
1372         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1373
1374         hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1375         if (hnode != NULL)
1376                 obj = cfs_hash_object(hs, hnode);
1377
1378         cfs_hash_dual_bd_unlock(hs, bds, 0);
1379         cfs_hash_unlock(hs, 0);
1380
1381         return obj;
1382 }
1383 EXPORT_SYMBOL(cfs_hash_lookup);
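
/*
 * Usage sketch (hypothetical example_obj): a successful lookup returns the
 * object with a reference taken through ops->hs_get, which must be dropped
 * with cfs_hash_put() when the caller is done with it.
 *
 *	obj = cfs_hash_lookup(hash, &key);
 *	if (obj != NULL) {
 *		// ... use obj ...
 *		cfs_hash_put(hash, &obj->eo_hnode);
 *	}
 */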
1384
1385 static void
1386 cfs_hash_for_each_enter(struct cfs_hash *hs)
1387 {
1388         LASSERT(!cfs_hash_is_exiting(hs));
1389
1390         if (!cfs_hash_with_rehash(hs))
1391                 return;
1392         /*
1393          * NB: there is a race on cfs_hash::hs_iterating, but it doesn't
1394          * matter because it's just an unreliable hint to the rehash thread,
1395          * which will try to finish the rehash ASAP when it sees this.
1396          */
1397         hs->hs_iterating = 1;
1398
1399         cfs_hash_lock(hs, 1);
1400         hs->hs_iterators++;
1401
1402         /* NB: iteration is mostly called by a service thread, so we
1403          * prefer to cancel any pending rehash request instead of
1404          * blocking the service thread; the rehash request will be
1405          * relaunched after the iteration */
1406         if (cfs_hash_is_rehashing(hs))
1407                 cfs_hash_rehash_cancel_locked(hs);
1408         cfs_hash_unlock(hs, 1);
1409 }
1410
1411 static void
1412 cfs_hash_for_each_exit(struct cfs_hash *hs)
1413 {
1414         int remained;
1415         int bits;
1416
1417         if (!cfs_hash_with_rehash(hs))
1418                 return;
1419         cfs_hash_lock(hs, 1);
1420         remained = --hs->hs_iterators;
1421         bits = cfs_hash_rehash_bits(hs);
1422         cfs_hash_unlock(hs, 1);
1423         /* NB: there is a race on cfs_hash::hs_iterating, see above */
1424         if (remained == 0)
1425                 hs->hs_iterating = 0;
1426         if (bits > 0) {
1427                 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1428                                     CFS_HASH_LOOP_HOG);
1429         }
1430 }
1431
1432 /**
1433  * For each item in the libcfs hash @hs call the passed callback @func
1434  * and pass to it as an argument each hash item and the private @data.
1435  *
1436  * a) the function may sleep!
1437  * b) during the callback:
1438  *    . the bucket lock is held, so the callback must never sleep.
1439  *    . if @remove_safe is true, the user can remove the current item with
1440  *      cfs_hash_bd_del_locked()
1441  */
1442 static __u64
1443 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1444                         void *data, int remove_safe)
1445 {
1446         struct hlist_node       *hnode;
1447         struct hlist_node       *pos;
1448         struct cfs_hash_bd      bd;
1449         __u64                   count = 0;
1450         int                     excl  = !!remove_safe;
1451         int                     loop  = 0;
1452         int                     i;
1453         ENTRY;
1454
1455         cfs_hash_for_each_enter(hs);
1456
1457         cfs_hash_lock(hs, 0);
1458         LASSERT(!cfs_hash_is_rehashing(hs));
1459
1460         cfs_hash_for_each_bucket(hs, &bd, i) {
1461                 struct hlist_head *hhead;
1462
1463                 cfs_hash_bd_lock(hs, &bd, excl);
1464                 if (func == NULL) { /* only glimpse size */
1465                         count += bd.bd_bucket->hsb_count;
1466                         cfs_hash_bd_unlock(hs, &bd, excl);
1467                         continue;
1468                 }
1469
1470                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1471                         hlist_for_each_safe(hnode, pos, hhead) {
1472                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1473                                 count++;
1474                                 loop++;
1475                                 if (func(hs, &bd, hnode, data)) {
1476                                         cfs_hash_bd_unlock(hs, &bd, excl);
1477                                         goto out;
1478                                 }
1479                         }
1480                 }
1481                 cfs_hash_bd_unlock(hs, &bd, excl);
1482                 if (loop < CFS_HASH_LOOP_HOG)
1483                         continue;
1484                 loop = 0;
1485                 cfs_hash_unlock(hs, 0);
1486                 cond_resched();
1487                 cfs_hash_lock(hs, 0);
1488         }
1489  out:
1490         cfs_hash_unlock(hs, 0);
1491
1492         cfs_hash_for_each_exit(hs);
1493         RETURN(count);
1494 }
1495
1496 struct cfs_hash_cond_arg {
1497         cfs_hash_cond_opt_cb_t  func;
1498         void                   *arg;
1499 };
1500
1501 static int
1502 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1503                          struct hlist_node *hnode, void *data)
1504 {
1505         struct cfs_hash_cond_arg *cond = data;
1506
1507         if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1508                 cfs_hash_bd_del_locked(hs, bd, hnode);
1509         return 0;
1510 }
1511
1512 /**
1513  * Delete items from the libcfs hash @hs for which @func returns true.
1514  * The write lock is held while walking each bucket, to prevent any
1515  * object from gaining a new reference during the deletion.
1516  */
1517 void
1518 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1519 {
1520         struct cfs_hash_cond_arg arg = {
1521                 .func   = func,
1522                 .arg    = data,
1523         };
1524
1525         cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1526 }
1527 EXPORT_SYMBOL(cfs_hash_cond_del);
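
/*
 * Usage sketch: the condition callback receives the object and the opaque
 * @data pointer and returns non-zero to have the item deleted, matching how
 * cfs_hash_cond_del_locked() above invokes it.  example_is_stale() and the
 * eo_deadline field are hypothetical.
 *
 *	static int example_is_stale(void *obj, void *data)
 *	{
 *		struct example_obj *eo = obj;
 *
 *		return eo->eo_deadline < *(__u64 *)data;
 *	}
 *
 *	cfs_hash_cond_del(hash, example_is_stale, &now);
 */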
1528
1529 void
1530 cfs_hash_for_each(struct cfs_hash *hs,
1531                   cfs_hash_for_each_cb_t func, void *data)
1532 {
1533         cfs_hash_for_each_tight(hs, func, data, 0);
1534 }
1535 EXPORT_SYMBOL(cfs_hash_for_each);
1536
1537 void
1538 cfs_hash_for_each_safe(struct cfs_hash *hs,
1539                        cfs_hash_for_each_cb_t func, void *data)
1540 {
1541         cfs_hash_for_each_tight(hs, func, data, 1);
1542 }
1543 EXPORT_SYMBOL(cfs_hash_for_each_safe);
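
/*
 * Usage sketch for cfs_hash_for_each() (illustrative addition, not from
 * the original source).  As the static callbacks in this file show, a
 * cfs_hash_for_each_cb_t takes the hash, the bucket descriptor, the
 * item's hlist_node and the private @data, and returns non-zero to stop
 * the walk.  "struct my_obj", "mo_hnode" and "mo_id" are hypothetical:
 *
 *	static int my_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *				struct hlist_node *hnode, void *data)
 *	{
 *		struct my_obj *mo = hlist_entry(hnode, struct my_obj,
 *						mo_hnode);
 *
 *		CDEBUG(D_INFO, "object %d\n", mo->mo_id);
 *		return 0;	// keep iterating
 *	}
 *
 *	cfs_hash_for_each(hs, my_obj_print, NULL);
 */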
1544
1545 static int
1546 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1547               struct hlist_node *hnode, void *data)
1548 {
1549         *(int *)data = 0;
1550         return 1; /* return 1 to break the loop */
1551 }
1552
1553 int
1554 cfs_hash_is_empty(struct cfs_hash *hs)
1555 {
1556         int empty = 1;
1557
1558         cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1559         return empty;
1560 }
1561 EXPORT_SYMBOL(cfs_hash_is_empty);
1562
1563 __u64
1564 cfs_hash_size_get(struct cfs_hash *hs)
1565 {
1566         return cfs_hash_with_counter(hs) ?
1567                atomic_read(&hs->hs_count) :
1568                cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1569 }
1570 EXPORT_SYMBOL(cfs_hash_size_get);
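
/*
 * Usage sketch (illustrative addition): both helpers above take the
 * needed locks internally, so a caller can simply do:
 *
 *	if (!cfs_hash_is_empty(hs))
 *		CDEBUG(D_INFO, "%s still holds %llu items\n", hs->hs_name,
 *		       (unsigned long long)cfs_hash_size_get(hs));
 *
 * When the hash was created without a per-hash counter (see
 * cfs_hash_with_counter()), the size is computed by walking every
 * bucket, so it is only a snapshot under concurrent updates.
 */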
1571
1572 /*
1573  * cfs_hash_for_each_relax:
1574  * Iterate the hash table and call @func on each item without
1575  * holding any lock.  This function cannot guarantee that the
1576  * iteration will finish if these features are enabled:
1577  *
1578  *  a. if rehash_key is enabled, an item can be moved from
1579  *     one bucket to another bucket
1580  *  b. the user can remove an item with a non-zero refcount from the
1581  *     hash table, so the item may disappear; even worse, the user may
1582  *     change its key and re-insert it into another hash bucket.
1583  *
1584  * There is no way to finish the iteration correctly in either of these
1585  * cases, so the iteration has to stop when a change is detected.
1586  */
1587 static int
1588 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1589                         void *data, int start)
1590 {
1591         struct hlist_node       *hnode;
1592         struct hlist_node       *next = NULL;
1593         struct cfs_hash_bd      bd;
1594         __u32                   version;
1595         int                     count = 0;
1596         int                     stop_on_change;
1597         int                     has_put_locked;
1598         int                     rc = 0;
1599         int                     i, end = -1;
1600         ENTRY;
1601
1602         stop_on_change = cfs_hash_with_rehash_key(hs) ||
1603                          !cfs_hash_with_no_itemref(hs);
1604         has_put_locked = hs->hs_ops->hs_put_locked != NULL;
1605         cfs_hash_lock(hs, 0);
1606 again:
1607         LASSERT(!cfs_hash_is_rehashing(hs));
1608
1609         cfs_hash_for_each_bucket(hs, &bd, i) {
1610                 struct hlist_head *hhead;
1611
1612                 if (i < start)
1613                         continue;
1614                 else if (end > 0 && i >= end)
1615                         break;
1616
1617                 cfs_hash_bd_lock(hs, &bd, 0);
1618                 version = cfs_hash_bd_version_get(&bd);
1619
1620                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1621                         hnode = hhead->first;
1622                         if (hnode == NULL)
1623                                 continue;
1624                         cfs_hash_get(hs, hnode);
1625                         for (; hnode != NULL; hnode = next) {
1626                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1627                                 next = hnode->next;
1628                                 if (next != NULL)
1629                                         cfs_hash_get(hs, next);
1630                                 cfs_hash_bd_unlock(hs, &bd, 0);
1631                                 cfs_hash_unlock(hs, 0);
1632
1633                                 rc = func(hs, &bd, hnode, data);
1634                                 if (stop_on_change || !has_put_locked)
1635                                         cfs_hash_put(hs, hnode);
1636
1637                                 cond_resched();
1638                                 count++;
1639
1640                                 cfs_hash_lock(hs, 0);
1641                                 cfs_hash_bd_lock(hs, &bd, 0);
1642                                 if (stop_on_change) {
1643                                         if (version !=
1644                                             cfs_hash_bd_version_get(&bd))
1645                                                 rc = -EINTR;
1646                                 } else if (has_put_locked) {
1647                                         cfs_hash_put_locked(hs, hnode);
1648                                 }
1649                                 if (rc) /* callback wants to break iteration */
1650                                         break;
1651                         }
1652                         if (next != NULL) {
1653                                 if (has_put_locked) {
1654                                         cfs_hash_put_locked(hs, next);
1655                                         next = NULL;
1656                                 }
1657                                 break;
1658                         } else if (rc != 0) {
1659                                 break;
1660                         }
1661                 }
1662                 cfs_hash_bd_unlock(hs, &bd, 0);
1663                 if (next != NULL && !has_put_locked) {
1664                         cfs_hash_put(hs, next);
1665                         next = NULL;
1666                 }
1667                 if (rc) /* callback wants to break iteration */
1668                         break;
1669         }
1670
1671         if (start > 0 && rc == 0) {
1672                 end = start;
1673                 start = 0;
1674                 goto again;
1675         }
1676
1677         cfs_hash_unlock(hs, 0);
1678         return count;
1679 }
1680
1681 int
1682 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1683                          cfs_hash_for_each_cb_t func, void *data, int start)
1684 {
1685         ENTRY;
1686
1687         if (cfs_hash_with_no_lock(hs) ||
1688             cfs_hash_with_rehash_key(hs) ||
1689             !cfs_hash_with_no_itemref(hs))
1690                 RETURN(-EOPNOTSUPP);
1691
1692         if (hs->hs_ops->hs_get == NULL ||
1693            (hs->hs_ops->hs_put == NULL &&
1694             hs->hs_ops->hs_put_locked == NULL))
1695                 RETURN(-EOPNOTSUPP);
1696
1697         cfs_hash_for_each_enter(hs);
1698         cfs_hash_for_each_relax(hs, func, data, start);
1699         cfs_hash_for_each_exit(hs);
1700
1701         RETURN(0);
1702 }
1703 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
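
/*
 * Usage sketch for cfs_hash_for_each_nolock() (illustrative addition).
 * As checked above, the hash must provide hs_get plus hs_put or
 * hs_put_locked ops and must not be lockless or rehash-by-key, otherwise
 * -EOPNOTSUPP is returned.  @start lets a periodic scanner resume from
 * the bucket where it previously stopped; wrap-around back to bucket 0
 * is handled by cfs_hash_for_each_relax().  "my_obj_print" is the
 * hypothetical callback sketched earlier; it may sleep because no locks
 * are held around the callback:
 *
 *	rc = cfs_hash_for_each_nolock(hs, my_obj_print, NULL, 0);
 *	if (rc != 0)
 *		CDEBUG(D_INFO, "%s: lockless iteration unsupported: rc = %d\n",
 *		       hs->hs_name, rc);
 */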
1704
1705 /**
1706  * For each hash bucket in the libcfs hash @hs call the passed callback
1707  * @func until all the hash buckets are empty.  The passed callback @func
1708  * or the previously registered callback hs->hs_put must remove the item
1709  * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1710  * functions.  No rwlocks will be held during the callback @func, so
1711  * it is safe to sleep if needed.  This function will not terminate
1712  * until the hash is empty.  Note it is still possible to concurrently
1713  * add new items into the hash.  It is the caller's responsibility to
1714  * ensure the required locking is in place to prevent concurrent insertions.
1715  */
1716 int
1717 cfs_hash_for_each_empty(struct cfs_hash *hs,
1718                         cfs_hash_for_each_cb_t func, void *data)
1719 {
1720         unsigned  i = 0;
1721         ENTRY;
1722
1723         if (cfs_hash_with_no_lock(hs))
1724                 return -EOPNOTSUPP;
1725
1726         if (hs->hs_ops->hs_get == NULL ||
1727            (hs->hs_ops->hs_put == NULL &&
1728             hs->hs_ops->hs_put_locked == NULL))
1729                 return -EOPNOTSUPP;
1730
1731         cfs_hash_for_each_enter(hs);
1732         while (cfs_hash_for_each_relax(hs, func, data, 0)) {
1733                 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1734                        hs->hs_name, i++);
1735         }
1736         cfs_hash_for_each_exit(hs);
1737         RETURN(0);
1738 }
1739 EXPORT_SYMBOL(cfs_hash_for_each_empty);
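
/*
 * Usage sketch for cfs_hash_for_each_empty() (illustrative addition), as
 * typically used on teardown paths: the callback must remove the item,
 * otherwise the loop above never terminates.  "my_obj_unhash" and its
 * types are hypothetical, and the cfs_hash_del(hs, key, hnode) call is
 * assumed from the doc comment above:
 *
 *	static int my_obj_unhash(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *				 struct hlist_node *hnode, void *data)
 *	{
 *		struct my_obj *mo = hlist_entry(hnode, struct my_obj,
 *						mo_hnode);
 *
 *		cfs_hash_del(hs, &mo->mo_key, hnode);	// drop it from @hs
 *		my_obj_free(mo);			// may sleep, no locks held
 *		return 0;
 *	}
 *
 *	cfs_hash_for_each_empty(hs, my_obj_unhash, NULL);
 */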
1740
1741 void
1742 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1743                         cfs_hash_for_each_cb_t func, void *data)
1744 {
1745         struct hlist_head *hhead;
1746         struct hlist_node *hnode;
1747         struct cfs_hash_bd         bd;
1748
1749         cfs_hash_for_each_enter(hs);
1750         cfs_hash_lock(hs, 0);
1751         if (hindex >= CFS_HASH_NHLIST(hs))
1752                 goto out;
1753
1754         cfs_hash_bd_index_set(hs, hindex, &bd);
1755
1756         cfs_hash_bd_lock(hs, &bd, 0);
1757         hhead = cfs_hash_bd_hhead(hs, &bd);
1758         hlist_for_each(hnode, hhead) {
1759                 if (func(hs, &bd, hnode, data))
1760                         break;
1761         }
1762         cfs_hash_bd_unlock(hs, &bd, 0);
1763 out:
1764         cfs_hash_unlock(hs, 0);
1765         cfs_hash_for_each_exit(hs);
1766 }
1767
1768 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1769
1770 /*
1771  * For each item in the libcfs hash @hs which matches @key, call the
1772  * passed callback @func, passing it each matching hash item and the
1773  * private @data.  The bucket lock is held during the callback, so
1774  * the callback must never sleep.
1775  */
1776 void
1777 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1778                         cfs_hash_for_each_cb_t func, void *data)
1779 {
1780         struct hlist_node *hnode;
1781         struct cfs_hash_bd         bds[2];
1782         unsigned           i;
1783
1784         cfs_hash_lock(hs, 0);
1785
1786         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1787
1788         cfs_hash_for_each_bd(bds, 2, i) {
1789                 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1790
1791                 hlist_for_each(hnode, hlist) {
1792                         cfs_hash_bucket_validate(hs, &bds[i], hnode);
1793
1794                         if (cfs_hash_keycmp(hs, key, hnode)) {
1795                                 if (func(hs, &bds[i], hnode, data))
1796                                         break;
1797                         }
1798                 }
1799         }
1800
1801         cfs_hash_dual_bd_unlock(hs, bds, 0);
1802         cfs_hash_unlock(hs, 0);
1803 }
1804 EXPORT_SYMBOL(cfs_hash_for_each_key);
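
/*
 * Usage sketch for cfs_hash_for_each_key() (illustrative addition).  The
 * bucket lock is held across the callback, so the callback must not
 * sleep; a common pattern is to only count or mark matches and do any
 * heavy work afterwards:
 *
 *	static int my_obj_count(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *				struct hlist_node *hnode, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// keep scanning; several items may share @key
 *	}
 *
 *	int matches = 0;
 *
 *	cfs_hash_for_each_key(hs, &key, my_obj_count, &matches);
 */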
1805
1806 /**
1807  * Rehash the libcfs hash @hs to the given @bits.  This can be used
1808  * to grow the hash size when excessive chaining is detected, or to
1809  * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1810  * flag is set in @hs the libcfs hash may be dynamically rehashed
1811  * during addition or removal if the hash's theta value exceeds
1812  * either the hs->hs_min_theta or hs->hs_max_theta values.  By default
1813  * these values are tuned to keep the chained hash depth small, and
1814  * this approach assumes a reasonably uniform hashing function.  The
1815  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1816  */
1817 void
1818 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1819 {
1820         int     i;
1821
1822         /* the caller must hold cfs_hash_lock(hs, 1) */
1823         LASSERT(cfs_hash_with_rehash(hs) &&
1824                 !cfs_hash_with_no_lock(hs));
1825
1826         if (!cfs_hash_is_rehashing(hs))
1827                 return;
1828
1829         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1830                 hs->hs_rehash_bits = 0;
1831                 return;
1832         }
1833
1834         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1835                 cfs_hash_unlock(hs, 1);
1836                 /* raise a console warning if the wait is taking too long */
1837                 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1838                        "hash %s is still rehashing, rescheduled %d\n",
1839                        hs->hs_name, i - 1);
1840                 cond_resched();
1841                 cfs_hash_lock(hs, 1);
1842         }
1843 }
1844
1845 void
1846 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1847 {
1848         cfs_hash_lock(hs, 1);
1849         cfs_hash_rehash_cancel_locked(hs);
1850         cfs_hash_unlock(hs, 1);
1851 }
1852
1853 int
1854 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1855 {
1856         int     rc;
1857
1858         LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1859
1860         cfs_hash_lock(hs, 1);
1861
1862         rc = cfs_hash_rehash_bits(hs);
1863         if (rc <= 0) {
1864                 cfs_hash_unlock(hs, 1);
1865                 return rc;
1866         }
1867
1868         hs->hs_rehash_bits = rc;
1869         if (!do_rehash) {
1870                 /* launch and return */
1871                 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1872                 cfs_hash_unlock(hs, 1);
1873                 return 0;
1874         }
1875
1876         /* rehash right now */
1877         cfs_hash_unlock(hs, 1);
1878
1879         return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1880 }
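
/*
 * Usage sketch for cfs_hash_rehash() (illustrative addition).  The hash
 * must have been created with rehash support (see the LASSERT above).
 * With @do_rehash == 0 the resize is only scheduled on the rehash
 * workitem queue and the call returns immediately; with a non-zero
 * @do_rehash the table is resized synchronously before returning:
 *
 *	rc = cfs_hash_rehash(hs, 1);	// grow or shrink right now
 *	if (rc < 0)
 *		CDEBUG(D_INFO, "rehash of %s failed: rc = %d\n",
 *		       hs->hs_name, rc);
 */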
1881
1882 static int
1883 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1884 {
1885         struct cfs_hash_bd      new;
1886         struct hlist_head *hhead;
1887         struct hlist_node *hnode;
1888         struct hlist_node *pos;
1889         void              *key;
1890         int                c = 0;
1891
1892         /* the caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1893         cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1894                 hlist_for_each_safe(hnode, pos, hhead) {
1895                         key = cfs_hash_key(hs, hnode);
1896                         LASSERT(key != NULL);
1897                         /* Validate hnode is in the correct bucket. */
1898                         cfs_hash_bucket_validate(hs, old, hnode);
1899                         /*
1900                          * Delete from old hash bucket; move to new bucket.
1901                          * ops->hs_key must be defined.
1902                          */
1903                         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1904                                              hs->hs_rehash_bits, key, &new);
1905                         cfs_hash_bd_move_locked(hs, old, &new, hnode);
1906                         c++;
1907                 }
1908         }
1909         return c;
1910 }
1911
1912 static int
1913 cfs_hash_rehash_worker(struct cfs_workitem *wi)
1914 {
1915         struct cfs_hash         *hs =
1916                 container_of(wi, struct cfs_hash, hs_rehash_wi);
1917         struct cfs_hash_bucket **bkts;
1918         struct cfs_hash_bd      bd;
1919         unsigned int            old_size;
1920         unsigned int            new_size;
1921         int                     bsize;
1922         int                     count = 0;
1923         int                     rc = 0;
1924         int                     i;
1925
1926         LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1927
1928         cfs_hash_lock(hs, 0);
1929         LASSERT(cfs_hash_is_rehashing(hs));
1930
1931         old_size = CFS_HASH_NBKT(hs);
1932         new_size = CFS_HASH_RH_NBKT(hs);
1933
1934         cfs_hash_unlock(hs, 0);
1935
1936         /*
1937          * hs::hs_rwlock is not needed for hs::hs_buckets because
1938          * nobody else can change the bucket table during the rehash.
1939          */
1940         bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1941                                         old_size, new_size);
1942         cfs_hash_lock(hs, 1);
1943         if (bkts == NULL) {
1944                 rc = -ENOMEM;
1945                 goto out;
1946         }
1947
1948         if (bkts == hs->hs_buckets) {
1949                 bkts = NULL; /* do nothing */
1950                 goto out;
1951         }
1952
1953         rc = __cfs_hash_theta(hs);
1954         if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1955                 /* free the new allocated bkt-table */
1956                 old_size = new_size;
1957                 new_size = CFS_HASH_NBKT(hs);
1958                 rc = -EALREADY;
1959                 goto out;
1960         }
1961
1962         LASSERT(hs->hs_rehash_buckets == NULL);
1963         hs->hs_rehash_buckets = bkts;
1964
1965         rc = 0;
1966         cfs_hash_for_each_bucket(hs, &bd, i) {
1967                 if (cfs_hash_is_exiting(hs)) {
1968                         rc = -ESRCH;
1969                         /* someone wants to destroy the hash, abort now */
1970                         if (old_size < new_size) /* OK to free old bkt-table */
1971                                 break;
1972                         /* it's shrinking, need free new bkt-table */
1973                         hs->hs_rehash_buckets = NULL;
1974                         old_size = new_size;
1975                         new_size = CFS_HASH_NBKT(hs);
1976                         goto out;
1977                 }
1978
1979                 count += cfs_hash_rehash_bd(hs, &bd);
1980                 if (count < CFS_HASH_LOOP_HOG ||
1981                     cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1982                         continue;
1983                 }
1984
1985                 count = 0;
1986                 cfs_hash_unlock(hs, 1);
1987                 cond_resched();
1988                 cfs_hash_lock(hs, 1);
1989         }
1990
1991         hs->hs_rehash_count++;
1992
1993         bkts = hs->hs_buckets;
1994         hs->hs_buckets = hs->hs_rehash_buckets;
1995         hs->hs_rehash_buckets = NULL;
1996
1997         hs->hs_cur_bits = hs->hs_rehash_bits;
1998  out:
1999         hs->hs_rehash_bits = 0;
2000         if (rc == -ESRCH) /* will never be scheduled again */
2001                 cfs_wi_exit(cfs_sched_rehash, wi);
2002         bsize = cfs_hash_bkt_size(hs);
2003         cfs_hash_unlock(hs, 1);
2004         /* can't refer to @hs anymore because it could be destroyed */
2005         if (bkts != NULL)
2006                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
2007         if (rc != 0)
2008                 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
2009         /* return 1 only if cfs_wi_exit is called */
2010         return rc == -ESRCH;
2011 }
2012
2013 /**
2014  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
2015  * @old_key must be provided to locate the objects previous location
2016  * in the hash, and the @new_key will be used to reinsert the object.
2017  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
2018  * combo when it is critical that there is no window in time where the
2019  * object is missing from the hash.  When an object is being rehashed
2020  * the registered cfs_hash_get() and cfs_hash_put() functions will
2021  * not be called.
2022  */
2023 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
2024                          void *new_key, struct hlist_node *hnode)
2025 {
2026         struct cfs_hash_bd        bds[3];
2027         struct cfs_hash_bd        old_bds[2];
2028         struct cfs_hash_bd        new_bd;
2029
2030         LASSERT(!hlist_unhashed(hnode));
2031
2032         cfs_hash_lock(hs, 0);
2033
2034         cfs_hash_dual_bd_get(hs, old_key, old_bds);
2035         cfs_hash_bd_get(hs, new_key, &new_bd);
2036
2037         bds[0] = old_bds[0];
2038         bds[1] = old_bds[1];
2039         bds[2] = new_bd;
2040
2041         /* NB: bds[0] and bds[1] are ordered already */
2042         cfs_hash_bd_order(&bds[1], &bds[2]);
2043         cfs_hash_bd_order(&bds[0], &bds[1]);
2044
2045         cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2046         if (likely(old_bds[1].bd_bucket == NULL)) {
2047                 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2048         } else {
2049                 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2050                 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2051         }
2052         /* overwrite the key while the locks are held, otherwise this may
2053          * race with other operations, e.g. rehash */
2054         cfs_hash_keycpy(hs, hnode, new_key);
2055
2056         cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2057         cfs_hash_unlock(hs, 0);
2058 }
2059 EXPORT_SYMBOL(cfs_hash_rehash_key);
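
/*
 * Usage sketch for cfs_hash_rehash_key() (illustrative addition).  The
 * old and new buckets are locked together, so the object never vanishes
 * from the hash while its key changes.  "mo_key", "mo_hnode" and
 * compute_new_key() are hypothetical:
 *
 *	new_key = compute_new_key(mo);
 *	cfs_hash_rehash_key(hs, &mo->mo_key, &new_key, &mo->mo_hnode);
 *
 * Because the registered hs_get/hs_put ops are not called for this move,
 * the caller is responsible for keeping the object alive across the
 * call.
 */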
2060
2061 int cfs_hash_debug_header(struct seq_file *m)
2062 {
2063         return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2064                         CFS_HASH_BIGNAME_LEN,
2065                         "name", "cur", "min", "max", "theta", "t-min", "t-max",
2066                         "flags", "rehash", "count", "maxdep", "maxdepb",
2067                         " distribution");
2068 }
2069 EXPORT_SYMBOL(cfs_hash_debug_header);
2070
2071 static struct cfs_hash_bucket **
2072 cfs_hash_full_bkts(struct cfs_hash *hs)
2073 {
2074         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2075         if (hs->hs_rehash_buckets == NULL)
2076                 return hs->hs_buckets;
2077
2078         LASSERT(hs->hs_rehash_bits != 0);
2079         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2080                hs->hs_rehash_buckets : hs->hs_buckets;
2081 }
2082
2083 static unsigned int
2084 cfs_hash_full_nbkt(struct cfs_hash *hs)
2085 {
2086         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2087         if (hs->hs_rehash_buckets == NULL)
2088                 return CFS_HASH_NBKT(hs);
2089
2090         LASSERT(hs->hs_rehash_bits != 0);
2091         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2092                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2093 }
2094
2095 int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2096 {
2097         int     dist[8] = { 0, };
2098         int     maxdep  = -1;
2099         int     maxdepb = -1;
2100         int     total   = 0;
2101         int     c       = 0;
2102         int     theta;
2103         int     i;
2104
2105         cfs_hash_lock(hs, 0);
2106         theta = __cfs_hash_theta(hs);
2107
2108         c += seq_printf(m, "%-*s ", CFS_HASH_BIGNAME_LEN, hs->hs_name);
2109         c += seq_printf(m, "%5d ",  1 << hs->hs_cur_bits);
2110         c += seq_printf(m, "%5d ",  1 << hs->hs_min_bits);
2111         c += seq_printf(m, "%5d ",  1 << hs->hs_max_bits);
2112         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(theta),
2113                         __cfs_hash_theta_frac(theta));
2114         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_min_theta),
2115                         __cfs_hash_theta_frac(hs->hs_min_theta));
2116         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_max_theta),
2117                         __cfs_hash_theta_frac(hs->hs_max_theta));
2118         c += seq_printf(m, " 0x%02x ", hs->hs_flags);
2119         c += seq_printf(m, "%6d ", hs->hs_rehash_count);
2120
2121         /*
2122          * The distribution is a summary of the chained hash depth in
2123          * each of the libcfs hash buckets.  Each bucket's hsb_count is
2124          * divided by the hash theta value and used to generate a
2125          * histogram of the hash distribution.  A uniform hash will
2126          * result in all hash buckets being close to the average, thus
2127          * only the first few entries in the histogram will be non-zero.
2128          * If your hash function results in a non-uniform hash, this will
2129          * be observable as outlier buckets in the distribution histogram.
2130          *
2131          * Uniform hash distribution:           128/128/0/0/0/0/0/0
2132          * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
2133          */
2134         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2135                 struct cfs_hash_bd bd;
2136
2137                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2138                 cfs_hash_bd_lock(hs, &bd, 0);
2139                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2140                         maxdep  = bd.bd_bucket->hsb_depmax;
2141                         maxdepb = ffz(~maxdep);
2142                 }
2143                 total += bd.bd_bucket->hsb_count;
2144                 dist[min(fls(bd.bd_bucket->hsb_count/max(theta,1)),7)]++;
2145                 cfs_hash_bd_unlock(hs, &bd, 0);
2146         }
2147
2148         c += seq_printf(m, "%7d ", total);
2149         c += seq_printf(m, "%7d ", maxdep);
2150         c += seq_printf(m, "%7d ", maxdepb);
2151         for (i = 0; i < 8; i++)
2152                 c += seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2153
2154         cfs_hash_unlock(hs, 0);
2155         return c;
2156 }
2157 EXPORT_SYMBOL(cfs_hash_debug_str);