/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for hash processing in the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
 * - ignore hs_rwlock if without CFS_HASH_REHASH setting
 * - buckets are allocated one by one (instead of as contiguous memory)
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - a "bucket" is now a group of hlist_heads; the user can specify the
 *   bucket size via bkt_bits of cfs_hash_create(). All hlist_heads in a
 *   bucket share one lock, reducing memory overhead.
 *
 * - support lockless hash; the caller takes care of locking:
 *   avoids lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock/rwlock for buckets:
 *   the overhead of spinlock contention is lower than the read/write
 *   contention of an rwlock, so using a spinlock to serialize operations
 *   on a bucket is more reasonable for frequently changed hash tables
 *
 * - support single-lock mode:
 *   one lock protects all hash operations, avoiding the overhead of
 *   multiple locks when the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations in many use-cases, which
 *   is expensive.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking, so any required rehash is scheduled on a different
 *   thread in those cases.
 *
 * - safer rehash on large hash tables
 *   In the old implementation, the rehash function exclusively locked
 *   the hash table and finished the rehash in one batch. That is
 *   dangerous on an SMP system because rehashing millions of elements
 *   can take a long time. The new rehash can release the lock and relax
 *   the CPU in the middle of the rehash, so it is safe for another
 *   thread to search/change the hash table even while it is rehashing.
 *
 * - support two different refcount modes
 *   . hash table holds a refcount on each element
 *   . hash table doesn't change refcounts when adding/removing elements
 *
 * - support long-name hash tables (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   The user must now define hs_keycpy for rehash-enabled hash tables;
 *   cfs_hash_rehash_key overwrites the hash key under the lock by
 *   calling hs_keycpy.
 *
 * - better hash iteration:
 *   both locked and lockless iteration of the hash table are now
 *   supported. The user can also break the iteration by returning 1
 *   from the callback.
 */
#include <linux/seq_file.h>
#include <linux/log2.h>

#include <libcfs/linux/linux-list.h>
#include <libcfs/libcfs.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif

struct workqueue_struct *cfs_rehash_wq;

static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->spin)
{
        spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->spin)
{
        spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->rw)
{
        if (!exclusive)
                read_lock(&lock->rw);
        else
                write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->rw)
{
        if (!exclusive)
                read_unlock(&lock->rw);
        else
                write_unlock(&lock->rw);
}

static inline void
cfs_hash_rw_sem_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->rw_sem)
{
        if (!exclusive)
                down_read(&lock->rw_sem);
        else
                down_write(&lock->rw_sem);
}

static inline void
cfs_hash_rw_sem_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->rw_sem)
{
        if (!exclusive)
                up_read(&lock->rw_sem);
        else
                up_write(&lock->rw_sem);
}

/** No lock hash */
static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** rw_sem bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_sem_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_sem_lock,
        .hs_bkt_unlock  = cfs_hash_rw_sem_unlock,
};

/** rw_sem bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_rw_sem_lops = {
        .hs_lock        = cfs_hash_rw_sem_lock,
        .hs_unlock      = cfs_hash_rw_sem_unlock,
        .hs_bkt_lock    = cfs_hash_rw_sem_lock,
        .hs_bkt_unlock  = cfs_hash_rw_sem_unlock,
};
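
/*
 * Summary of the lock-op tables above (informational, derived directly
 * from the initializers): the top-level lock serializes table-wide
 * state such as rehash; the bucket lock serializes a single bucket.
 *
 *   table                         top-level lock   bucket lock
 *   cfs_hash_nl_lops              none             none
 *   cfs_hash_nbl_lops             spinlock         none
 *   cfs_hash_bkt_spin_lops        rwlock           spinlock  (rehash on)
 *   cfs_hash_bkt_rw_lops          rwlock           rwlock    (rehash on)
 *   cfs_hash_nr_bkt_spin_lops     none             spinlock
 *   cfs_hash_nr_bkt_rw_lops       none             rwlock
 *   cfs_hash_nr_bkt_rw_sem_lops   none             rw_sem
 *   cfs_hash_bkt_rw_sem_lops      rw_sem           rw_sem    (rehash on)
 */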

static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                if (cfs_hash_with_rw_sem_bktlock(hs)) {
                        init_rwsem(&hs->hs_lock.rw_sem);
                        hs->hs_lops = &cfs_hash_bkt_rw_sem_lops;
                } else {
                        rwlock_init(&hs->hs_lock.rw);

                        if (cfs_hash_with_rw_bktlock(hs))
                                hs->hs_lops = &cfs_hash_bkt_rw_lops;
                        else if (cfs_hash_with_spin_bktlock(hs))
                                hs->hs_lops = &cfs_hash_bkt_spin_lops;
                        else
                                LBUG();
                }
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else if (cfs_hash_with_rw_sem_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_sem_lops;
                else
                        LBUG();
        }
}

/**
 * Simple hash head without depth tracking;
 * new elements are always added to the head of the hlist.
 */
struct cfs_hash_head {
        struct hlist_head       hh_head;        /**< entries list */
};

static int
cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head);
}

static struct hlist_head *
cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head *head;

        head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking;
 * new elements are always added to the head of the hlist.
 */
struct cfs_hash_head_dep {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
};

static int
cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head_dep);
}

static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head_dep   *head;

        head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * Doubly linked hash head without depth tracking;
 * new elements are always added to the tail of the hlist.
 */
struct cfs_hash_dhead {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
};

static int
cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead);
}

static struct hlist_head *
cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead *head;

        head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (dh->dh_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dh_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return -1; /* unknown depth */
}

/**
 * Doubly linked hash head with depth tracking;
 * new elements are always added to the tail of the hlist.
 */
struct cfs_hash_dhead_dep {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
};

static int
cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead_dep);
}

static struct hlist_head *
cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead_dep *head;

        head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (dh->dd_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dd_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return --dh->dd_depth;
}

static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
       .hop_hhead      = cfs_hash_hh_hhead,
       .hop_hhead_size = cfs_hash_hh_hhead_size,
       .hop_hnode_add  = cfs_hash_hh_hnode_add,
       .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
       .hop_hhead      = cfs_hash_hd_hhead,
       .hop_hhead_size = cfs_hash_hd_hhead_size,
       .hop_hnode_add  = cfs_hash_hd_hnode_add,
       .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
       .hop_hhead      = cfs_hash_dh_hhead,
       .hop_hhead_size = cfs_hash_dh_hhead_size,
       .hop_hnode_add  = cfs_hash_dh_hnode_add,
       .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
       .hop_hhead      = cfs_hash_dd_hhead,
       .hop_hhead_size = cfs_hash_dd_hhead_size,
       .hop_hnode_add  = cfs_hash_dd_hnode_add,
       .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}

static void
cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
                     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}
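
/*
 * Worked example (illustrative, not in the original source): with
 * cur_bits = 10 and bkt_bits = 3 the table has 2^10 = 1024 hlist heads,
 * grouped into 2^(10-3) = 128 buckets of 8 heads each.  For a hash
 * index of, say, 0x2a5 (binary 10 1010 0101):
 *
 *   bd_bucket = bkts[0x2a5 & 0x7f] = bkts[0x25]   low 7 bits pick bucket
 *   bd_offset = 0x2a5 >> 7         = 5            high 3 bits pick hlist
 *
 * so keys that share low-order hash bits share a bucket (and its lock),
 * while the high bits select one of the hlist heads inside it.
 */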

void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
EXPORT_SYMBOL(cfs_hash_bd_get);

static inline void
cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        spin_unlock(&hs->hs_dep_lock);

        queue_work(cfs_rehash_wq, &hs->hs_dep_work);
# endif
}

void
cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        struct hlist_node *hnode)
{
        int rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(atomic_read(&hs->hs_count) > 0);
                atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
                        struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
        struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
        struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
        int                rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
         * in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
};

enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete if existed */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
};
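
/*
 * Illustration (derived from the masks above): each intent is just an
 * OR of the mask bits, e.g.
 *
 *   CFS_HS_LOOKUP_IT_FINDADD == CFS_HS_LOOKUP_MASK_FIND |
 *                               CFS_HS_LOOKUP_MASK_REF  |
 *                               CFS_HS_LOOKUP_MASK_ADD
 *
 * so cfs_hash_bd_lookup_intent() below can test one bit at a time:
 * FIND walks the chain, REF takes a reference on a match, ADD inserts
 * @hnode when nothing matched, and DEL removes the match and returns it.
 */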

static struct hlist_node *
cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key, struct hlist_node *hnode,
                          enum cfs_hash_lookup_intent intent)
{
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
        struct hlist_node  *ehnode;
        struct hlist_node  *match;
        int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function, we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time. */
        match = intent_add ? NULL : hnode;
        hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);

static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                       unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        /**
         * bds must be ordered by ascending bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket but
         * have different bd::bd_offset, so we need to take care of deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}

static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                         unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                unsigned n, const void *key)
{
        struct hlist_node *ehnode;
        unsigned          i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                        CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        int               intent;
        unsigned          i;

        LASSERT(hnode != NULL);
        /* unless @noref is set, also take a ref on any existing match */
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                struct cfs_hash_bd      mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
{
        struct hlist_node *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
        int     rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) {
                swap(*bd1, *bd2); /* swap bd1 and bd2 */
        }
}

void
cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
                     struct cfs_hash_bd *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}
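
/*
 * Note (explanatory, not from the original file): while a rehash is in
 * flight a key may live either in the old table (hs_buckets) or in the
 * new one (hs_rehash_buckets), so the lookup/add/del paths below work
 * on the pair of descriptors filled in here.  cfs_hash_bd_order()
 * sorts the pair by hsb_index so that cfs_hash_multi_bd_lock() always
 * takes bucket locks in one global order and cannot deadlock against
 * another thread locking the same pair.
 */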

void
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}

void
cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}

static void
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
                      int bkt_size, int prev_size, int size)
{
        int     i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        struct cfs_hash_bucket **new_bkts;
        int                 i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
                struct cfs_hash_bd     bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else if (cfs_hash_with_rw_sem_bktlock(hs))
                        init_rwsem(&new_bkts[i]->hsb_lock.rw_sem);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
 *           - CFS_HASH_SORT enable chained hash sort
 */
static void cfs_hash_rehash_worker(struct work_struct *work);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static void cfs_hash_dep_print(struct work_struct *work)
{
        struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
        int         dep;
        int         bkt;
        int         off;
        int         bits;

        spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark as workitem done */
        spin_unlock(&hs->hs_dep_lock);
}

static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
        spin_lock_init(&hs->hs_dep_lock);
        INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
        cancel_work_sync(&hs->hs_dep_work);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                struct cfs_hash_ops *ops, unsigned flags)
{
        struct cfs_hash *hs;
        int         len;

        ENTRY;

        BUILD_BUG_ON(CFS_HASH_THETA_BITS >= 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put != NULL || ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                      ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
        if (hs == NULL)
                RETURN(NULL);

        strlcpy(hs->hs_name, name, len);
        hs->hs_flags = flags;

        atomic_set(&hs->hs_refcount, 1);
        atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
        RETURN(NULL);
}
EXPORT_SYMBOL(cfs_hash_create);
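
/*
 * Usage sketch (illustrative only; "my_hash", "obj", my_obj_hash_ops
 * and the flag/theta constants are assumptions to be checked against
 * libcfs_hash.h, not code from this file).  A caller embeds a
 * struct hlist_node in its object, registers a struct cfs_hash_ops,
 * and then:
 *
 *      hs = cfs_hash_create("my_hash",
 *                           7, 12,     // cur_bits, max_bits
 *                           4, 0,      // bkt_bits, extra_bytes
 *                           CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *                           &my_obj_hash_ops, CFS_HASH_DEFAULT);
 *      if (hs == NULL)
 *              return -ENOMEM;
 *
 *      cfs_hash_add(hs, &obj->key, &obj->hnode); // ref taken via hs_get
 *      obj = cfs_hash_lookup(hs, &key);          // ref'd; needs cfs_hash_put
 *      ...
 *      cfs_hash_putref(hs);                      // destroyed on last ref
 */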

/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(struct cfs_hash *hs)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        struct cfs_hash_bd         bd;
        int                   i;
        ENTRY;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * prohibit further rehashes, don't need any lock because
         * I'm the only (last) one that can change it.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for consistent code */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not "
                                         "empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't assert key validity, because we
                                 * can interrupt rehash */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cond_resched();
        }

        LASSERT(atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));

        EXIT;
}

struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(struct cfs_hash *hs)
{
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);

static inline int
cfs_hash_rehash_bits(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}
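
/*
 * Worked example (assuming __cfs_hash_theta() returns the average
 * number of items per hlist head as a fixed-point value scaled by
 * CFS_HASH_THETA_BITS, per libcfs_hash.h): with the default bounds
 * noted in the XXX above (min_theta = 0.5, max_theta = 2.0), a table
 * with 2^10 heads grows to 2^11 once it holds more than 2 * 1024 =
 * 2048 items, and, if CFS_HASH_SHRINK is set, shrinks to 2^9 once it
 * drops below 0.5 * 1024 = 512 items.
 */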

/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(struct cfs_hash *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        struct cfs_hash_bd   bd;
        int             bits;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);

static struct hlist_node *
cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
                     struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        struct cfs_hash_bd     bds[2];
        int               bits = 0;

        LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
                    struct hlist_node *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);

/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        void           *obj  = NULL;
        int             bits = 0;
        struct cfs_hash_bd   bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        /* NB: do nothing if @hnode is not in hash table */
        if (hnode == NULL || !hlist_unhashed(hnode)) {
                if (bds[1].bd_bucket == NULL && hnode != NULL) {
                        cfs_hash_bd_del_locked(hs, &bds[0], hnode);
                } else {
                        hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
                                                                key, hnode);
                }
        }

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs, this function must be called once per key.  The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash hs->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs NULL is returned.
 */
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
        void                 *obj = NULL;
        struct hlist_node     *hnode;
        struct cfs_hash_bd         bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);

static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there is a race on struct cfs_hash::hs_iterating, but it
         * doesn't matter because it's just an unreliable signal to the
         * rehash-thread, which will try to finish the rehash ASAP when
         * seeing this.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;
        cfs_hash_unlock(hs, 1);

        /* NB: iteration is mostly called by service threads; we tend to
         * cancel a pending rehash-request instead of blocking the service
         * thread, and relaunch the rehash request after iteration
         */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel(hs);
}

static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: racy on struct cfs_hash::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item by
 *      calling cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        struct hlist_node       *hnode;
        struct hlist_node       *pos;
        struct cfs_hash_bd      bd;
        __u64                   count = 0;
        int                     excl  = !!remove_safe;
        int                     loop  = 0;
        int                     i;
        ENTRY;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        RETURN(count);
}

struct cfs_hash_cond_arg {
        cfs_hash_cond_opt_cb_t  func;
        void                   *arg;
};

static int
cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
{
        struct cfs_hash_cond_arg *cond = data;

        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}
1523
1524 /**
1525  * Delete item from the libcfs hash @hs when @func return true.
1526  * The write lock being hold during loop for each bucket to avoid
1527  * any object be reference.
1528  */
1529 void
1530 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1531 {
1532         struct cfs_hash_cond_arg arg = {
1533                 .func   = func,
1534                 .arg    = data,
1535         };
1536
1537         cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1538 }
1539 EXPORT_SYMBOL(cfs_hash_cond_del);
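
/*
 * Usage sketch (illustrative only; "struct my_obj", its mo_stale flag
 * and my_is_stale() are hypothetical, not part of this API): deleting
 * every entry for which a predicate returns true.
 *
 *      static int my_is_stale(void *obj, void *data)
 *      {
 *              struct my_obj *mo = obj;
 *
 *              return mo->mo_stale;    // non-zero: delete this item
 *      }
 *
 *      cfs_hash_cond_del(hs, my_is_stale, NULL);
 */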
1540
1541 void
1542 cfs_hash_for_each(struct cfs_hash *hs,
1543                   cfs_hash_for_each_cb_t func, void *data)
1544 {
1545         cfs_hash_for_each_tight(hs, func, data, 0);
1546 }
1547 EXPORT_SYMBOL(cfs_hash_for_each);
1548
1549 void
1550 cfs_hash_for_each_safe(struct cfs_hash *hs,
1551                        cfs_hash_for_each_cb_t func, void *data)
1552 {
1553         cfs_hash_for_each_tight(hs, func, data, 1);
1554 }
1555 EXPORT_SYMBOL(cfs_hash_for_each_safe);
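
/*
 * Usage sketch (illustrative; my_count_cb is hypothetical): counting
 * items with cfs_hash_for_each().  The callback runs under the bucket
 * lock, so it must not sleep.
 *
 *      static int my_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                             struct hlist_node *hnode, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;       // return non-zero to stop the iteration
 *      }
 *
 *      int n = 0;
 *      cfs_hash_for_each(hs, my_count_cb, &n);
 */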
1556
1557 static int
1558 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1559               struct hlist_node *hnode, void *data)
1560 {
1561         *(int *)data = 0;
1562         return 1; /* return 1 to break the loop */
1563 }
1564
1565 int
1566 cfs_hash_is_empty(struct cfs_hash *hs)
1567 {
1568         int empty = 1;
1569
1570         cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1571         return empty;
1572 }
1573 EXPORT_SYMBOL(cfs_hash_is_empty);
1574
1575 __u64
1576 cfs_hash_size_get(struct cfs_hash *hs)
1577 {
1578         return cfs_hash_with_counter(hs) ?
1579                atomic_read(&hs->hs_count) :
1580                cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1581 }
1582 EXPORT_SYMBOL(cfs_hash_size_get);
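
/*
 * Usage note (illustrative): cfs_hash_size_get() is O(1) only when the
 * hash keeps a counter; otherwise it walks every bucket under lock.
 * For a plain emptiness test prefer cfs_hash_is_empty(), which stops
 * at the first item it sees:
 *
 *      if (!cfs_hash_is_empty(hs))
 *              CDEBUG(D_INFO, "%s still has %llu items\n",
 *                     hs->hs_name, cfs_hash_size_get(hs));
 */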
1583
1584 /*
1585  * cfs_hash_for_each_relax:
1586  * Iterate the hash table and call @func on each item without
1587  * holding any lock. This function cannot guarantee that the
1588  * iteration will complete if either of these features is enabled:
1589  *
1590  *  a. if rehash_key is enabled, an item can be moved from one
1591  *     bucket to another while we iterate
1592  *  b. the user can remove a non-zero-ref item from the hash-table;
1593  *     even worse, the user may change the item's key and re-insert
1594  *     it into another hash bucket.
1595  * There is no way to finish the iteration correctly in either of
1596  * these cases, so the iteration has to stop as soon as a change is
1597  * detected (via the bucket version check below).
1598  */
1599 static int
1600 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1601                         void *data, int start)
1602 {
1603         struct hlist_node       *hnode;
1604         struct hlist_node       *next = NULL;
1605         struct cfs_hash_bd      bd;
1606         __u32                   version;
1607         int                     count = 0;
1608         int                     stop_on_change;
1609         int                     has_put_locked;
1610         int                     rc = 0;
1611         int                     i, end = -1;
1612         ENTRY;
1613
1614         stop_on_change = cfs_hash_with_rehash_key(hs) ||
1615                          !cfs_hash_with_no_itemref(hs);
1616         has_put_locked = hs->hs_ops->hs_put_locked != NULL;
1617         cfs_hash_lock(hs, 0);
1618 again:
1619         LASSERT(!cfs_hash_is_rehashing(hs));
1620
1621         cfs_hash_for_each_bucket(hs, &bd, i) {
1622                 struct hlist_head *hhead;
1623
1624                 if (i < start)
1625                         continue;
1626                 else if (end > 0 && i >= end)
1627                         break;
1628
1629                 cfs_hash_bd_lock(hs, &bd, 0);
1630                 version = cfs_hash_bd_version_get(&bd);
1631
1632                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1633                         hnode = hhead->first;
1634                         if (hnode == NULL)
1635                                 continue;
1636                         cfs_hash_get(hs, hnode);
1637                         for (; hnode != NULL; hnode = next) {
1638                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1639                                 next = hnode->next;
1640                                 if (next != NULL)
1641                                         cfs_hash_get(hs, next);
1642                                 cfs_hash_bd_unlock(hs, &bd, 0);
1643                                 cfs_hash_unlock(hs, 0);
1644
1645                                 rc = func(hs, &bd, hnode, data);
1646                                 if (stop_on_change || !has_put_locked)
1647                                         cfs_hash_put(hs, hnode);
1648
1649                                 cond_resched();
1650                                 count++;
1651
1652                                 cfs_hash_lock(hs, 0);
1653                                 cfs_hash_bd_lock(hs, &bd, 0);
1654                                 if (stop_on_change) {
1655                                         if (version !=
1656                                             cfs_hash_bd_version_get(&bd))
1657                                                 rc = -EINTR;
1658                                 } else if (has_put_locked) {
1659                                         cfs_hash_put_locked(hs, hnode);
1660                                 }
1661                                 if (rc) /* callback wants to break iteration */
1662                                         break;
1663                         }
1664                         if (next != NULL) {
1665                                 if (has_put_locked) {
1666                                         cfs_hash_put_locked(hs, next);
1667                                         next = NULL;
1668                                 }
1669                                 break;
1670                         } else if (rc != 0) {
1671                                 break;
1672                         }
1673                 }
1674                 cfs_hash_bd_unlock(hs, &bd, 0);
1675                 if (next != NULL && !has_put_locked) {
1676                         cfs_hash_put(hs, next);
1677                         next = NULL;
1678                 }
1679                 if (rc) /* callback wants to break iteration */
1680                         break;
1681         }
1682
1683         if (start > 0 && rc == 0) {
1684                 end = start;
1685                 start = 0;
1686                 goto again;
1687         }
1688
1689         cfs_hash_unlock(hs, 0);
1690         return count;
1691 }
1692
1693 int
1694 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1695                          cfs_hash_for_each_cb_t func, void *data, int start)
1696 {
1697         ENTRY;
1698
1699         if (cfs_hash_with_no_lock(hs) ||
1700             cfs_hash_with_rehash_key(hs) ||
1701             !cfs_hash_with_no_itemref(hs))
1702                 RETURN(-EOPNOTSUPP);
1703
1704         if (hs->hs_ops->hs_get == NULL ||
1705            (hs->hs_ops->hs_put == NULL &&
1706             hs->hs_ops->hs_put_locked == NULL))
1707                 RETURN(-EOPNOTSUPP);
1708
1709         cfs_hash_for_each_enter(hs);
1710         cfs_hash_for_each_relax(hs, func, data, start);
1711         cfs_hash_for_each_exit(hs);
1712
1713         RETURN(0);
1714 }
1715 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
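
/*
 * Usage sketch (illustrative; my_flush_cb() and my_obj_flush() are
 * hypothetical): no lock is held across the callback here, so the
 * callback may sleep.  The call returns -EOPNOTSUPP for hashes that
 * fail the feature checks above (no-lock or rehash-key hashes, hashes
 * holding item refs, or hashes lacking get/put operations).
 *
 *      static int my_flush_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                             struct hlist_node *hnode, void *data)
 *      {
 *              my_obj_flush(cfs_hash_object(hs, hnode));  // may block
 *              return 0;
 *      }
 *
 *      rc = cfs_hash_for_each_nolock(hs, my_flush_cb, NULL, 0);
 */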
1716
1717 /**
1718  * For each hash bucket in the libcfs hash @hs call the passed callback
1719  * @func until all the hash buckets are empty.  The passed callback @func
1720  * or the previously registered callback hs->hs_put must remove the item
1721  * from the hash.  You may use either the cfs_hash_del() or hlist_del()
1722  * functions.  No rwlocks will be held during the callback @func, so it
1723  * is safe to sleep if needed.  This function will not terminate until
1724  * the hash is empty.  Note it is still possible to concurrently add new
1725  * items into the hash.  It is the caller's responsibility to ensure
1726  * the required locking is in place to prevent concurrent insertions.
1727  */
1728 int
1729 cfs_hash_for_each_empty(struct cfs_hash *hs,
1730                         cfs_hash_for_each_cb_t func, void *data)
1731 {
1732         unsigned  i = 0;
1733         ENTRY;
1734
1735         if (cfs_hash_with_no_lock(hs))
1736                 RETURN(-EOPNOTSUPP);
1737
1738         if (hs->hs_ops->hs_get == NULL ||
1739            (hs->hs_ops->hs_put == NULL &&
1740             hs->hs_ops->hs_put_locked == NULL))
1741                 RETURN(-EOPNOTSUPP);
1742
1743         cfs_hash_for_each_enter(hs);
1744         while (cfs_hash_for_each_relax(hs, func, data, 0)) {
1745                 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1746                        hs->hs_name, i++);
1747         }
1748         cfs_hash_for_each_exit(hs);
1749         RETURN(0);
1750 }
1751 EXPORT_SYMBOL(cfs_hash_for_each_empty);
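
/*
 * Usage sketch (illustrative teardown; "my_obj", mo_key, mo_hnode and
 * my_obj_put() are hypothetical): each callback invocation must remove
 * the item, otherwise this function keeps looping trying to empty the
 * hash.
 *
 *      static int my_drain_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                             struct hlist_node *hnode, void *data)
 *      {
 *              struct my_obj *mo = cfs_hash_object(hs, hnode);
 *
 *              cfs_hash_del(hs, &mo->mo_key, hnode);   // unlink from @hs
 *              my_obj_put(mo);                         // drop a reference
 *              return 0;
 *      }
 *
 *      cfs_hash_for_each_empty(hs, my_drain_cb, NULL);
 */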
1752
1753 void
1754 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1755                         cfs_hash_for_each_cb_t func, void *data)
1756 {
1757         struct hlist_head *hhead;
1758         struct hlist_node *hnode;
1759         struct cfs_hash_bd         bd;
1760
1761         cfs_hash_for_each_enter(hs);
1762         cfs_hash_lock(hs, 0);
1763         if (hindex >= CFS_HASH_NHLIST(hs))
1764                 goto out;
1765
1766         cfs_hash_bd_index_set(hs, hindex, &bd);
1767
1768         cfs_hash_bd_lock(hs, &bd, 0);
1769         hhead = cfs_hash_bd_hhead(hs, &bd);
1770         hlist_for_each(hnode, hhead) {
1771                 if (func(hs, &bd, hnode, data))
1772                         break;
1773         }
1774         cfs_hash_bd_unlock(hs, &bd, 0);
1775 out:
1776         cfs_hash_unlock(hs, 0);
1777         cfs_hash_for_each_exit(hs);
1778 }
1779 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
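
/*
 * Usage sketch (illustrative, reusing the hypothetical my_count_cb
 * from above): inspecting one specific hash chain, e.g. a suspected
 * collision hot-spot.  An @hindex at or beyond CFS_HASH_NHLIST(hs) is
 * silently ignored.
 *
 *      cfs_hash_hlist_for_each(hs, 42, my_count_cb, &n);
 */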
1781
1782 /*
1783  * For each item in the libcfs hash @hs which matches the @key call
1784  * the passed callback @func and pass to it as an argument each hash
1785  * item and the private @data. During the callback the bucket lock
1786  * is held so the callback must never sleep.
1787  */
1788 void
1789 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1790                         cfs_hash_for_each_cb_t func, void *data)
1791 {
1792         struct hlist_node *hnode;
1793         struct cfs_hash_bd         bds[2];
1794         unsigned           i;
1795
1796         cfs_hash_lock(hs, 0);
1797
1798         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1799
1800         cfs_hash_for_each_bd(bds, 2, i) {
1801                 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1802
1803                 hlist_for_each(hnode, hlist) {
1804                         cfs_hash_bucket_validate(hs, &bds[i], hnode);
1805
1806                         if (cfs_hash_keycmp(hs, key, hnode)) {
1807                                 if (func(hs, &bds[i], hnode, data))
1808                                         break;
1809                         }
1810                 }
1811         }
1812
1813         cfs_hash_dual_bd_unlock(hs, bds, 0);
1814         cfs_hash_unlock(hs, 0);
1815 }
1816 EXPORT_SYMBOL(cfs_hash_for_each_key);
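
/*
 * Usage sketch (illustrative; my_match_cb() and my_obj_mark() are
 * hypothetical): visiting every entry matching @key, e.g. when the
 * hash allows duplicate keys.  The bucket lock is held, so the
 * callback must not sleep.
 *
 *      static int my_match_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                             struct hlist_node *hnode, void *data)
 *      {
 *              my_obj_mark(cfs_hash_object(hs, hnode));  // must not block
 *              return 0;       // keep scanning further duplicates
 *      }
 *
 *      cfs_hash_for_each_key(hs, &key, my_match_cb, NULL);
 */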
1817
1818 /**
1819  * Rehash the libcfs hash @hs to a new size.  This can be used to grow
1820  * the hash size when excessive chaining is detected, or to shrink the
1821  * hash when it is larger than needed.  When the CFS_HASH_REHASH flag
1822  * is set in @hs the libcfs hash may be dynamically rehashed during
1823  * addition or removal if the hash's theta value falls outside the
1824  * range bounded by hs->hs_min_theta and hs->hs_max_theta.  By default
1825  * these values are tuned to keep the chained hash depth small, and
1826  * this approach assumes a reasonably uniform hashing function.  The
1827  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1828  */
1829 void
1830 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1831 {
1832         LASSERT(cfs_hash_with_rehash(hs));
1833         cancel_work_sync(&hs->hs_rehash_work);
1834 }
1835
1836 void
1837 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1838 {
1839         int     rc;
1840
1841         LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1842
1843         cfs_hash_lock(hs, 1);
1844
1845         rc = cfs_hash_rehash_bits(hs);
1846         if (rc <= 0) {
1847                 cfs_hash_unlock(hs, 1);
1848                 return;
1849         }
1850
1851         hs->hs_rehash_bits = rc;
1852         if (!do_rehash) {
1853                 /* launch and return */
1854                 queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
1855                 cfs_hash_unlock(hs, 1);
1856                 return;
1857         }
1858
1859         /* rehash right now */
1860         cfs_hash_unlock(hs, 1);
1861
1862         cfs_hash_rehash_worker(&hs->hs_rehash_work);
1863 }
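
/*
 * Usage note (illustrative): @do_rehash selects whether the rehash
 * runs synchronously in the caller's context or is queued to the
 * shared cfs_rehash_wq workqueue:
 *
 *      cfs_hash_rehash(hs, 1);         // resize now; may take a while
 *      cfs_hash_rehash(hs, 0);         // schedule resize and return
 *
 * Either way nothing happens unless cfs_hash_rehash_bits() decides
 * that the current theta actually warrants a resize.
 */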
1864
1865 static int
1866 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1867 {
1868         struct cfs_hash_bd      new;
1869         struct hlist_head *hhead;
1870         struct hlist_node *hnode;
1871         struct hlist_node *pos;
1872         void              *key;
1873         int                c = 0;
1874
1875         /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1876         cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1877                 hlist_for_each_safe(hnode, pos, hhead) {
1878                         key = cfs_hash_key(hs, hnode);
1879                         LASSERT(key != NULL);
1880                         /* Validate hnode is in the correct bucket. */
1881                         cfs_hash_bucket_validate(hs, old, hnode);
1882                         /*
1883                          * Delete from old hash bucket; move to new bucket.
1884                          * ops->hs_key must be defined.
1885                          */
1886                         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1887                                              hs->hs_rehash_bits, key, &new);
1888                         cfs_hash_bd_move_locked(hs, old, &new, hnode);
1889                         c++;
1890                 }
1891         }
1892         return c;
1893 }
1894
1895 static void
1896 cfs_hash_rehash_worker(struct work_struct *work)
1897 {
1898         struct cfs_hash *hs = container_of(work, struct cfs_hash,
1899                                            hs_rehash_work);
1900         struct cfs_hash_bucket **bkts;
1901         struct cfs_hash_bd      bd;
1902         unsigned int            old_size;
1903         unsigned int            new_size;
1904         int                     bsize;
1905         int                     count = 0;
1906         int                     rc = 0;
1907         int                     i;
1908
1909         LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1910
1911         cfs_hash_lock(hs, 0);
1912         LASSERT(cfs_hash_is_rehashing(hs));
1913
1914         old_size = CFS_HASH_NBKT(hs);
1915         new_size = CFS_HASH_RH_NBKT(hs);
1916
1917         cfs_hash_unlock(hs, 0);
1918
1919         /*
1920          * no need to hold hs::hs_rwlock over hs::hs_buckets here,
1921          * because nobody else can change the bucket table during rehash.
1922          */
1923         bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1924                                         old_size, new_size);
1925         cfs_hash_lock(hs, 1);
1926         if (bkts == NULL) {
1927                 rc = -ENOMEM;
1928                 goto out;
1929         }
1930
1931         if (bkts == hs->hs_buckets) {
1932                 bkts = NULL; /* do nothing */
1933                 goto out;
1934         }
1935
1936         rc = __cfs_hash_theta(hs);
1937         if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1938                 /* free the new allocated bkt-table */
1939                 old_size = new_size;
1940                 new_size = CFS_HASH_NBKT(hs);
1941                 rc = -EALREADY;
1942                 goto out;
1943         }
1944
1945         LASSERT(hs->hs_rehash_buckets == NULL);
1946         hs->hs_rehash_buckets = bkts;
1947
1948         rc = 0;
1949         cfs_hash_for_each_bucket(hs, &bd, i) {
1950                 if (cfs_hash_is_exiting(hs)) {
1951                         rc = -ESRCH;
1952                         /* someone wants to destroy the hash, abort now */
1953                         if (old_size < new_size) /* OK to free old bkt-table */
1954                                 break;
1955                         /* it's shrinking, need free new bkt-table */
1956                         hs->hs_rehash_buckets = NULL;
1957                         old_size = new_size;
1958                         new_size = CFS_HASH_NBKT(hs);
1959                         goto out;
1960                 }
1961
1962                 count += cfs_hash_rehash_bd(hs, &bd);
1963                 if (count < CFS_HASH_LOOP_HOG ||
1964                     cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1965                         continue;
1966                 }
1967
1968                 count = 0;
1969                 cfs_hash_unlock(hs, 1);
1970                 cond_resched();
1971                 cfs_hash_lock(hs, 1);
1972         }
1973
1974         hs->hs_rehash_count++;
1975
1976         bkts = hs->hs_buckets;
1977         hs->hs_buckets = hs->hs_rehash_buckets;
1978         hs->hs_rehash_buckets = NULL;
1979
1980         hs->hs_cur_bits = hs->hs_rehash_bits;
1981 out:
1982         hs->hs_rehash_bits = 0;
1983         bsize = cfs_hash_bkt_size(hs);
1984         cfs_hash_unlock(hs, 1);
1985         /* can't refer to @hs anymore because it could be destroyed */
1986         if (bkts != NULL)
1987                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1988         if (rc != 0)
1989                 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1990 }
1991
1992 /**
1993  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1994  * @old_key must be provided to locate the objects previous location
1995  * in the hash, and the @new_key will be used to reinsert the object.
1996  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1997  * combo when it is critical that there is no window in time where the
1998  * object is missing from the hash.  When an object is being rehashed
1999  * the registered cfs_hash_get() and cfs_hash_put() functions will
2000  * not be called.
2001  */
2002 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
2003                          void *new_key, struct hlist_node *hnode)
2004 {
2005         struct cfs_hash_bd        bds[3];
2006         struct cfs_hash_bd        old_bds[2];
2007         struct cfs_hash_bd        new_bd;
2008
2009         LASSERT(!hlist_unhashed(hnode));
2010
2011         cfs_hash_lock(hs, 0);
2012
2013         cfs_hash_dual_bd_get(hs, old_key, old_bds);
2014         cfs_hash_bd_get(hs, new_key, &new_bd);
2015
2016         bds[0] = old_bds[0];
2017         bds[1] = old_bds[1];
2018         bds[2] = new_bd;
2019
2020         /* NB: bds[0] and bds[1] are ordered already */
2021         cfs_hash_bd_order(&bds[1], &bds[2]);
2022         cfs_hash_bd_order(&bds[0], &bds[1]);
2023
2024         cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2025         if (likely(old_bds[1].bd_bucket == NULL)) {
2026                 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2027         } else {
2028                 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2029                 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2030         }
2031         /* overwrite the key inside the locks, otherwise this may race
2032          * with other operations, e.g. rehash */
2033         cfs_hash_keycpy(hs, hnode, new_key);
2034
2035         cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2036         cfs_hash_unlock(hs, 0);
2037 }
2038 EXPORT_SYMBOL(cfs_hash_rehash_key);
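
/*
 * Usage sketch (illustrative; the object layout with mo_key/mo_hnode
 * and compute_new_key() is hypothetical): atomically moving an object
 * to a new key without a window where a lookup could miss it.
 *
 *      struct my_obj *mo = ...;
 *      __u64 old_key = mo->mo_key;
 *      __u64 new_key = compute_new_key(mo);
 *
 *      // relinks mo->mo_hnode under the relevant bucket locks and
 *      // copies the key via the registered hs_keycpy operation, if any
 *      cfs_hash_rehash_key(hs, &old_key, &new_key, &mo->mo_hnode);
 */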
2039
2040 void cfs_hash_debug_header(struct seq_file *m)
2041 {
2042         seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
2043                    CFS_HASH_BIGNAME_LEN, "name");
2044 }
2045 EXPORT_SYMBOL(cfs_hash_debug_header);
2046
2047 static struct cfs_hash_bucket **
2048 cfs_hash_full_bkts(struct cfs_hash *hs)
2049 {
2050         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2051         if (hs->hs_rehash_buckets == NULL)
2052                 return hs->hs_buckets;
2053
2054         LASSERT(hs->hs_rehash_bits != 0);
2055         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2056                hs->hs_rehash_buckets : hs->hs_buckets;
2057 }
2058
2059 static unsigned int
2060 cfs_hash_full_nbkt(struct cfs_hash *hs)
2061 {
2062         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2063         if (hs->hs_rehash_buckets == NULL)
2064                 return CFS_HASH_NBKT(hs);
2065
2066         LASSERT(hs->hs_rehash_bits != 0);
2067         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2068                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2069 }
2070
2071 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2072 {
2073         int dist[8] = { 0, };
2074         int maxdep = -1;
2075         int maxdepb = -1;
2076         int total = 0;
2077         int theta;
2078         int i;
2079
2080         cfs_hash_lock(hs, 0);
2081         theta = __cfs_hash_theta(hs);
2082
2083         seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
2084                    CFS_HASH_BIGNAME_LEN, hs->hs_name,
2085                    1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2086                    1 << hs->hs_max_bits,
2087                    __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2088                    __cfs_hash_theta_int(hs->hs_min_theta),
2089                    __cfs_hash_theta_frac(hs->hs_min_theta),
2090                    __cfs_hash_theta_int(hs->hs_max_theta),
2091                    __cfs_hash_theta_frac(hs->hs_max_theta),
2092                    hs->hs_flags, hs->hs_rehash_count);
2093
2094         /*
2095          * The distribution is a summary of the chained hash depth in
2096          * each of the libcfs hash buckets.  Each bucket's hsb_count is
2097          * divided by the hash theta value and used to generate a
2098          * histogram of the hash distribution.  A uniform hash will
2099          * result in all hash buckets being close to the average, thus
2100          * only the first few entries in the histogram will be non-zero.
2101          * If your hash function produces a non-uniform hash, this will
2102          * be observable as outlier buckets in the distribution histogram.
2103          *
2104          * Uniform hash distribution:           128/128/0/0/0/0/0/0
2105          * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
2106          */
2107         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2108                 struct cfs_hash_bd bd;
2109
2110                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2111                 cfs_hash_bd_lock(hs, &bd, 0);
2112                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2113                         maxdep  = bd.bd_bucket->hsb_depmax;
2114                         maxdepb = ffz(~maxdep);
2115                 }
2116                 total += bd.bd_bucket->hsb_count;
2117                 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2118                 cfs_hash_bd_unlock(hs, &bd, 0);
2119         }
2120
2121         seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2122         for (i = 0; i < 8; i++)
2123                 seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2124
2125         cfs_hash_unlock(hs, 0);
2126 }
2127 EXPORT_SYMBOL(cfs_hash_debug_str);
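
/*
 * Usage sketch (illustrative; my_hash_seq_show() and my_hs are
 * hypothetical): the two debug helpers pair up in a seq_file ->show()
 * handler, e.g. for a procfs/debugfs table dump:
 *
 *      static int my_hash_seq_show(struct seq_file *m, void *v)
 *      {
 *              cfs_hash_debug_header(m);
 *              cfs_hash_debug_str(my_hs, m);
 *              return 0;
 *      }
 */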