/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for hashing in the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
 * - ignore hs_rwlock if without CFS_HASH_REHASH setting
 * - buckets are allocated one by one (instead of contiguous memory),
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - "bucket" is a group of hlist_head now, user can specify bucket size
 *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
 *   one lock for reducing memory overhead.
 *
 * - support lockless hash, caller will take care of locks:
 *   avoid lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock/rwlock for bucket:
 *   overhead of spinlock contention is lower than read/write
 *   contention of rwlock, so using a spinlock to serialize operations on
 *   a bucket is more reasonable for frequently changed hash tables
 *
 * - support one-single lock mode:
 *   one lock to protect all hash operations to avoid overhead of
 *   multiple locks if hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations which are expensive in many
 *   use-cases.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some lustre use-cases require these functions to be strictly
 *   non-blocking, so any required rehash is scheduled on a different
 *   thread in those cases.
 *
 * - safer rehash on large hash table
 *   In the old implementation, the rehash function would exclusively lock
 *   the hash table and finish the rehash in one batch; this is dangerous
 *   on an SMP system because rehashing millions of elements could take a
 *   long time.  The new rehash implementation can release the lock and
 *   relax the CPU in the middle of a rehash, so it is safe for another
 *   thread to search/change the hash table even while it is rehashing.
 *
 * - support two different refcount modes
 *   . hash table has refcount on element
 *   . hash table doesn't change refcount on adding/removing element
 *
 * - support long name hash table (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash tables;
 *   cfs_hash_rehash_key overwrites the hash key inside the lock by
 *   calling hs_keycpy.
 *
 * - better hash iteration:
 *   Now we support both locked iteration & lockless iteration of the
 *   hash table. Also, the user can break the iteration by returning 1
 *   from the callback.
 */
#include <linux/seq_file.h>
#include <linux/log2.h>

#include <libcfs/linux/linux-list.h>
#include <libcfs/libcfs.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif

struct workqueue_struct *cfs_rehash_wq;

static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->spin)
{
        spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->spin)
{
        spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->rw)
{
        if (!exclusive)
                read_lock(&lock->rw);
        else
                write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->rw)
{
        if (!exclusive)
                read_unlock(&lock->rw);
        else
                write_unlock(&lock->rw);
}

static inline void
cfs_hash_rw_sem_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->rw_sem)
{
        if (!exclusive)
                down_read(&lock->rw_sem);
        else
                down_write(&lock->rw_sem);
}

static inline void
cfs_hash_rw_sem_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->rw_sem)
{
        if (!exclusive)
                up_read(&lock->rw_sem);
        else
                up_write(&lock->rw_sem);
}

/** No lock hash */
static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** rw_sem bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_sem_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_sem_lock,
        .hs_bkt_unlock  = cfs_hash_rw_sem_unlock,
};

/** rw_sem bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_rw_sem_lops = {
        .hs_lock        = cfs_hash_rw_sem_lock,
        .hs_unlock      = cfs_hash_rw_sem_unlock,
        .hs_bkt_lock    = cfs_hash_rw_sem_lock,
        .hs_bkt_unlock  = cfs_hash_rw_sem_unlock,
};

static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                if (cfs_hash_with_rw_sem_bktlock(hs)) {
                        init_rwsem(&hs->hs_lock.rw_sem);
                        hs->hs_lops = &cfs_hash_bkt_rw_sem_lops;
                } else {
                        rwlock_init(&hs->hs_lock.rw);

                        if (cfs_hash_with_rw_bktlock(hs))
                                hs->hs_lops = &cfs_hash_bkt_rw_lops;
                        else if (cfs_hash_with_spin_bktlock(hs))
                                hs->hs_lops = &cfs_hash_bkt_spin_lops;
                        else
                                LBUG();
                }
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else if (cfs_hash_with_rw_sem_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_sem_lops;
                else
                        LBUG();
        }
}

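/*
 * Illustrative summary (assuming the usual libcfs_hash.h creation flags
 * behind the cfs_hash_with_* predicates) of how flag combinations map to
 * the lock ops selected above:
 *
 *      CFS_HASH_NO_LOCK                        -> cfs_hash_nl_lops
 *      CFS_HASH_NO_BKTLOCK                     -> cfs_hash_nbl_lops
 *      CFS_HASH_REHASH + CFS_HASH_SPIN_BKTLOCK -> cfs_hash_bkt_spin_lops
 *      CFS_HASH_REHASH + CFS_HASH_RW_BKTLOCK   -> cfs_hash_bkt_rw_lops
 *      CFS_HASH_SPIN_BKTLOCK (no rehash)       -> cfs_hash_nr_bkt_spin_lops
 *      CFS_HASH_RW_BKTLOCK (no rehash)         -> cfs_hash_nr_bkt_rw_lops
 */
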
/**
 * Simple hash head without depth tracking;
 * a new element is always added to the head of the hlist.
 */
struct cfs_hash_head {
        struct hlist_head       hh_head;        /**< entries list */
};

static int
cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head);
}

static struct hlist_head *
cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head *head;

        head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking;
 * a new element is always added to the head of the hlist.
 */
struct cfs_hash_head_dep {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
};

static int
cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head_dep);
}

static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head_dep   *head;

        head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * Doubly linked hash head without depth tracking;
 * a new element is always added to the tail of the hlist.
 */
struct cfs_hash_dhead {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
};

static int
cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead);
}

static struct hlist_head *
cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead *head;

        head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (dh->dh_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dh_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return -1; /* unknown depth */
}

/**
 * Doubly linked hash head with depth tracking;
 * a new element is always added to the tail of the hlist.
 */
struct cfs_hash_dhead_dep {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
};

static int
cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead_dep);
}

static struct hlist_head *
cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead_dep *head;

        head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (dh->dd_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dd_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return --dh->dd_depth;
}

static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
       .hop_hhead      = cfs_hash_hh_hhead,
       .hop_hhead_size = cfs_hash_hh_hhead_size,
       .hop_hnode_add  = cfs_hash_hh_hnode_add,
       .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
       .hop_hhead      = cfs_hash_hd_hhead,
       .hop_hhead_size = cfs_hash_hd_hhead_size,
       .hop_hnode_add  = cfs_hash_hd_hnode_add,
       .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
       .hop_hhead      = cfs_hash_dh_hhead,
       .hop_hhead_size = cfs_hash_dh_hhead_size,
       .hop_hnode_add  = cfs_hash_dh_hnode_add,
       .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
       .hop_hhead      = cfs_hash_dd_hhead,
       .hop_hhead_size = cfs_hash_dd_hhead_size,
       .hop_hnode_add  = cfs_hash_dd_hnode_add,
       .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}

static void
cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
                     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}

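/*
 * Worked example (illustrative values): with bits = 7 and hs_bkt_bits = 3
 * there are 2^(7-3) = 16 buckets, each holding 2^3 = 8 hlist heads.  For
 * a key hashing to index 90 (0b1011010):
 *
 *      bd_bucket = bkts[90 & 15] = bkts[10]  (low bits pick the bucket)
 *      bd_offset = 90 >> 4       = 5         (high bits pick the hlist head)
 */
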
void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
EXPORT_SYMBOL(cfs_hash_bd_get);

static inline void
cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        spin_unlock(&hs->hs_dep_lock);

        queue_work(cfs_rehash_wq, &hs->hs_dep_work);
# endif
}

void
cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        struct hlist_node *hnode)
{
        int rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(atomic_read(&hs->hs_count) > 0);
                atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
                        struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
        struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
        struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
        int                rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
         * in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND         = BIT(0),
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF          = BIT(1),
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD          = BIT(2),
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL          = BIT(3),
};

enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete if existed */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
};

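/*
 * The intents above are plain bitmask compositions, e.g.:
 *
 *      CFS_HS_LOOKUP_IT_FINDADD = FIND(0x1) | REF(0x2) | ADD(0x4) = 0x7
 *      CFS_HS_LOOKUP_IT_FINDDEL = FIND(0x1) | DEL(0x8)            = 0x9
 *
 * cfs_hash_multi_bd_findadd_locked() below builds its intent the same
 * way: CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF).
 */
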
static struct hlist_node *
cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key, struct hlist_node *hnode,
                          enum cfs_hash_lookup_intent intent)
{
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
        struct hlist_node  *ehnode;
        struct hlist_node  *match;
        int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function, we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time. */
        match = intent_add ? NULL : hnode;
        hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);

static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                       unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        /**
         * bds must be ordered by ascending bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket but
         * have different bd::bd_offset, so we need to take care to avoid
         * deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}

static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                         unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                unsigned n, const void *key)
{
        struct hlist_node *ehnode;
        unsigned          i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                        CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        int               intent;
        unsigned          i;

        LASSERT(hnode != NULL);
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                struct cfs_hash_bd      mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
{
        struct hlist_node *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
        int     rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) {
                swap(*bd1, *bd2); /* swap bd1 and bd2 */
        }
}

void
cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
                     struct cfs_hash_bd *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}

void
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}

void
cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}

static void
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
                      int bkt_size, int prev_size, int size)
{
        int     i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        struct cfs_hash_bucket **new_bkts;
        int                 i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
                struct cfs_hash_bd     bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else if (cfs_hash_with_rw_sem_bktlock(hs))
                        init_rwsem(&new_bkts[i]->hsb_lock.rw_sem);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize a new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enables dynamic hash resizing
 *           - CFS_HASH_SORT enables chained hash sort
 */
static void cfs_hash_rehash_worker(struct work_struct *work);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static void cfs_hash_dep_print(struct work_struct *work)
{
        struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
        int         dep;
        int         bkt;
        int         off;
        int         bits;

        spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark as workitem done */
        spin_unlock(&hs->hs_dep_lock);
}

static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
        spin_lock_init(&hs->hs_dep_lock);
        INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
        cancel_work_sync(&hs->hs_dep_work);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                struct cfs_hash_ops *ops, unsigned flags)
{
        struct cfs_hash *hs;
        int         len;

        ENTRY;

        BUILD_BUG_ON(CFS_HASH_THETA_BITS >= 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put != NULL || ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                      ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
        if (hs == NULL)
                RETURN(NULL);

        strlcpy(hs->hs_name, name, len);
        hs->hs_flags = flags;

        atomic_set(&hs->hs_refcount, 1);
        atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
        RETURN(NULL);
}
EXPORT_SYMBOL(cfs_hash_create);

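/*
 * Minimal usage sketch (hypothetical "example_*" ops, and assuming the
 * CFS_HASH_MIN_THETA/CFS_HASH_MAX_THETA/CFS_HASH_DEFAULT helpers from
 * libcfs_hash.h): a table starting at 2^5 hash heads, growing up to
 * 2^10, with 2^3 heads per bucket lock:
 *
 *      static struct cfs_hash_ops example_ops = {
 *              .hs_hash        = example_hash,
 *              .hs_key         = example_key,
 *              .hs_keycmp      = example_keycmp,
 *              .hs_object      = example_object,
 *              .hs_get         = example_get,
 *              .hs_put_locked  = example_put_locked,
 *      };
 *
 *      hs = cfs_hash_create("example", 5, 10, 3, 0,
 *                           CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *                           &example_ops, CFS_HASH_DEFAULT);
 *      if (hs == NULL)
 *              return -ENOMEM;
 */
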
/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(struct cfs_hash *hs)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        struct cfs_hash_bd         bd;
        int                   i;
        ENTRY;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * prohibit further rehashes; no lock is needed because
         * I'm the only (last) one that can change it.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for consistent code */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't assert the key is valid, because
                                 * rehash can be interrupted */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cond_resched();
        }

        LASSERT(atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));

        EXIT;
}

struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(struct cfs_hash *hs)
{
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);

static inline int
cfs_hash_rehash_bits(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}

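/*
 * Worked example (assuming the fixed-point theta helpers in libcfs_hash.h,
 * where theta is the item count scaled by CFS_HASH_THETA_BITS = 10
 * fractional bits and divided by 2^cur_bits): with cur_bits = 5 (32 hash
 * heads) and 80 items, theta = (80 << 10) >> 5 = 2560, i.e. 2.5 items per
 * hash head.  If hs_max_theta encodes 2.0 (2048) and cur_bits < max_bits,
 * cfs_hash_rehash_bits() asks to grow the table to cur_bits + 1.
 */
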
/**
 * Don't allow an inline rehash if:
 * - the user wants non-blocking changes (add/del) on the hash table
 * - there are too many elements
 */
static inline int
cfs_hash_rehash_inline(struct cfs_hash *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        struct cfs_hash_bd   bd;
        int             bits;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);

static struct hlist_node *
cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
                     struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        struct cfs_hash_bd     bds[2];
        int               bits = 0;

        LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
                    struct hlist_node *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);

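/*
 * Usage sketch (hypothetical object layout):
 *
 *      rc = cfs_hash_add_unique(hs, &obj->eo_key, &obj->eo_hnode);
 *      if (rc == -EALREADY)
 *              handle_duplicate(obj); (an item with this key is hashed)
 */
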
/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        void           *obj  = NULL;
        int             bits = 0;
        struct cfs_hash_bd   bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        /* NB: do nothing if @hnode is not in hash table */
        if (hnode == NULL || !hlist_unhashed(hnode)) {
                if (bds[1].bd_bucket == NULL && hnode != NULL) {
                        cfs_hash_bd_del_locked(hs, &bds[0], hnode);
                } else {
                        hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
                                                                key, hnode);
                }
        }

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete the item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs this function must be called once per key.  The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash hs->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs NULL is returned.
 */
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
        void                 *obj = NULL;
        struct hlist_node     *hnode;
        struct cfs_hash_bd         bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);

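/*
 * Usage sketch (hypothetical names): each successful lookup took a
 * reference via ops->hs_get, so it must be balanced with cfs_hash_put():
 *
 *      obj = cfs_hash_lookup(hs, &key);
 *      if (obj != NULL) {
 *              do_something(obj);
 *              cfs_hash_put(hs, &obj->eo_hnode);
 *      }
 */
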
static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there is a race on cfs_hash::hs_iterating, but it doesn't
         * matter because it's just an unreliable signal to the
         * rehash-thread; the rehash-thread will try to finish the rehash
         * ASAP when it sees this.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;
        cfs_hash_unlock(hs, 1);

        /* NB: iteration is mostly called by service threads; we tend to
         * cancel a pending rehash request instead of blocking the service
         * thread, and relaunch the rehash request after iteration
         */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel(hs);
}

static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: there is a race on cfs_hash::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item by
 *      cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        struct hlist_node       *hnode;
        struct hlist_node       *pos;
        struct cfs_hash_bd      bd;
        __u64                   count = 0;
        int                     excl  = !!remove_safe;
        int                     loop  = 0;
        int                     i;
        ENTRY;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        RETURN(count);
}

1507 struct cfs_hash_cond_arg {
1508         cfs_hash_cond_opt_cb_t  func;
1509         void                   *arg;
1510 };
1511
1512 static int
1513 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1514                          struct hlist_node *hnode, void *data)
1515 {
1516         struct cfs_hash_cond_arg *cond = data;
1517
1518         if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1519                 cfs_hash_bd_del_locked(hs, bd, hnode);
1520         return 0;
1521 }
1522
1523 /**
1524  * Delete item from the libcfs hash @hs when @func return true.
1525  * The write lock being hold during loop for each bucket to avoid
1526  * any object be reference.
1527  */
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
        struct cfs_hash_cond_arg arg = {
                .func   = func,
                .arg    = data,
        };

        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);
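
/*
 * Usage sketch (illustrative only, not from the original file): drop
 * every idle entry from a hash in one pass.  "struct my_entry", its
 * fields and "my_hash" are hypothetical names; the callback type is
 * cfs_hash_cond_opt_cb_t as consumed by cfs_hash_cond_del() above.
 */
#if 0
static int my_entry_is_idle(void *obj, void *data)
{
        struct my_entry *entry = obj;

        /* a non-zero return asks cfs_hash_cond_del() to unlink @obj */
        return atomic_read(&entry->me_refcount) == 0;
}

/* caller: cfs_hash_cond_del(my_hash, my_entry_is_idle, NULL); */
#endif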

void
cfs_hash_for_each(struct cfs_hash *hs,
                  cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);

void
cfs_hash_for_each_safe(struct cfs_hash *hs,
                       cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
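
/*
 * Usage sketch (illustrative only, not from the original file): count
 * entries that match some condition.  The callback runs under the
 * bucket lock, so it must not sleep; a non-zero return value stops the
 * iteration early.  "struct my_entry" and its "me_active" field are
 * hypothetical names.
 */
#if 0
static int my_count_active(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                           struct hlist_node *hnode, void *data)
{
        struct my_entry *entry = cfs_hash_object(hs, hnode);
        unsigned long *nr = data;

        if (entry->me_active)
                (*nr)++;
        return 0; /* keep iterating over every item */
}

/*
 * caller:
 *      unsigned long nr = 0;
 *      cfs_hash_for_each(my_hash, my_count_active, &nr);
 */
#endif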

static int
cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
              struct hlist_node *hnode, void *data)
{
        *(int *)data = 0;
        return 1; /* return 1 to break the loop */
}

int
cfs_hash_is_empty(struct cfs_hash *hs)
{
        int empty = 1;

        cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
        return empty;
}
EXPORT_SYMBOL(cfs_hash_is_empty);

__u64
cfs_hash_size_get(struct cfs_hash *hs)
{
        return cfs_hash_with_counter(hs) ?
               atomic_read(&hs->hs_count) :
               cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
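
/*
 * Usage sketch (illustrative only, not from the original file):
 * cfs_hash_is_empty() stops at the first item it finds, while
 * cfs_hash_size_get() without CFS_HASH_COUNTER has to walk every
 * bucket, so prefer the former when only emptiness matters.
 * "my_report" and "my_hash" are hypothetical names.
 */
#if 0
static void my_report(struct cfs_hash *my_hash)
{
        if (!cfs_hash_is_empty(my_hash))
                CDEBUG(D_INFO, "table holds %llu items\n",
                       cfs_hash_size_get(my_hash));
}
#endif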

/*
 * cfs_hash_for_each_relax:
 * Iterate the hash table and call @func on each item without holding
 * any lock.  This function cannot guarantee that the iteration will
 * complete if these features are enabled:
 *
 *  a. if rehash_key is enabled, an item can be moved from one bucket
 *     to another bucket
 *  b. the user can remove a non-zero-ref item from the hash-table,
 *     and, even worse, change its key and insert it into another
 *     hash bucket.
 * There is no way to finish the iteration correctly in either of
 * these cases, so the iteration has to be stopped on change.
 */
static int
cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int start)
{
        struct hlist_node       *hnode;
        struct hlist_node       *next = NULL;
        struct cfs_hash_bd      bd;
        __u32                   version;
        int                     count = 0;
        int                     stop_on_change;
        int                     has_put_locked;
        int                     rc = 0;
        int                     i, end = -1;
        ENTRY;

        stop_on_change = cfs_hash_with_rehash_key(hs) ||
                         !cfs_hash_with_no_itemref(hs);
        has_put_locked = hs->hs_ops->hs_put_locked != NULL;
        cfs_hash_lock(hs, 0);
again:
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                if (i < start)
                        continue;
                else if (end > 0 && i >= end)
                        break;

                cfs_hash_bd_lock(hs, &bd, 0);
                version = cfs_hash_bd_version_get(&bd);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hnode = hhead->first;
                        if (hnode == NULL)
                                continue;
                        cfs_hash_get(hs, hnode);
                        for (; hnode != NULL; hnode = next) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                next = hnode->next;
                                if (next != NULL)
                                        cfs_hash_get(hs, next);
                                cfs_hash_bd_unlock(hs, &bd, 0);
                                cfs_hash_unlock(hs, 0);

                                rc = func(hs, &bd, hnode, data);
                                if (stop_on_change || !has_put_locked)
                                        cfs_hash_put(hs, hnode);

                                cond_resched();
                                count++;

                                cfs_hash_lock(hs, 0);
                                cfs_hash_bd_lock(hs, &bd, 0);
                                if (stop_on_change) {
                                        if (version !=
                                            cfs_hash_bd_version_get(&bd))
                                                rc = -EINTR;
                                } else if (has_put_locked) {
                                        cfs_hash_put_locked(hs, hnode);
                                }
                                if (rc) /* callback wants to break iteration */
                                        break;
                        }
                        if (next != NULL) {
                                if (has_put_locked) {
                                        cfs_hash_put_locked(hs, next);
                                        next = NULL;
                                }
                                break;
                        } else if (rc != 0) {
                                break;
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, 0);
                if (next != NULL && !has_put_locked) {
                        cfs_hash_put(hs, next);
                        next = NULL;
                }
                if (rc) /* callback wants to break iteration */
                        break;
        }

        if (start > 0 && rc == 0) {
                end = start;
                start = 0;
                goto again;
        }

        cfs_hash_unlock(hs, 0);
        return count;
}

int
cfs_hash_for_each_nolock(struct cfs_hash *hs,
                         cfs_hash_for_each_cb_t func, void *data, int start)
{
        ENTRY;

        if (cfs_hash_with_no_lock(hs) ||
            cfs_hash_with_rehash_key(hs) ||
            !cfs_hash_with_no_itemref(hs))
                RETURN(-EOPNOTSUPP);

        if (hs->hs_ops->hs_get == NULL ||
           (hs->hs_ops->hs_put == NULL &&
            hs->hs_ops->hs_put_locked == NULL))
                RETURN(-EOPNOTSUPP);

        cfs_hash_for_each_enter(hs);
        cfs_hash_for_each_relax(hs, func, data, start);
        cfs_hash_for_each_exit(hs);

        RETURN(0);
}
EXPORT_SYMBOL(cfs_hash_for_each_nolock);
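
/*
 * Usage sketch (illustrative only, not from the original file): a
 * callback passed to cfs_hash_for_each_nolock() may sleep because no
 * lock is held while it runs; the table must register hs_get plus
 * hs_put or hs_put_locked so each item stays pinned across the call.
 * "struct my_entry" and "my_entry_writeback()" are hypothetical.
 */
#if 0
static int my_flush(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                    struct hlist_node *hnode, void *data)
{
        struct my_entry *entry = cfs_hash_object(hs, hnode);

        my_entry_writeback(entry); /* blocking I/O is allowed here */
        return 0;
}

/* caller: cfs_hash_for_each_nolock(my_hash, my_flush, NULL, 0); */
#endif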

/**
 * For each hash bucket in the libcfs hash @hs call the passed callback
 * @func until all the hash buckets are empty.  The passed callback @func
 * or the previously registered callback hs->hs_put must remove the item
 * from the hash.  You may either use the cfs_hash_del() or hlist_del()
 * functions.  No rwlocks will be held during the callback @func, so it
 * is safe to sleep if needed.  This function will not terminate until
 * the hash is empty.  Note it is still possible to concurrently add new
 * items into the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
int
cfs_hash_for_each_empty(struct cfs_hash *hs,
                        cfs_hash_for_each_cb_t func, void *data)
{
        unsigned i = 0;
        ENTRY;

        if (cfs_hash_with_no_lock(hs))
                return -EOPNOTSUPP;

        if (hs->hs_ops->hs_get == NULL ||
           (hs->hs_ops->hs_put == NULL &&
            hs->hs_ops->hs_put_locked == NULL))
                return -EOPNOTSUPP;

        cfs_hash_for_each_enter(hs);
        while (cfs_hash_for_each_relax(hs, func, data, 0)) {
                CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
                       hs->hs_name, i++);
        }
        cfs_hash_for_each_exit(hs);
        RETURN(0);
}
EXPORT_SYMBOL(cfs_hash_for_each_empty);
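
/*
 * Usage sketch (illustrative only, not from the original file): a
 * typical teardown loop.  The callback must unlink every item it is
 * handed or cfs_hash_for_each_empty() will loop forever.  The entry
 * type, its "me_key" field and "my_entry_free()" are hypothetical.
 */
#if 0
static int my_entry_kill(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
{
        struct my_entry *entry = cfs_hash_object(hs, hnode);

        cfs_hash_del(hs, &entry->me_key, hnode); /* unlink from @hs */
        my_entry_free(entry);
        return 0;
}

/* caller: cfs_hash_for_each_empty(my_hash, my_entry_kill, NULL); */
#endif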

void
cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
                        cfs_hash_for_each_cb_t func, void *data)
{
        struct hlist_head *hhead;
        struct hlist_node *hnode;
        struct cfs_hash_bd bd;

        cfs_hash_for_each_enter(hs);
        cfs_hash_lock(hs, 0);
        if (hindex >= CFS_HASH_NHLIST(hs))
                goto out;

        cfs_hash_bd_index_set(hs, hindex, &bd);

        cfs_hash_bd_lock(hs, &bd, 0);
        hhead = cfs_hash_bd_hhead(hs, &bd);
        hlist_for_each(hnode, hhead) {
                if (func(hs, &bd, hnode, data))
                        break;
        }
        cfs_hash_bd_unlock(hs, &bd, 0);
out:
        cfs_hash_unlock(hs, 0);
        cfs_hash_for_each_exit(hs);
}
EXPORT_SYMBOL(cfs_hash_hlist_for_each);

/*
 * For each item in the libcfs hash @hs which matches the @key call
 * the passed callback @func and pass to it as an argument each hash
 * item and the private @data. During the callback the bucket lock
 * is held so the callback must never sleep.
 */
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
                      cfs_hash_for_each_cb_t func, void *data)
{
        struct hlist_node *hnode;
        struct cfs_hash_bd bds[2];
        unsigned           i;

        cfs_hash_lock(hs, 0);

        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        cfs_hash_for_each_bd(bds, 2, i) {
                struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);

                hlist_for_each(hnode, hlist) {
                        cfs_hash_bucket_validate(hs, &bds[i], hnode);

                        if (cfs_hash_keycmp(hs, key, hnode)) {
                                if (func(hs, &bds[i], hnode, data))
                                        break;
                        }
                }
        }

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each_key);
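
/*
 * Usage sketch (illustrative only, not from the original file): scan
 * the chain for @key, e.g. to pick out one matching object.  The
 * bucket lock is held, so the callback must not sleep; a non-zero
 * return stops the scan of the current chain.  "struct my_entry" and
 * "my_key" are hypothetical names.
 */
#if 0
static int my_take_first(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
{
        *(struct my_entry **)data = cfs_hash_object(hs, hnode);
        return 1; /* stop at the first key match */
}

/*
 * caller:
 *      struct my_entry *found = NULL;
 *      cfs_hash_for_each_key(my_hash, &my_key, my_take_first, &found);
 */
#endif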

/* Cancel a pending or running rehash and clear hs_rehash_bits. */
void
cfs_hash_rehash_cancel(struct cfs_hash *hs)
{
        LASSERT(hs->hs_iterators > 0 || hs->hs_exiting);
        while (cfs_hash_is_rehashing(hs)) {
                if (cancel_work_sync(&hs->hs_rehash_work)) {
                        cfs_hash_lock(hs, 1);
                        hs->hs_rehash_bits = 0;
                        cfs_hash_unlock(hs, 1);
                }
                cond_resched();
        }
}

/**
 * Rehash the libcfs hash @hs.  This can be used to grow the hash size
 * when excessive chaining is detected, or to shrink the hash when it
 * is larger than needed.  When the CFS_HASH_REHASH flag is set in @hs
 * the libcfs hash may be dynamically rehashed during addition or
 * removal if the hash's theta value exceeds either the hs->hs_min_theta
 * or hs->hs_max_theta values.  By default these values are tuned to
 * keep the chained hash depth small, and this approach assumes a
 * reasonably uniform hashing function.  The theta thresholds for @hs
 * are tunable via cfs_hash_set_theta().
 */
void
cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
{
        int     rc;

        LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));

        cfs_hash_lock(hs, 1);

        rc = cfs_hash_rehash_bits(hs);
        if (rc <= 0) {
                cfs_hash_unlock(hs, 1);
                return;
        }

        hs->hs_rehash_bits = rc;
        if (!do_rehash) {
                /* launch the rehash worker and return */
                queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
                cfs_hash_unlock(hs, 1);
                return;
        }

        /* rehash right now */
        cfs_hash_unlock(hs, 1);

        cfs_hash_rehash_worker(&hs->hs_rehash_work);
}

static int
cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
{
        struct cfs_hash_bd new;
        struct hlist_head *hhead;
        struct hlist_node *hnode;
        struct hlist_node *pos;
        void              *key;
        int                c = 0;

        /* the caller holds cfs_hash_lock(hs, 1), so no bucket lock needed */
        cfs_hash_bd_for_each_hlist(hs, old, hhead) {
                hlist_for_each_safe(hnode, pos, hhead) {
                        key = cfs_hash_key(hs, hnode);
                        LASSERT(key != NULL);
                        /* Validate hnode is in the correct bucket. */
                        cfs_hash_bucket_validate(hs, old, hnode);
                        /*
                         * Delete from old hash bucket; move to new bucket.
                         * ops->hs_key must be defined.
                         */
                        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                             hs->hs_rehash_bits, key, &new);
                        cfs_hash_bd_move_locked(hs, old, &new, hnode);
                        c++;
                }
        }
        return c;
}

static void
cfs_hash_rehash_worker(struct work_struct *work)
{
        struct cfs_hash *hs = container_of(work, struct cfs_hash,
                                           hs_rehash_work);
        struct cfs_hash_bucket **bkts;
        struct cfs_hash_bd      bd;
        unsigned int            old_size;
        unsigned int            new_size;
        int                     bsize;
        int                     count = 0;
        int                     rc = 0;
        int                     i;

        LASSERT(hs != NULL && cfs_hash_with_rehash(hs));

        cfs_hash_lock(hs, 0);
        LASSERT(cfs_hash_is_rehashing(hs));

        old_size = CFS_HASH_NBKT(hs);
        new_size = CFS_HASH_RH_NBKT(hs);

        cfs_hash_unlock(hs, 0);

        /*
         * hs::hs_rwlock is not needed for hs::hs_buckets here,
         * because nobody else can change the bucket table.
         */
        bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
                                        old_size, new_size);
        cfs_hash_lock(hs, 1);
        if (bkts == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        if (bkts == hs->hs_buckets) {
                bkts = NULL; /* do nothing */
                goto out;
        }

        rc = __cfs_hash_theta(hs);
        if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
                /* free the newly allocated bkt-table */
                old_size = new_size;
                new_size = CFS_HASH_NBKT(hs);
                rc = -EALREADY;
                goto out;
        }

        LASSERT(hs->hs_rehash_buckets == NULL);
        hs->hs_rehash_buckets = bkts;

        rc = 0;
        cfs_hash_for_each_bucket(hs, &bd, i) {
                if (cfs_hash_is_exiting(hs)) {
                        rc = -ESRCH;
                        /* someone wants to destroy the hash, abort now */
                        if (old_size < new_size) /* OK to free old bkt-table */
                                break;
                        /* it's shrinking, need to free the new bkt-table */
                        hs->hs_rehash_buckets = NULL;
                        old_size = new_size;
                        new_size = CFS_HASH_NBKT(hs);
                        goto out;
                }

                count += cfs_hash_rehash_bd(hs, &bd);
                if (count < CFS_HASH_LOOP_HOG ||
                    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
                        continue;
                }

                count = 0;
                cfs_hash_unlock(hs, 1);
                cond_resched();
                cfs_hash_lock(hs, 1);
        }

        hs->hs_rehash_count++;

        bkts = hs->hs_buckets;
        hs->hs_buckets = hs->hs_rehash_buckets;
        hs->hs_rehash_buckets = NULL;

        hs->hs_cur_bits = hs->hs_rehash_bits;
out:
        hs->hs_rehash_bits = 0;
        bsize = cfs_hash_bkt_size(hs);
        cfs_hash_unlock(hs, 1);
        /* can't refer to @hs anymore because it could be destroyed */
        if (bkts != NULL)
                cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
        if (rc != 0)
                CDEBUG(D_INFO, "early exit from rehashing: %d\n", rc);
}

/**
 * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered cfs_hash_get() and cfs_hash_put() functions will
 * not be called.
 */
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
                         void *new_key, struct hlist_node *hnode)
{
        struct cfs_hash_bd bds[3];
        struct cfs_hash_bd old_bds[2];
        struct cfs_hash_bd new_bd;

        LASSERT(!hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);

        cfs_hash_dual_bd_get(hs, old_key, old_bds);
        cfs_hash_bd_get(hs, new_key, &new_bd);

        bds[0] = old_bds[0];
        bds[1] = old_bds[1];
        bds[2] = new_bd;

        /* NB: bds[0] and bds[1] are ordered already */
        cfs_hash_bd_order(&bds[1], &bds[2]);
        cfs_hash_bd_order(&bds[0], &bds[1]);

        cfs_hash_multi_bd_lock(hs, bds, 3, 1);
        if (likely(old_bds[1].bd_bucket == NULL)) {
                cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
        } else {
                cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
                cfs_hash_bd_add_locked(hs, &new_bd, hnode);
        }
        /* overwrite the key inside the locks, otherwise we may race
         * with other operations, e.g. rehash */
        cfs_hash_keycpy(hs, hnode, new_key);

        cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
        cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_rehash_key);
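
/*
 * Usage sketch (illustrative only, not from the original file): move
 * an object to a new key with no window where a lookup could miss it.
 * "struct my_entry", its "me_id"/"me_hnode" fields and
 * "my_entry_next_id()" are hypothetical; the new key is copied into
 * the object through the registered ops->hs_keycpy callback.
 */
#if 0
static void my_entry_change_id(struct cfs_hash *hs, struct my_entry *entry)
{
        __u64 new_id = my_entry_next_id(entry);

        cfs_hash_rehash_key(hs, &entry->me_id, &new_id, &entry->me_hnode);
}
#endif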

void cfs_hash_debug_header(struct seq_file *m)
{
        seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
                   CFS_HASH_BIGNAME_LEN, "name");
}
EXPORT_SYMBOL(cfs_hash_debug_header);

static struct cfs_hash_bucket **
cfs_hash_full_bkts(struct cfs_hash *hs)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
                return hs->hs_buckets;

        LASSERT(hs->hs_rehash_bits != 0);
        return hs->hs_rehash_bits > hs->hs_cur_bits ?
               hs->hs_rehash_buckets : hs->hs_buckets;
}

static unsigned int
cfs_hash_full_nbkt(struct cfs_hash *hs)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (hs->hs_rehash_buckets == NULL)
                return CFS_HASH_NBKT(hs);

        LASSERT(hs->hs_rehash_bits != 0);
        return hs->hs_rehash_bits > hs->hs_cur_bits ?
               CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}

void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
{
        int dist[8] = { 0, };
        int maxdep = -1;
        int maxdepb = -1;
        int total = 0;
        int theta;
        int i;

        cfs_hash_lock(hs, 0);
        theta = __cfs_hash_theta(hs);

        seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
                   CFS_HASH_BIGNAME_LEN, hs->hs_name,
                   1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
                   1 << hs->hs_max_bits,
                   __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
                   __cfs_hash_theta_int(hs->hs_min_theta),
                   __cfs_hash_theta_frac(hs->hs_min_theta),
                   __cfs_hash_theta_int(hs->hs_max_theta),
                   __cfs_hash_theta_frac(hs->hs_max_theta),
                   hs->hs_flags, hs->hs_rehash_count);

        /*
         * The distribution is a summary of the chained hash depth in
         * each of the libcfs hash buckets.  Each bucket's hsb_count is
         * divided by the hash theta value and used to generate a
         * histogram of the hash distribution.  A uniform hash will
         * result in all hash buckets being close to the average, thus
         * only the first few entries in the histogram will be non-zero.
         * If your hash function results in a non-uniform hash, this
         * will be observable as outlier buckets in the distribution
         * histogram.
         *
         * Uniform hash distribution:           128/128/0/0/0/0/0/0
         * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
         */
        for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
                struct cfs_hash_bd bd;

                bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
                cfs_hash_bd_lock(hs, &bd, 0);
                if (maxdep < bd.bd_bucket->hsb_depmax) {
                        maxdep  = bd.bd_bucket->hsb_depmax;
                        maxdepb = ffz(~maxdep);
                }
                total += bd.bd_bucket->hsb_count;
                dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
                cfs_hash_bd_unlock(hs, &bd, 0);
        }

        seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
        for (i = 0; i < 8; i++)
                seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');

        cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_debug_str);