/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implements a hash table class for the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - moved all of this code to libcfs
 * - don't allow cur_bits != max_bits without CFS_HASH_REHASH set
 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
 * - buckets are allocated one by one (instead of as contiguous memory)
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - a "bucket" is now a group of hlist_heads; the user can specify the
 *   bucket size via bkt_bits of cfs_hash_create(). All hlist_heads in a
 *   bucket share one lock to reduce memory overhead.
 *
 * - support lockless hash; the caller takes care of locks:
 *   avoids lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock and rwlock for the bucket lock:
 *   the overhead of spinlock contention is lower than the read/write
 *   contention of an rwlock, so using a spinlock to serialize operations
 *   on a bucket is more reasonable for frequently changed hash tables
 *
 * - support single-lock mode:
 *   one lock protects all hash operations, avoiding the overhead of
 *   multiple locks when the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations, which are expensive in many
 *   use-cases.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking, so any required rehash must be scheduled on a
 *   different thread in those cases.
 *
 * - safer rehash on large hash tables:
 *   In the old implementation, the rehash function would exclusively
 *   lock the hash table and finish the rehash in one batch; this is
 *   dangerous on SMP systems because rehashing millions of elements
 *   can take a long time. The new rehash implementation can release
 *   the lock and relax the CPU in the middle of a rehash, so it is
 *   safe for another thread to search/change the hash table even
 *   while it is rehashing.
 *
 * - support two different refcount modes:
 *   . the hash table holds a refcount on each element
 *   . the hash table doesn't change the refcount on adding/removing
 *     an element
 *
 * - support long-name hash tables (for the param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash
 *   tables, and cfs_hash_rehash_key overwrites the hash key under
 *   lock by calling hs_keycpy.
 *
 * - better hash iteration:
 *   Now we support both locked and lockless iteration of the hash
 *   table. Also, the user can break out of the iteration by returning
 *   1 from the callback.
 */
#include <linux/seq_file.h>

#include <libcfs/linux/linux-list.h>
#include <libcfs/libcfs.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif

struct cfs_wi_sched *cfs_sched_rehash;

static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->spin)
{
        spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->spin)
{
        spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->rw)
{
        if (!exclusive)
                read_lock(&lock->rw);
        else
                write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->rw)
{
        if (!exclusive)
                read_unlock(&lock->rw);
        else
                write_unlock(&lock->rw);
}

/** No lock hash */
static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                rwlock_init(&hs->hs_lock.rw);

                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_spin_lops;
                else
                        LBUG();
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else
                        LBUG();
        }
}

/**
 * Simple hash head without depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head {
        struct hlist_head       hh_head;        /**< entries list */
};

static int
cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head);
}

static struct hlist_head *
cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head *head;

        head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head_dep {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
};

static int
cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head_dep);
}

static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head_dep   *head;

        head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * double links hash head without depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
};

static int
cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead);
}

static struct hlist_head *
cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead *head;

        head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (dh->dh_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dh_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return -1; /* unknown depth */
}

/**
 * double links hash head with depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead_dep {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
};

static int
cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead_dep);
}

static struct hlist_head *
cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead_dep *head;

        head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (dh->dd_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dd_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return --dh->dd_depth;
}

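
/*
 * Summary note: the four hlist-head flavours above trade memory for
 * features.
 *   cfs_hash_head      - add at head, no depth tracking
 *   cfs_hash_head_dep  - add at head, tracks chain depth
 *   cfs_hash_dhead     - add at tail (preserves insertion order), no depth
 *   cfs_hash_dhead_dep - add at tail and tracks chain depth
 * cfs_hash_hlist_setup() below selects one of them, based on the add-tail
 * and depth feature flags (CFS_HASH_ADD_TAIL and CFS_HASH_DEPTH in
 * libcfs_hash.h).
 */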
static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
        .hop_hhead      = cfs_hash_hh_hhead,
        .hop_hhead_size = cfs_hash_hh_hhead_size,
        .hop_hnode_add  = cfs_hash_hh_hnode_add,
        .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
        .hop_hhead      = cfs_hash_hd_hhead,
        .hop_hhead_size = cfs_hash_hd_hhead_size,
        .hop_hnode_add  = cfs_hash_hd_hnode_add,
        .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
        .hop_hhead      = cfs_hash_dh_hhead,
        .hop_hhead_size = cfs_hash_dh_hhead_size,
        .hop_hnode_add  = cfs_hash_dh_hnode_add,
        .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
        .hop_hhead      = cfs_hash_dd_hhead,
        .hop_hhead_size = cfs_hash_dd_hhead_size,
        .hop_hnode_add  = cfs_hash_dd_hnode_add,
        .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}

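/*
 * Note on the mapping below: the hashed index is split into a bucket
 * selector (the low bits) and an offset into that bucket's array of
 * hlist_heads (the high bits).  For example, with bits = 10 and
 * hs_bkt_bits = 3, index 0x2a5 selects bucket 0x2a5 & 0x7f = 0x25 at
 * offset 0x2a5 >> 7 = 0x5.
 */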
static void
cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
                     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}

void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
EXPORT_SYMBOL(cfs_hash_bd_get);

static inline void
cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        spin_unlock(&hs->hs_dep_lock);

        cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}

void
cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        struct hlist_node *hnode)
{
        int rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(atomic_read(&hs->hs_count) > 0);
                atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
                        struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
        struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
        struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
        int                rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
         * in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
};

enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if existed, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete if existed */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
};
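
/*
 * For example, CFS_HS_LOOKUP_IT_FINDADD expands to FIND | REF | ADD:
 * look the key up, take a reference if it is found, and otherwise add
 * the caller's node.
 */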

static struct hlist_node *
cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key, struct hlist_node *hnode,
                          enum cfs_hash_lookup_intent intent)
{
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
        struct hlist_node  *ehnode;
        struct hlist_node  *match;
        int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function, we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time. */
        match = intent_add ? NULL : hnode;
        hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
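
/*
 * Note: cfs_hash_bd_lookup_locked() returns the matching item with a
 * reference held (intent FIND = FIND | REF), while
 * cfs_hash_bd_peek_locked() returns it without taking a reference
 * (intent PEEK); a peeked item is generally only safe to use while the
 * bucket lock is still held.
 */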

static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                       unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        /**
         * bds must be ordered by ascending bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket but
         * have a different bd::bd_offset, so we must take care to avoid
         * deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}

static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                         unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                unsigned n, const void *key)
{
        struct hlist_node *ehnode;
        unsigned          i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                   CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        int               intent;
        unsigned          i;

        LASSERT(hnode != NULL);
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                struct cfs_hash_bd      mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
{
        struct hlist_node *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
        int     rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) { /* swap bd1 and bd2 */
                struct cfs_hash_bd tmp;

                tmp = *bd2;
                *bd2 = *bd1;
                *bd1 = tmp;
        }
}

void
cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
                     struct cfs_hash_bd *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}

void
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}

void
cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}

static void
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
                      int bkt_size, int prev_size, int size)
{
        int     i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        struct cfs_hash_bucket **new_bkts;
        int                 i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
                struct cfs_hash_bd     bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize a new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enables dynamic hash resizing
 *           - CFS_HASH_SORT enables chained hash sort
 */
static int cfs_hash_rehash_worker(struct cfs_workitem *wi);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(struct cfs_workitem *wi)
{
        struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
        int         dep;
        int         bkt;
        int         off;
        int         bits;

        spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark the workitem as done */
        spin_unlock(&hs->hs_dep_lock);
        return 0;
}

static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
        spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
                return;

        spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                spin_unlock(&hs->hs_dep_lock);
                cond_resched();
                spin_lock(&hs->hs_dep_lock);
        }
        spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                struct cfs_hash_ops *ops, unsigned flags)
{
        struct cfs_hash *hs;
        int         len;

        ENTRY;

        CLASSERT(CFS_HASH_THETA_BITS < 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put != NULL || ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                      ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
        if (hs == NULL)
                RETURN(NULL);

        strlcpy(hs->hs_name, name, len);
        hs->hs_flags = flags;

        atomic_set(&hs->hs_refcount, 1);
        atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
        RETURN(NULL);
}
EXPORT_SYMBOL(cfs_hash_create);
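
/*
 * Illustrative usage sketch (not compiled in): how a caller might wire
 * up cfs_hash_ops and create a table.  The object type "example_obj"
 * and all "example_*" names are hypothetical, and the callback
 * signatures are assumptions based on struct cfs_hash_ops in
 * libcfs_hash.h; treat this as a sketch, not a reference.
 */
#if 0
struct example_obj {
        __u64                   eo_key;
        atomic_t                eo_ref;
        struct hlist_node       eo_hnode;
};

static unsigned
example_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
        return cfs_hash_u64_hash(*(__u64 *)key, mask);
}

static void *example_key(struct hlist_node *hnode)
{
        return &container_of(hnode, struct example_obj, eo_hnode)->eo_key;
}

static int example_keycmp(const void *key, struct hlist_node *hnode)
{
        return *(__u64 *)key ==
               container_of(hnode, struct example_obj, eo_hnode)->eo_key;
}

static void *example_object(struct hlist_node *hnode)
{
        return container_of(hnode, struct example_obj, eo_hnode);
}

static void example_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
        atomic_inc(&container_of(hnode, struct example_obj,
                                 eo_hnode)->eo_ref);
}

static void example_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
        atomic_dec(&container_of(hnode, struct example_obj,
                                 eo_hnode)->eo_ref);
}

static struct cfs_hash_ops example_hash_ops = {
        .hs_hash        = example_hash,
        .hs_key         = example_key,
        .hs_keycmp      = example_keycmp,
        .hs_object      = example_object,
        .hs_get         = example_get,
        .hs_put_locked  = example_put_locked,
};

static struct cfs_hash *example_create(void)
{
        /* 2^5 hlist heads initially, up to 2^10; 2^3 heads per bucket;
         * no extra bytes; default theta range; default (rehash) flags */
        return cfs_hash_create("example", 5, 10, 3, 0,
                               CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
                               &example_hash_ops, CFS_HASH_DEFAULT);
}
#endif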

/**
 * Clean up libcfs hash @hs.
 */
static void
cfs_hash_destroy(struct cfs_hash *hs)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        struct cfs_hash_bd         bd;
        int                   i;
        ENTRY;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * Prohibit further rehashes; no lock is needed because we are
         * the only (last) thread that can change this.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for code consistency */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't validate the key here because we
                                 * may have interrupted a rehash */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cond_resched();
        }

        LASSERT(atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));

        EXIT;
}

struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(struct cfs_hash *hs)
{
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);

static inline int
cfs_hash_rehash_bits(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}
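
/*
 * Worked example (assuming min_theta = 0.5 and max_theta = 2.0, the
 * defaults referred to in the XXX above): theta is the load factor,
 * i.e. item count divided by the number of hlist heads.  A table at
 * cur_bits = 5 (32 heads) grows to 6 bits once it exceeds 64 items
 * (theta > 2.0); with shrinking enabled, a table at 6 bits (64 heads)
 * shrinks back to 5 bits when it drops below 32 items (theta < 0.5).
 */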

/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(struct cfs_hash *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        struct cfs_hash_bd   bd;
        int             bits;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);

static struct hlist_node *
cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
                     struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        struct cfs_hash_bd     bds[2];
        int               bits = 0;

        LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
                    struct hlist_node *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);

/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);
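
/*
 * Usage sketch (the "obj" object embedding a struct hlist_node is
 * hypothetical, as in the earlier example): cfs_hash_add_unique() is
 * the "insert or fail" flavour, while cfs_hash_findadd_unique() is
 * "insert or return the existing object with a reference held":
 *
 *      if (cfs_hash_add_unique(hs, &obj->key, &obj->hnode) == -EALREADY)
 *              ... key already present, obj was not added ...
 *
 *      winner = cfs_hash_findadd_unique(hs, &obj->key, &obj->hnode);
 *      if (winner != obj)
 *              ... lost the race; drop obj, use winner (ref held) ...
 */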

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        void           *obj  = NULL;
        int             bits = 0;
        struct cfs_hash_bd   bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        /* NB: do nothing if @hnode is not in hash table */
        if (hnode == NULL || !hlist_unhashed(hnode)) {
                if (bds[1].bd_bucket == NULL && hnode != NULL) {
                        cfs_hash_bd_del_locked(hs, &bds[0], hnode);
                } else {
                        hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
                                                                key, hnode);
                }
        }

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete the item given by @key in libcfs hash @hs.  The first @key
 * found in the hash will be removed; if the key exists multiple times
 * in the hash @hs, this function must be called once per key.  The
 * removed object will be returned and ops->hs_put is called on the
 * removed object.
 */
void *
cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Look up an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash, ops->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found in the
 * hash @hs, NULL is returned.
 */
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
        void                 *obj = NULL;
        struct hlist_node     *hnode;
        struct cfs_hash_bd         bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);

static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there is a race on cfs_hash::hs_iterating, but it doesn't
         * matter because it's just an unreliable signal to the rehash
         * thread, which will try to finish the rehash ASAP when it sees
         * this.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;

        /* NB: iteration is mostly called by service threads; we tend
         * to cancel a pending rehash request instead of blocking the
         * service thread, and will relaunch the rehash request after
         * iteration */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: there is a race on cfs_hash::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) this function may sleep (it periodically drops its locks and
 *    calls cond_resched())!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item
 *      by calling cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        struct hlist_node       *hnode;
        struct hlist_node       *pos;
        struct cfs_hash_bd      bd;
        __u64                   count = 0;
        int                     excl  = !!remove_safe;
        int                     loop  = 0;
        int                     i;
        ENTRY;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        RETURN(count);
}

struct cfs_hash_cond_arg {
        cfs_hash_cond_opt_cb_t  func;
        void                   *arg;
};

static int
cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
{
        struct cfs_hash_cond_arg *cond = data;

        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}

/**
 * Delete items from the libcfs hash @hs for which @func returns true.
 * The write lock is held while looping over each bucket so that no
 * object can be referenced during removal.
 */
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
        struct cfs_hash_cond_arg arg = {
                .func   = func,
                .arg    = data,
        };

        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);
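
/*
 * Illustrative sketch of a cfs_hash_cond_del() condition callback (the
 * "example_obj"/"eo_dead" names are hypothetical; the signature is
 * assumed from cfs_hash_cond_opt_cb_t in libcfs_hash.h):
 *
 *      static int example_is_dead(void *obj, void *data)
 *      {
 *              // nonzero return: cfs_hash_cond_del() removes this item
 *              return ((struct example_obj *)obj)->eo_dead;
 *      }
 *
 *      cfs_hash_cond_del(hs, example_is_dead, NULL);
 */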

void
cfs_hash_for_each(struct cfs_hash *hs,
                  cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);

void
cfs_hash_for_each_safe(struct cfs_hash *hs,
                       cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
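
/*
 * Illustrative iteration callback sketch (the "example_count_cb" name
 * is hypothetical; the signature is assumed from cfs_hash_for_each_cb_t
 * in libcfs_hash.h).  Returning nonzero stops the iteration, as
 * cfs_hash_peek() below demonstrates:
 *
 *      static int example_count_cb(struct cfs_hash *hs,
 *                                  struct cfs_hash_bd *bd,
 *                                  struct hlist_node *hnode, void *data)
 *      {
 *              (*(__u64 *)data)++;     // bucket lock held: must not sleep
 *              return 0;               // return 1 to stop early
 *      }
 */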
1522
1523 static int
1524 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1525               struct hlist_node *hnode, void *data)
1526 {
1527         *(int *)data = 0;
1528         return 1; /* return 1 to break the loop */
1529 }
1530
1531 int
1532 cfs_hash_is_empty(struct cfs_hash *hs)
1533 {
1534         int empty = 1;
1535
1536         cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1537         return empty;
1538 }
1539 EXPORT_SYMBOL(cfs_hash_is_empty);
1540
1541 __u64
1542 cfs_hash_size_get(struct cfs_hash *hs)
1543 {
1544         return cfs_hash_with_counter(hs) ?
1545                atomic_read(&hs->hs_count) :
1546                cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1547 }
1548 EXPORT_SYMBOL(cfs_hash_size_get);
1549
1550 /*
1551  * cfs_hash_for_each_relax:
1552  * Iterate the hash table and call @func on each item without
1553  * holding any lock across the callback.  This function cannot
1554  * guarantee that the iteration will complete if either of the
1555  * following features is enabled:
1556  *
1557  *  a. if rehash_key is enabled, an item can be moved from
1558  *     one bucket to another at any time
1559  *  b. the user may remove an item with a non-zero refcount from
1560  *     the hash table, and may even change its key and reinsert
1561  *     it into another bucket.
1562  * There is no way to complete the iteration correctly in either
1563  * case, so the iteration is aborted when such a change is detected.
1564  */
1565 static int
1566 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1567                         void *data, int start)
1568 {
1569         struct hlist_node       *hnode;
1570         struct hlist_node       *next = NULL;
1571         struct cfs_hash_bd      bd;
1572         __u32                   version;
1573         int                     count = 0;
1574         int                     stop_on_change;
1575         int                     has_put_locked;
1576         int                     rc = 0;
1577         int                     i, end = -1;
1578         ENTRY;
1579
1580         stop_on_change = cfs_hash_with_rehash_key(hs) ||
1581                          !cfs_hash_with_no_itemref(hs);
1582         has_put_locked = hs->hs_ops->hs_put_locked != NULL;
1583         cfs_hash_lock(hs, 0);
1584 again:
1585         LASSERT(!cfs_hash_is_rehashing(hs));
1586
1587         cfs_hash_for_each_bucket(hs, &bd, i) {
1588                 struct hlist_head *hhead;
1589
1590                 if (i < start)
1591                         continue;
1592                 else if (end > 0 && i >= end)
1593                         break;
1594
1595                 cfs_hash_bd_lock(hs, &bd, 0);
1596                 version = cfs_hash_bd_version_get(&bd);
1597
1598                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1599                         hnode = hhead->first;
1600                         if (hnode == NULL)
1601                                 continue;
1602                         cfs_hash_get(hs, hnode);
1603                         for (; hnode != NULL; hnode = next) {
1604                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1605                                 next = hnode->next;
1606                                 if (next != NULL)
1607                                         cfs_hash_get(hs, next);
1608                                 cfs_hash_bd_unlock(hs, &bd, 0);
1609                                 cfs_hash_unlock(hs, 0);
1610
1611                                 rc = func(hs, &bd, hnode, data);
1612                                 if (stop_on_change || !has_put_locked)
1613                                         cfs_hash_put(hs, hnode);
1614
1615                                 cond_resched();
1616                                 count++;
1617
1618                                 cfs_hash_lock(hs, 0);
1619                                 cfs_hash_bd_lock(hs, &bd, 0);
1620                                 if (stop_on_change) {
1621                                         if (version !=
1622                                             cfs_hash_bd_version_get(&bd))
1623                                                 rc = -EINTR;
1624                                 } else if (has_put_locked) {
1625                                         cfs_hash_put_locked(hs, hnode);
1626                                 }
1627                                 if (rc) /* callback wants to break iteration */
1628                                         break;
1629                         }
1630                         if (next != NULL) {
1631                                 if (has_put_locked) {
1632                                         cfs_hash_put_locked(hs, next);
1633                                         next = NULL;
1634                                 }
1635                                 break;
1636                         } else if (rc != 0) {
1637                                 break;
1638                         }
1639                 }
1640                 cfs_hash_bd_unlock(hs, &bd, 0);
1641                 if (next != NULL && !has_put_locked) {
1642                         cfs_hash_put(hs, next);
1643                         next = NULL;
1644                 }
1645                 if (rc) /* callback wants to break iteration */
1646                         break;
1647         }
1648
1649         if (start > 0 && rc == 0) {
1650                 end = start;
1651                 start = 0;
1652                 goto again;
1653         }
1654
1655         cfs_hash_unlock(hs, 0);
1656         RETURN(count);
1657 }
1658
1659 int
1660 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1661                          cfs_hash_for_each_cb_t func, void *data, int start)
1662 {
1663         ENTRY;
1664
1665         if (cfs_hash_with_no_lock(hs) ||
1666             cfs_hash_with_rehash_key(hs) ||
1667             !cfs_hash_with_no_itemref(hs))
1668                 RETURN(-EOPNOTSUPP);
1669
1670         if (hs->hs_ops->hs_get == NULL ||
1671            (hs->hs_ops->hs_put == NULL &&
1672             hs->hs_ops->hs_put_locked == NULL))
1673                 RETURN(-EOPNOTSUPP);
1674
1675         cfs_hash_for_each_enter(hs);
1676         cfs_hash_for_each_relax(hs, func, data, start);
1677         cfs_hash_for_each_exit(hs);
1678
1679         RETURN(0);
1680 }
1681 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
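
/*
 * Example (illustrative sketch): walk the table with a callback that
 * may sleep.  This is only legal because no bucket lock is held around
 * @func; as checked above, the table must use internal locking, must
 * not rehash keys, must be created with the no-itemref flag, and must
 * provide hs_get plus hs_put (or hs_put_locked) ops.  myobj_flush() is
 * hypothetical:
 *
 *        static int myobj_flush(struct cfs_hash *hs,
 *                               struct cfs_hash_bd *bd,
 *                               struct hlist_node *hnode, void *data)
 *        {
 *                ... may block, e.g. wait for I/O on the object ...
 *                return 0;
 *        }
 *
 *        rc = cfs_hash_for_each_nolock(hs, myobj_flush, NULL, 0);
 */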
1682
1683 /**
1684  * For each hash bucket in the libcfs hash @hs call the passed callback
1685  * @func until all the hash buckets are empty.  The passed callback @func
1686  * or the previously registered callback hs->hs_put must remove the item
1687  * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1688  * functions.  No rwlocks will be held during the callback @func, so it
1689  * is safe to sleep if needed.  This function will not terminate until
1690  * the hash is empty.  Note it is still possible to concurrently add new
1691  * items into the hash.  It is the caller's responsibility to ensure
1692  * the required locking is in place to prevent concurrent insertions.
1693  */
1694 int
1695 cfs_hash_for_each_empty(struct cfs_hash *hs,
1696                         cfs_hash_for_each_cb_t func, void *data)
1697 {
1698         unsigned  i = 0;
1699         ENTRY;
1700
1701         if (cfs_hash_with_no_lock(hs))
1702                 RETURN(-EOPNOTSUPP);
1703
1704         if (hs->hs_ops->hs_get == NULL ||
1705            (hs->hs_ops->hs_put == NULL &&
1706             hs->hs_ops->hs_put_locked == NULL))
1707                 RETURN(-EOPNOTSUPP);
1708
1709         cfs_hash_for_each_enter(hs);
1710         while (cfs_hash_for_each_relax(hs, func, data, 0)) {
1711                 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1712                        hs->hs_name, i++);
1713         }
1714         cfs_hash_for_each_exit(hs);
1715         RETURN(0);
1716 }
1717 EXPORT_SYMBOL(cfs_hash_for_each_empty);
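
/*
 * Example (illustrative sketch): drain all remaining objects at
 * shutdown.  Each callback invocation must remove the item, as the
 * comment above requires, or this call never terminates.  "struct
 * myobj" and its mo_key/mo_hnode members are hypothetical:
 *
 *        static int myobj_release(struct cfs_hash *hs,
 *                                 struct cfs_hash_bd *bd,
 *                                 struct hlist_node *hnode, void *data)
 *        {
 *                struct myobj *obj = hlist_entry(hnode, struct myobj,
 *                                                mo_hnode);
 *
 *                cfs_hash_del(hs, &obj->mo_key, hnode);
 *                return 0;
 *        }
 *
 *        cfs_hash_for_each_empty(hs, myobj_release, NULL);
 */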
1718
1719 void
1720 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1721                         cfs_hash_for_each_cb_t func, void *data)
1722 {
1723         struct hlist_head *hhead;
1724         struct hlist_node *hnode;
1725         struct cfs_hash_bd         bd;
1726
1727         cfs_hash_for_each_enter(hs);
1728         cfs_hash_lock(hs, 0);
1729         if (hindex >= CFS_HASH_NHLIST(hs))
1730                 goto out;
1731
1732         cfs_hash_bd_index_set(hs, hindex, &bd);
1733
1734         cfs_hash_bd_lock(hs, &bd, 0);
1735         hhead = cfs_hash_bd_hhead(hs, &bd);
1736         hlist_for_each(hnode, hhead) {
1737                 if (func(hs, &bd, hnode, data))
1738                         break;
1739         }
1740         cfs_hash_bd_unlock(hs, &bd, 0);
1741 out:
1742         cfs_hash_unlock(hs, 0);
1743         cfs_hash_for_each_exit(hs);
1744 }
1746 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
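
/*
 * Example (illustrative sketch): inspect a single chain, e.g. while
 * debugging a suspected collision hot-spot; myobj_print() is a
 * hypothetical cfs_hash_for_each_cb_t:
 *
 *        cfs_hash_hlist_for_each(hs, 0, myobj_print, NULL);
 */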
1747
1748 /**
1749  * For each item in the libcfs hash @hs which matches the @key, call
1750  * the passed callback @func, passing it each matching hash item and
1751  * the private @data as arguments.  The bucket lock is held during the
1752  * callback, so the callback must never sleep.
1753  */
1754 void
1755 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1756                         cfs_hash_for_each_cb_t func, void *data)
1757 {
1758         struct hlist_node *hnode;
1759         struct cfs_hash_bd         bds[2];
1760         unsigned           i;
1761
1762         cfs_hash_lock(hs, 0);
1763
1764         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1765
1766         cfs_hash_for_each_bd(bds, 2, i) {
1767                 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1768
1769                 hlist_for_each(hnode, hlist) {
1770                         cfs_hash_bucket_validate(hs, &bds[i], hnode);
1771
1772                         if (cfs_hash_keycmp(hs, key, hnode)) {
1773                                 if (func(hs, &bds[i], hnode, data))
1774                                         break;
1775                         }
1776                 }
1777         }
1778
1779         cfs_hash_dual_bd_unlock(hs, bds, 0);
1780         cfs_hash_unlock(hs, 0);
1781 }
1782 EXPORT_SYMBOL(cfs_hash_for_each_key);
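
/*
 * Example (illustrative sketch): visit every entry that matches @key,
 * e.g. in a table that permits duplicate keys.  The bucket lock is
 * held, so the callback must not sleep; myobj_collect() and "results"
 * are hypothetical:
 *
 *        cfs_hash_for_each_key(hs, &key, myobj_collect, &results);
 */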
1783
1784 /**
1785  * Rehash the libcfs hash @hs to the given @bits.  This can be used
1786  * to grow the hash size when excessive chaining is detected, or to
1787  * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1788  * flag is set in @hs the libcfs hash may be dynamically rehashed
1789  * during addition or removal if the hash's theta value falls outside
1790  * the hs->hs_min_theta and hs->hs_max_theta bounds.  By default
1791  * these values are tuned to keep the chained hash depth small, and
1792  * this approach assumes a reasonably uniform hashing function.  The
1793  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1794  */
1795 void
1796 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1797 {
1798         int     i;
1799
1800         /* the caller must hold cfs_hash_lock(hs, 1) */
1801         LASSERT(cfs_hash_with_rehash(hs) &&
1802                 !cfs_hash_with_no_lock(hs));
1803
1804         if (!cfs_hash_is_rehashing(hs))
1805                 return;
1806
1807         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1808                 hs->hs_rehash_bits = 0;
1809                 return;
1810         }
1811
1812         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1813                 cfs_hash_unlock(hs, 1);
1814                 /* raise a console warning if the wait drags on too long */
1815                 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1816                        "hash %s is still rehashing, rescheduled %d\n",
1817                        hs->hs_name, i - 1);
1818                 cond_resched();
1819                 cfs_hash_lock(hs, 1);
1820         }
1821 }
1822
1823 void
1824 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1825 {
1826         cfs_hash_lock(hs, 1);
1827         cfs_hash_rehash_cancel_locked(hs);
1828         cfs_hash_unlock(hs, 1);
1829 }
1830
1831 int
1832 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1833 {
1834         int     rc;
1835
1836         LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1837
1838         cfs_hash_lock(hs, 1);
1839
1840         rc = cfs_hash_rehash_bits(hs);
1841         if (rc <= 0) {
1842                 cfs_hash_unlock(hs, 1);
1843                 return rc;
1844         }
1845
1846         hs->hs_rehash_bits = rc;
1847         if (!do_rehash) {
1848                 /* launch and return */
1849                 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1850                 cfs_hash_unlock(hs, 1);
1851                 return 0;
1852         }
1853
1854         /* rehash right now */
1855         cfs_hash_unlock(hs, 1);
1856
1857         return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1858 }
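
/*
 * Example (illustrative sketch): after a burst of insertions, ask the
 * table to resize.  With do_rehash == 0 the work is queued on
 * cfs_sched_rehash and the call returns immediately; with a non-zero
 * do_rehash the rehash runs synchronously in the caller's context:
 *
 *        rc = cfs_hash_rehash(hs, 1);
 *        if (rc < 0)
 *                CERROR("cannot rehash: %d\n", rc);
 *
 * A return of zero with do_rehash == 0 only means the rehash was
 * queued (or that no resize was needed).
 */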
1859
1860 static int
1861 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1862 {
1863         struct cfs_hash_bd      new;
1864         struct hlist_head *hhead;
1865         struct hlist_node *hnode;
1866         struct hlist_node *pos;
1867         void              *key;
1868         int                c = 0;
1869
1870         /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
1871         cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1872                 hlist_for_each_safe(hnode, pos, hhead) {
1873                         key = cfs_hash_key(hs, hnode);
1874                         LASSERT(key != NULL);
1875                         /* Validate hnode is in the correct bucket. */
1876                         cfs_hash_bucket_validate(hs, old, hnode);
1877                         /*
1878                          * Delete from old hash bucket; move to new bucket.
1879                          * ops->hs_key must be defined.
1880                          */
1881                         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1882                                              hs->hs_rehash_bits, key, &new);
1883                         cfs_hash_bd_move_locked(hs, old, &new, hnode);
1884                         c++;
1885                 }
1886         }
1887         return c;
1888 }
1889
1890 static int
1891 cfs_hash_rehash_worker(struct cfs_workitem *wi)
1892 {
1893         struct cfs_hash         *hs =
1894                 container_of(wi, struct cfs_hash, hs_rehash_wi);
1895         struct cfs_hash_bucket **bkts;
1896         struct cfs_hash_bd      bd;
1897         unsigned int            old_size;
1898         unsigned int            new_size;
1899         int                     bsize;
1900         int                     count = 0;
1901         int                     rc = 0;
1902         int                     i;
1903
1904         LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1905
1906         cfs_hash_lock(hs, 0);
1907         LASSERT(cfs_hash_is_rehashing(hs));
1908
1909         old_size = CFS_HASH_NBKT(hs);
1910         new_size = CFS_HASH_RH_NBKT(hs);
1911
1912         cfs_hash_unlock(hs, 0);
1913
1914         /*
1915          * hs::hs_rwlock is not needed for hs::hs_buckets here,
1916          * because only this rehash worker can change the bucket table.
1917          */
1918         bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1919                                         old_size, new_size);
1920         cfs_hash_lock(hs, 1);
1921         if (bkts == NULL) {
1922                 rc = -ENOMEM;
1923                 goto out;
1924         }
1925
1926         if (bkts == hs->hs_buckets) {
1927                 bkts = NULL; /* do nothing */
1928                 goto out;
1929         }
1930
1931         rc = __cfs_hash_theta(hs);
1932         if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1933                 /* free the newly allocated bkt-table */
1934                 old_size = new_size;
1935                 new_size = CFS_HASH_NBKT(hs);
1936                 rc = -EALREADY;
1937                 goto out;
1938         }
1939
1940         LASSERT(hs->hs_rehash_buckets == NULL);
1941         hs->hs_rehash_buckets = bkts;
1942
1943         rc = 0;
1944         cfs_hash_for_each_bucket(hs, &bd, i) {
1945                 if (cfs_hash_is_exiting(hs)) {
1946                         rc = -ESRCH;
1947                         /* someone wants to destroy the hash, abort now */
1948                         if (old_size < new_size) /* OK to free old bkt-table */
1949                                 break;
1950                         /* it's shrinking, need to free the new bkt-table */
1951                         hs->hs_rehash_buckets = NULL;
1952                         old_size = new_size;
1953                         new_size = CFS_HASH_NBKT(hs);
1954                         goto out;
1955                 }
1956
1957                 count += cfs_hash_rehash_bd(hs, &bd);
1958                 if (count < CFS_HASH_LOOP_HOG ||
1959                     cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1960                         continue;
1961                 }
1962
1963                 count = 0;
1964                 cfs_hash_unlock(hs, 1);
1965                 cond_resched();
1966                 cfs_hash_lock(hs, 1);
1967         }
1968
1969         hs->hs_rehash_count++;
1970
1971         bkts = hs->hs_buckets;
1972         hs->hs_buckets = hs->hs_rehash_buckets;
1973         hs->hs_rehash_buckets = NULL;
1974
1975         hs->hs_cur_bits = hs->hs_rehash_bits;
1976  out:
1977         hs->hs_rehash_bits = 0;
1978         if (rc == -ESRCH) /* never to be scheduled again */
1979                 cfs_wi_exit(cfs_sched_rehash, wi);
1980         bsize = cfs_hash_bkt_size(hs);
1981         cfs_hash_unlock(hs, 1);
1982         /* can't refer to @hs anymore because it could be destroyed */
1983         if (bkts != NULL)
1984                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1985         if (rc != 0)
1986                 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1987         /* return 1 only if cfs_wi_exit is called */
1988         return rc == -ESRCH;
1989 }
1990
1991 /**
1992  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1993  * @old_key must be provided to locate the objects previous location
1994  * in the hash, and the @new_key will be used to reinsert the object.
1995  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1996  * combo when it is critical that there is no window in time where the
1997  * object is missing from the hash.  When an object is being rehashed
1998  * the registered cfs_hash_get() and cfs_hash_put() functions will
1999  * not be called.
2000  */
2001 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
2002                          void *new_key, struct hlist_node *hnode)
2003 {
2004         struct cfs_hash_bd        bds[3];
2005         struct cfs_hash_bd        old_bds[2];
2006         struct cfs_hash_bd        new_bd;
2007
2008         LASSERT(!hlist_unhashed(hnode));
2009
2010         cfs_hash_lock(hs, 0);
2011
2012         cfs_hash_dual_bd_get(hs, old_key, old_bds);
2013         cfs_hash_bd_get(hs, new_key, &new_bd);
2014
2015         bds[0] = old_bds[0];
2016         bds[1] = old_bds[1];
2017         bds[2] = new_bd;
2018
2019         /* NB: bds[0] and bds[1] are ordered already */
2020         cfs_hash_bd_order(&bds[1], &bds[2]);
2021         cfs_hash_bd_order(&bds[0], &bds[1]);
2022
2023         cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2024         if (likely(old_bds[1].bd_bucket == NULL)) {
2025                 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2026         } else {
2027                 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2028                 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2029         }
2030         /* overwrite the key inside the locks, otherwise this may race
2031          * with other operations, e.g. rehash */
2032         cfs_hash_keycpy(hs, hnode, new_key);
2033
2034         cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2035         cfs_hash_unlock(hs, 0);
2036 }
2037 EXPORT_SYMBOL(cfs_hash_rehash_key);
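
/*
 * Example (illustrative sketch): move an object to a new key without
 * it ever being absent from the table, e.g. when an identifier is
 * reassigned.  If the table provides an hs_keycpy op, the new key is
 * copied into the object under the bucket locks; "obj",
 * compute_new_id() and the mo_id/mo_hnode members are hypothetical:
 *
 *        __u64 new_id = compute_new_id(obj);
 *
 *        cfs_hash_rehash_key(hs, &obj->mo_id, &new_id, &obj->mo_hnode);
 */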
2038
2039 void cfs_hash_debug_header(struct seq_file *m)
2040 {
2041         seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
2042                    CFS_HASH_BIGNAME_LEN, "name");
2043 }
2044 EXPORT_SYMBOL(cfs_hash_debug_header);
2045
2046 static struct cfs_hash_bucket **
2047 cfs_hash_full_bkts(struct cfs_hash *hs)
2048 {
2049         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2050         if (hs->hs_rehash_buckets == NULL)
2051                 return hs->hs_buckets;
2052
2053         LASSERT(hs->hs_rehash_bits != 0);
2054         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2055                hs->hs_rehash_buckets : hs->hs_buckets;
2056 }
2057
2058 static unsigned int
2059 cfs_hash_full_nbkt(struct cfs_hash *hs)
2060 {
2061         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2062         if (hs->hs_rehash_buckets == NULL)
2063                 return CFS_HASH_NBKT(hs);
2064
2065         LASSERT(hs->hs_rehash_bits != 0);
2066         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2067                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2068 }
2069
2070 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2071 {
2072         int dist[8] = { 0, };
2073         int maxdep = -1;
2074         int maxdepb = -1;
2075         int total = 0;
2076         int theta;
2077         int i;
2078
2079         cfs_hash_lock(hs, 0);
2080         theta = __cfs_hash_theta(hs);
2081
2082         seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
2083                    CFS_HASH_BIGNAME_LEN, hs->hs_name,
2084                    1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2085                    1 << hs->hs_max_bits,
2086                    __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2087                    __cfs_hash_theta_int(hs->hs_min_theta),
2088                    __cfs_hash_theta_frac(hs->hs_min_theta),
2089                    __cfs_hash_theta_int(hs->hs_max_theta),
2090                    __cfs_hash_theta_frac(hs->hs_max_theta),
2091                    hs->hs_flags, hs->hs_rehash_count);
2092
2093         /*
2094          * The distribution is a summary of the chained hash depth in
2095          * each of the libcfs hash buckets.  Each bucket's hsb_count is
2096          * divided by the hash theta value and used to generate a
2097          * histogram of the hash distribution.  A uniform hash will
2098          * result in all hash buckets being close to the average, thus
2099          * only the first few entries in the histogram will be non-zero.
2100          * If your hash function produces a non-uniform hash, this will
2101          * be observable as outlier buckets in the distribution histogram.
2102          *
2103          * Uniform hash distribution:           128/128/0/0/0/0/0/0
2104          * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
2105          */
2106         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2107                 struct cfs_hash_bd bd;
2108
2109                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2110                 cfs_hash_bd_lock(hs, &bd, 0);
2111                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2112                         maxdep  = bd.bd_bucket->hsb_depmax;
2113                         maxdepb = ffz(~maxdep);
2114                 }
2115                 total += bd.bd_bucket->hsb_count;
2116                 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2117                 cfs_hash_bd_unlock(hs, &bd, 0);
2118         }
2119
2120         seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2121         for (i = 0; i < 8; i++)
2122                 seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2123
2124         cfs_hash_unlock(hs, 0);
2125 }
2126 EXPORT_SYMBOL(cfs_hash_debug_str);
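
/*
 * Example (illustrative sketch): a seq_file "show" method exposing one
 * table's statistics, in the style of the lproc code; "my_hs" is a
 * hypothetical global table:
 *
 *        static int my_hash_seq_show(struct seq_file *m, void *v)
 *        {
 *                cfs_hash_debug_header(m);
 *                cfs_hash_debug_str(my_hs, m);
 *                return 0;
 *        }
 */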