/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for hashing in the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting CFS_HASH_REHASH
 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
 * - buckets are allocated one by one (instead of as contiguous memory)
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - a "bucket" is now a group of hlist_heads; the user can specify the
 *   bucket size via bkt_bits of cfs_hash_create(). All hlist_heads in a
 *   bucket share one lock to reduce memory overhead.
 *
 * - support lockless hash; the caller takes care of locks:
 *   avoids lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spinlock/rwlock for buckets:
 *   the overhead of spinlock contention is lower than the read/write
 *   contention of an rwlock, so using a spinlock to serialize operations
 *   on a bucket is more reasonable for frequently changed hash tables
 *
 * - support single-lock mode:
 *   one lock protects all hash operations, avoiding the overhead of
 *   multiple locks when the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations in many use-cases, which
 *   is expensive.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking; we schedule any required rehash on a different
 *   thread in those cases.
 *
 * - safer rehash on large hash tables:
 *   In the old implementation, the rehash function exclusively locked
 *   the hash table and finished the rehash in one batch; this is
 *   dangerous on SMP systems because rehashing millions of elements
 *   can take a long time. The new rehash can release the lock and relax
 *   the CPU in the middle of a rehash, so it is safe for another thread
 *   to search/change the hash table even while it is rehashing.
 *
 * - support two different refcount modes:
 *   . the hash table holds a refcount on each element
 *   . the hash table doesn't change refcounts on adding/removing elements
 *
 * - support long-name hash tables (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash tables;
 *   cfs_hash_rehash_key overwrites the hash key inside the lock by
 *   calling hs_keycpy.
 *
 * - better hash iteration:
 *   We now support both locked and lockless iteration of a hash table.
 *   Also, the user can break out of the iteration by returning 1 from
 *   the callback.
 */
#include <linux/seq_file.h>

#include <libcfs/libcfs.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
                "warning when hash depth is high.");
#endif

struct cfs_wi_sched *cfs_sched_rehash;

static inline void
cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
__acquires(&lock->spin)
{
        spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
__releases(&lock->spin)
{
        spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
__acquires(&lock->rw)
{
        if (!exclusive)
                read_lock(&lock->rw);
        else
                write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
__releases(&lock->rw)
{
        if (!exclusive)
                read_unlock(&lock->rw);
        else
                write_unlock(&lock->rw);
}

/** No lock hash */
static cfs_hash_lock_ops_t cfs_hash_nl_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
{
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
{
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
{
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
{
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

static void
cfs_hash_lock_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                rwlock_init(&hs->hs_lock.rw);

                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_spin_lops;
                else
                        LBUG();
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else
                        LBUG();
        }
}
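
/*
 * A brief sketch of how creation flags map to the lock-ops tables above,
 * assuming the standard CFS_HASH_* flag names from libcfs_hash.h:
 *
 *	CFS_HASH_NO_LOCK                        -> cfs_hash_nl_lops
 *	CFS_HASH_NO_BKTLOCK                     -> cfs_hash_nbl_lops
 *	CFS_HASH_SPIN_BKTLOCK | CFS_HASH_REHASH -> cfs_hash_bkt_spin_lops
 *	CFS_HASH_RW_BKTLOCK   | CFS_HASH_REHASH -> cfs_hash_bkt_rw_lops
 *
 * i.e. the table-level lock is only a real rwlock when rehash is enabled;
 * otherwise only the per-bucket lock is ever taken.
 */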

/**
 * Simple hash head without depth tracking
 * new element is always added to head of hlist
 */
typedef struct {
        struct hlist_head       hh_head;        /**< entries list */
} cfs_hash_head_t;

static int
cfs_hash_hh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_t);
}

static struct hlist_head *
cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];

        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking
 * new element is always added to head of hlist
 */
typedef struct {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
} cfs_hash_head_dep_t;

static int
cfs_hash_hd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_dep_t);
}

static struct hlist_head *
cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_dep_t   *head;

        head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * doubly linked hash head without depth tracking
 * new element is always added to tail of hlist
 */
typedef struct {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
} cfs_hash_dhead_t;

static int
cfs_hash_dh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_t);
}

static struct hlist_head *
cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_t *head;

        head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

        if (dh->dh_tail != NULL) /* not empty */
                hlist_add_after(dh->dh_tail, hnode);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnd)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return -1; /* unknown depth */
}

/**
 * doubly linked hash head with depth tracking
 * new element is always added to tail of hlist
 */
typedef struct {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
} cfs_hash_dhead_dep_t;

static int
cfs_hash_dd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_dep_t);
}

static struct hlist_head *
cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_dep_t *head;

        head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

        if (dh->dd_tail != NULL) /* not empty */
                hlist_add_after(dh->dd_tail, hnode);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnd)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return --dh->dd_depth;
}

static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
       .hop_hhead      = cfs_hash_hh_hhead,
       .hop_hhead_size = cfs_hash_hh_hhead_size,
       .hop_hnode_add  = cfs_hash_hh_hnode_add,
       .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
       .hop_hhead      = cfs_hash_hd_hhead,
       .hop_hhead_size = cfs_hash_hd_hhead_size,
       .hop_hnode_add  = cfs_hash_hd_hnode_add,
       .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
       .hop_hhead      = cfs_hash_dh_hhead,
       .hop_hhead_size = cfs_hash_dh_hhead_size,
       .hop_hnode_add  = cfs_hash_dh_hnode_add,
       .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
       .hop_hhead      = cfs_hash_dd_hhead,
       .hop_hhead_size = cfs_hash_dd_hhead_size,
       .hop_hnode_add  = cfs_hash_dd_hnode_add,
       .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}
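
/*
 * A quick sketch of the mapping (assuming the standard CFS_HASH_ADD_TAIL
 * and CFS_HASH_DEPTH flag names): a table created with
 * CFS_HASH_ADD_TAIL | CFS_HASH_DEPTH gets &cfs_hash_dd_hops (tail insert,
 * depth tracked); CFS_HASH_ADD_TAIL alone gets &cfs_hash_dh_hops;
 * CFS_HASH_DEPTH alone gets &cfs_hash_hd_hops; and neither flag gets the
 * plain &cfs_hash_hh_hops.
 */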

static void
cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
                     unsigned int bits, const void *key, cfs_hash_bd_t *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}

void
cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
{
        /* NB: caller should hold hs->hs_lock.rw if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
EXPORT_SYMBOL(cfs_hash_bd_get);
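
/*
 * A worked example of the index split in cfs_hash_bd_from_key() (the
 * numbers are illustrative): with bits = 7 and hs_bkt_bits = 3, the hash
 * index is in [0, 127]; the low (bits - bkt_bits) = 4 bits pick one of
 * the 16 buckets and the high 3 bits pick one of the 8 hlist_heads inside
 * that bucket, e.g. index 0x5a -> bucket 0x5a & 0xf = 10, offset
 * 0x5a >> 4 = 5.
 */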

static inline void
cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        spin_unlock(&hs->hs_dep_lock);

        cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}

void
cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                        struct hlist_node *hnode)
{
        int rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       struct hlist_node *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(atomic_read(&hs->hs_count) > 0);
                atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
                        cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
{
        cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
        cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
        int                rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use cfs_hash_bd_hnode_add/del to avoid the atomic & refcount ops
         * in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}
EXPORT_SYMBOL(cfs_hash_bd_move_locked);

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
};

typedef enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if it exists, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if it exists, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete if it exists */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
} cfs_hash_lookup_intent_t;
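
/*
 * For example, CFS_HS_LOOKUP_IT_FINDADD expands to
 * CFS_HS_LOOKUP_MASK_FIND | CFS_HS_LOOKUP_MASK_REF | CFS_HS_LOOKUP_MASK_ADD:
 * look the key up, take a reference if found, and add the caller's node
 * when nothing matches. cfs_hash_bd_findadd_locked() below builds the same
 * intent at runtime by or-ing in MASK_REF only when @noref is zero.
 */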

static struct hlist_node *
cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                          const void *key, struct hlist_node *hnode,
                          cfs_hash_lookup_intent_t intent)
{
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
        struct hlist_node  *ehnode;
        struct hlist_node  *match;
        int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time. */
        match = intent_add ? NULL : hnode;
        hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

struct hlist_node *
cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);

struct hlist_node *
cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, struct hlist_node *hnode,
                           int noref)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                        CFS_HS_LOOKUP_IT_ADD |
                                        (!noref * CFS_HS_LOOKUP_MASK_REF));
}
EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);

struct hlist_node *
cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, struct hlist_node *hnode)
{
        /* hnode can be NULL, we find the first item with @key */
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                        CFS_HS_LOOKUP_IT_FINDDEL);
}
EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);

static void
cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                       unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int                i;

        /**
         * bds must be sorted in ascending order of bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket but
         * have a different bd::bd_offset, so we must take care to avoid
         * deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}

static void
cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                         unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int                i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                unsigned n, const void *key)
{
        struct hlist_node *ehnode;
        unsigned          i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                        CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
                                 cfs_hash_bd_t *bds, unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        int               intent;
        unsigned          i;

        LASSERT(hnode != NULL);
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                cfs_hash_bd_t      mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
{
        struct hlist_node *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
{
        int     rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) { /* swap bd1 and bd2 */
                cfs_hash_bd_t tmp;

                tmp = *bd2;
                *bd2 = *bd1;
                *bd1 = tmp;
        }
}

void
cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_get);

void
cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lock);

void
cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, struct hlist_node *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);

static void
cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
                      int bkt_size, int prev_size, int size)
{
        int     i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static cfs_hash_bucket_t **
cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        cfs_hash_bucket_t **new_bkts;
        int                 i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
                cfs_hash_bd_t     bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize a new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enables dynamic hash resizing
 *           - CFS_HASH_SORT enables chained hash sort
 */
static int cfs_hash_rehash_worker(cfs_workitem_t *wi);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
        cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
        int         dep;
        int         bkt;
        int         off;
        int         bits;

        spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark as workitem done */
        spin_unlock(&hs->hs_dep_lock);
        return 0;
}

static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
{
        spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
{
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
                return;

        spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                spin_unlock(&hs->hs_dep_lock);
                cond_resched();
                spin_lock(&hs->hs_dep_lock);
        }
        spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

cfs_hash_t *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                cfs_hash_ops_t *ops, unsigned flags)
{
        cfs_hash_t *hs;
        int         len;

        ENTRY;

        CLASSERT(CFS_HASH_THETA_BITS < 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                      ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
        if (hs == NULL)
                RETURN(NULL);

        strlcpy(hs->hs_name, name, len);
        hs->hs_flags = flags;

        atomic_set(&hs->hs_refcount, 1);
        atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
        RETURN(NULL);
}
EXPORT_SYMBOL(cfs_hash_create);
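
/*
 * A minimal usage sketch (hypothetical caller code: "struct my_obj", the
 * callback bodies, and the sizes chosen are illustrative, and the
 * CFS_HASH_MIN_THETA/CFS_HASH_MAX_THETA/CFS_HASH_DEFAULT names are assumed
 * from libcfs_hash.h). The caller registers an ops table and creates a
 * table that starts at 2^5 hash heads, may grow to 2^10, and groups 2^3
 * heads per bucket:
 *
 *	static cfs_hash_ops_t my_hash_ops = {
 *		.hs_hash	= my_hash,		// key -> hash index
 *		.hs_key		= my_key,		// hnode -> key
 *		.hs_keycmp	= my_keycmp,		// compare key vs hnode
 *		.hs_object	= my_object,		// hnode -> object
 *		.hs_get		= my_get,		// take a reference
 *		.hs_put_locked	= my_put_locked,	// drop ref, can't sleep
 *	};
 *
 *	hs = cfs_hash_create("my_hash", 5, 10, 3, 0,
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &my_hash_ops, CFS_HASH_DEFAULT);
 *	if (hs == NULL)
 *		return -ENOMEM;
 */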

/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(cfs_hash_t *hs)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        cfs_hash_bd_t         bd;
        int                   i;
        ENTRY;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * prohibit further rehashes; no lock is needed because
         * I'm the only (last) one who can change it.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for code consistency */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not "
                                         "empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't validate the key here because
                                 * rehash may have been interrupted */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cond_resched();
        }

        LASSERT(atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));

        EXIT;
}

cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
{
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(cfs_hash_t *hs)
{
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);

static inline int
cfs_hash_rehash_bits(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}

/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(cfs_hash_t *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
        cfs_hash_bd_t   bd;
        int             bits;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);

static struct hlist_node *
cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
                     struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        cfs_hash_bd_t     bds[2];
        int               bits = 0;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);
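
/*
 * For instance (hypothetical caller code; "obj", its key, and the hnode
 * field name are illustrative):
 *
 *	if (cfs_hash_add_unique(hs, &obj->key, &obj->hnode) == -EALREADY) {
 *		// an entry with this key already exists; obj was not added
 *	}
 */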

/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
                        struct hlist_node *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
        void           *obj  = NULL;
        int             bits = 0;
        cfs_hash_bd_t   bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        /* NB: do nothing if @hnode is not in hash table */
        if (hnode == NULL || !hlist_unhashed(hnode)) {
                if (bds[1].bd_bucket == NULL && hnode != NULL) {
                        cfs_hash_bd_del_locked(hs, &bds[0], hnode);
                } else {
                        hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
                                                                key, hnode);
                }
        }

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete the item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs, this function must be called once per key.  The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(cfs_hash_t *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash, hs->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs, NULL is returned.
 */
void *
cfs_hash_lookup(cfs_hash_t *hs, const void *key)
{
        void                 *obj = NULL;
        struct hlist_node     *hnode;
        cfs_hash_bd_t         bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);
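
/*
 * A lookup/put sketch (hypothetical caller code; "struct my_obj" and its
 * fields are illustrative):
 *
 *	struct my_obj *obj = cfs_hash_lookup(hs, &key);
 *
 *	if (obj != NULL) {
 *		// ... use obj; ops->hs_get already took a reference ...
 *		cfs_hash_put(hs, &obj->hnode);
 *	}
 */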

static void
cfs_hash_for_each_enter(cfs_hash_t *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there is a race on cfs_hash_t::hs_iterating, but it doesn't
         * matter because it's just an unreliable signal to the
         * rehash-thread; the rehash-thread will try to finish the rehash
         * ASAP when seeing this.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;

        /* NB: iteration is mostly called by service threads; we tend to
         * cancel a pending rehash-request instead of blocking the service
         * thread, and relaunch the rehash request after iteration */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(cfs_hash_t *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: there is a race on cfs_hash_t::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item via
 *      cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        struct hlist_node       *hnode;
        struct hlist_node       *pos;
        cfs_hash_bd_t           bd;
        __u64                   count = 0;
        int                     excl  = !!remove_safe;
        int                     loop  = 0;
        int                     i;
        ENTRY;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        RETURN(count);
}

typedef struct {
        cfs_hash_cond_opt_cb_t  func;
        void                   *arg;
} cfs_hash_cond_arg_t;

static int
cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                         struct hlist_node *hnode, void *data)
{
        cfs_hash_cond_arg_t *cond = data;

        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}

/**
 * Delete items from the libcfs hash @hs when @func returns true.
 * The write lock is held during the loop over each bucket so that
 * no object can be referenced while it is being removed.
 */
void
cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
        cfs_hash_cond_arg_t arg = {
                .func   = func,
                .arg    = data,
        };

        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);

void
cfs_hash_for_each(cfs_hash_t *hs,
                  cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);
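
/*
 * A callback sketch for the iterators above (hypothetical caller code;
 * "struct my_obj" and "my_count_flagged" are illustrative). The callback
 * matches cfs_hash_for_each_cb_t; returning nonzero stops the iteration:
 *
 *	static int
 *	my_count_flagged(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *			 struct hlist_node *hnode, void *data)
 *	{
 *		struct my_obj *obj = cfs_hash_object(hs, hnode);
 *
 *		if (obj->flagged)
 *			(*(int *)data)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	int count = 0;
 *	cfs_hash_for_each(hs, my_count_flagged, &count);
 */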

void
cfs_hash_for_each_safe(cfs_hash_t *hs,
                       cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);

static int
cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
              struct hlist_node *hnode, void *data)
{
        *(int *)data = 0;
        return 1; /* return 1 to break the loop */
}

int
cfs_hash_is_empty(cfs_hash_t *hs)
{
        int empty = 1;

        cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
        return empty;
}
EXPORT_SYMBOL(cfs_hash_is_empty);

__u64
cfs_hash_size_get(cfs_hash_t *hs)
{
        return cfs_hash_with_counter(hs) ?
               atomic_read(&hs->hs_count) :
               cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
1573
1574 /*
1575  * cfs_hash_for_each_relax:
1576  * Iterate over the hash table and call @func on each item without
1577  * holding any lock.  This function cannot guarantee to finish the
1578  * iteration if these features are enabled:
1579  *
1580  *  a. if rehash_key is enabled, an item can be moved from
1581  *     one bucket to another bucket
1582  *  b. the user can remove a non-zero-ref item from the hash-table,
1583  *     so the item can disappear from the hash-table; even worse,
1584  *     the user may have changed the key and re-inserted the item
1585  *     into another hash bucket.
1586  * There is no way to finish the iteration correctly in those two
1587  * cases, so the iteration has to stop whenever a change is detected.
1588  */
1589 static int
1590 cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
1591 {
1592         struct hlist_node *hnode;
1593         struct hlist_node *tmp;
1594         cfs_hash_bd_t     bd;
1595         __u32             version;
1596         int               count = 0;
1597         int               stop_on_change;
1598         int               rc;
1599         int               i;
1600         ENTRY;
1601
1602         stop_on_change = cfs_hash_with_rehash_key(hs) ||
1603                          !cfs_hash_with_no_itemref(hs) ||
1604                          CFS_HOP(hs, put_locked) == NULL;
1605         cfs_hash_lock(hs, 0);
1606         LASSERT(!cfs_hash_is_rehashing(hs));
1607
1608         cfs_hash_for_each_bucket(hs, &bd, i) {
1609                 struct hlist_head *hhead;
1610
1611                 cfs_hash_bd_lock(hs, &bd, 0);
1612                 version = cfs_hash_bd_version_get(&bd);
1613
1614                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1615                         for (hnode = hhead->first; hnode != NULL;) {
1616                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1617                                 cfs_hash_get(hs, hnode);
1618                                 cfs_hash_bd_unlock(hs, &bd, 0);
1619                                 cfs_hash_unlock(hs, 0);
1620
1621                                 rc = func(hs, &bd, hnode, data);
1622                                 if (stop_on_change)
1623                                         cfs_hash_put(hs, hnode);
1624                                 cond_resched();
1625                                 count++;
1626
1627                                 cfs_hash_lock(hs, 0);
1628                                 cfs_hash_bd_lock(hs, &bd, 0);
1629                                 if (!stop_on_change) {
1630                                         tmp = hnode->next;
1631                                         cfs_hash_put_locked(hs, hnode);
1632                                         hnode = tmp;
1633                                 } else { /* bucket changed? */
1634                                         if (version !=
1635                                             cfs_hash_bd_version_get(&bd))
1636                                                 break;
1637                                         /* safe to continue because no change */
1638                                         hnode = hnode->next;
1639                                 }
1640                                 if (rc) /* callback wants to break iteration */
1641                                         break;
1642                         }
1643                 }
1644                 cfs_hash_bd_unlock(hs, &bd, 0);
1645         }
1646         cfs_hash_unlock(hs, 0);
1647
1648         return count;
1649 }
1650
1651 int
1652 cfs_hash_for_each_nolock(cfs_hash_t *hs,
1653                          cfs_hash_for_each_cb_t func, void *data)
1654 {
1655         ENTRY;
1656
1657         if (cfs_hash_with_no_lock(hs) ||
1658             cfs_hash_with_rehash_key(hs) ||
1659             !cfs_hash_with_no_itemref(hs))
1660                 RETURN(-EOPNOTSUPP);
1661
1662         if (CFS_HOP(hs, get) == NULL ||
1663             (CFS_HOP(hs, put) == NULL &&
1664              CFS_HOP(hs, put_locked) == NULL))
1665                 RETURN(-EOPNOTSUPP);
1666
1667         cfs_hash_for_each_enter(hs);
1668         cfs_hash_for_each_relax(hs, func, data);
1669         cfs_hash_for_each_exit(hs);
1670
1671         RETURN(0);
1672 }
1673 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
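
/*
 * Note: per the checks above, lockless iteration is only supported when
 * the hash is not in no-lock mode, does not rehash keys, holds no item
 * refs itself (the no-itemref mode), and has get and put/put_locked
 * handlers registered, so each item can be pinned while the callback
 * runs and possibly sleeps.
 */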
1674
1675 /**
1676  * For each hash bucket in the libcfs hash @hs call the passed callback
1677  * @func until all the hash buckets are empty.  The passed callback @func
1678  * or the previously registered callback hs->hs_put must remove the item
1679  * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1680  * functions.  No rwlocks will be held during the callback @func, so
1681  * it is safe to sleep if needed.  This function will not terminate
1682  * until the hash is empty.  Note it is still possible to concurrently
1683  * add new items into the hash.  It is the caller's responsibility to ensure
1684  * the required locking is in place to prevent concurrent insertions.
1685  */
1686 int
1687 cfs_hash_for_each_empty(cfs_hash_t *hs,
1688                         cfs_hash_for_each_cb_t func, void *data)
1689 {
1690         unsigned  i = 0;
1691         ENTRY;
1692
1693         if (cfs_hash_with_no_lock(hs))
1694                 RETURN(-EOPNOTSUPP);
1695
1696         if (CFS_HOP(hs, get) == NULL ||
1697             (CFS_HOP(hs, put) == NULL &&
1698              CFS_HOP(hs, put_locked) == NULL))
1699                 RETURN(-EOPNOTSUPP);
1700
1701         cfs_hash_for_each_enter(hs);
1702         while (cfs_hash_for_each_relax(hs, func, data)) {
1703                 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1704                        hs->hs_name, i++);
1705         }
1706         cfs_hash_for_each_exit(hs);
1707         RETURN(0);
1708 }
1709 EXPORT_SYMBOL(cfs_hash_for_each_empty);
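
/*
 * Usage sketch (a hypothetical teardown path): the callback must remove
 * each item or the loop never terminates.  my_obj_stop(), the object
 * type and its fields are invented names.
 *
 *	static int my_obj_drain(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *				struct hlist_node *hnode, void *data)
 *	{
 *		struct my_obj *obj = cfs_hash_object(hs, hnode);
 *
 *		my_obj_stop(obj);	(no locks held here, may sleep)
 *		cfs_hash_del(hs, &obj->mo_key, hnode);
 *		return 0;
 *	}
 *
 *	cfs_hash_for_each_empty(my_hash, my_obj_drain, NULL);
 */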
1710
1711 void
1712 cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
1713                         cfs_hash_for_each_cb_t func, void *data)
1714 {
1715         struct hlist_head *hhead;
1716         struct hlist_node *hnode;
1717         cfs_hash_bd_t      bd;
1718
1719         cfs_hash_for_each_enter(hs);
1720         cfs_hash_lock(hs, 0);
1721         if (hindex >= CFS_HASH_NHLIST(hs))
1722                 goto out;
1723
1724         cfs_hash_bd_index_set(hs, hindex, &bd);
1725
1726         cfs_hash_bd_lock(hs, &bd, 0);
1727         hhead = cfs_hash_bd_hhead(hs, &bd);
1728         hlist_for_each(hnode, hhead) {
1729                 if (func(hs, &bd, hnode, data))
1730                         break;
1731         }
1732         cfs_hash_bd_unlock(hs, &bd, 0);
1733 out:
1734         cfs_hash_unlock(hs, 0);
1735         cfs_hash_for_each_exit(hs);
1736 }
1738 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1739
1740 /*
1741  * For each item in the libcfs hash @hs which matches the @key, call
1742  * the passed callback @func, passing it each matching hash item and
1743  * the private @data as arguments.  The bucket lock is held during the
1744  * callback, so the callback must never sleep.
1745  */
1746 void
1747 cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
1748                         cfs_hash_for_each_cb_t func, void *data)
1749 {
1750         struct hlist_node *hnode;
1751         cfs_hash_bd_t      bds[2];
1752         unsigned           i;
1753
1754         cfs_hash_lock(hs, 0);
1755
1756         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1757
1758         cfs_hash_for_each_bd(bds, 2, i) {
1759                 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1760
1761                 hlist_for_each(hnode, hlist) {
1762                         cfs_hash_bucket_validate(hs, &bds[i], hnode);
1763
1764                         if (cfs_hash_keycmp(hs, key, hnode)) {
1765                                 if (func(hs, &bds[i], hnode, data))
1766                                         break;
1767                         }
1768                 }
1769         }
1770
1771         cfs_hash_dual_bd_unlock(hs, bds, 0);
1772         cfs_hash_unlock(hs, 0);
1773 }
1774 EXPORT_SYMBOL(cfs_hash_for_each_key);
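
/*
 * Usage sketch (a hypothetical lookup): take a reference on the first
 * item matching @key.  my_obj_getref() and the object type are invented
 * names; returning nonzero stops the walk, and the bucket lock is held
 * throughout, so nothing here may sleep.
 *
 *	static int my_lookup_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *				struct hlist_node *hnode, void *data)
 *	{
 *		*(struct my_obj **)data =
 *			my_obj_getref(cfs_hash_object(hs, hnode));
 *		return 1;
 *	}
 *
 *	struct my_obj *obj = NULL;
 *	cfs_hash_for_each_key(my_hash, &key, my_lookup_cb, &obj);
 */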
1775
1776 /**
1777  * Rehash the libcfs hash @hs to the given @bits.  This can be used
1778  * to grow the hash size when excessive chaining is detected, or to
1779  * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1780  * flag is set in @hs the libcfs hash may be dynamically rehashed
1781  * during addition or removal if the hash's theta value falls below
1782  * hs->hs_min_theta or exceeds hs->hs_max_theta.  By default
1783  * these values are tuned to keep the chained hash depth small, and
1784  * this approach assumes a reasonably uniform hashing function.  The
1785  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1786  */
1787 void
1788 cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
1789 {
1790         int     i;
1791
1792         /* the caller must hold cfs_hash_lock(hs, 1) */
1793         LASSERT(cfs_hash_with_rehash(hs) &&
1794                 !cfs_hash_with_no_lock(hs));
1795
1796         if (!cfs_hash_is_rehashing(hs))
1797                 return;
1798
1799         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1800                 hs->hs_rehash_bits = 0;
1801                 return;
1802         }
1803
1804         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1805                 cfs_hash_unlock(hs, 1);
1806                 /* raise a console warning when the wait gets long */
1807                 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1808                        "hash %s is still rehashing, rescheduled %d\n",
1809                        hs->hs_name, i - 1);
1810                 cond_resched();
1811                 cfs_hash_lock(hs, 1);
1812         }
1813 }
1814 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1815
1816 void
1817 cfs_hash_rehash_cancel(cfs_hash_t *hs)
1818 {
1819         cfs_hash_lock(hs, 1);
1820         cfs_hash_rehash_cancel_locked(hs);
1821         cfs_hash_unlock(hs, 1);
1822 }
1823 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1824
1825 int
1826 cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
1827 {
1828         int     rc;
1829
1830         LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1831
1832         cfs_hash_lock(hs, 1);
1833
1834         rc = cfs_hash_rehash_bits(hs);
1835         if (rc <= 0) {
1836                 cfs_hash_unlock(hs, 1);
1837                 return rc;
1838         }
1839
1840         hs->hs_rehash_bits = rc;
1841         if (!do_rehash) {
1842                 /* launch and return */
1843                 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1844                 cfs_hash_unlock(hs, 1);
1845                 return 0;
1846         }
1847
1848         /* rehash right now */
1849         cfs_hash_unlock(hs, 1);
1850
1851         return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1852 }
1853 EXPORT_SYMBOL(cfs_hash_rehash);
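
/*
 * Usage sketch: force a resize check instead of waiting for an add or
 * delete operation to trigger one.
 *
 *	cfs_hash_rehash(my_hash, 0);	(schedule on cfs_sched_rehash, return)
 *	cfs_hash_rehash(my_hash, 1);	(rehash synchronously in this context)
 */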
1854
1855 static int
1856 cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
1857 {
1858         cfs_hash_bd_t      new;
1859         struct hlist_head *hhead;
1860         struct hlist_node *hnode;
1861         struct hlist_node *pos;
1862         void              *key;
1863         int                c = 0;
1864
1865         /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1866         cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1867                 hlist_for_each_safe(hnode, pos, hhead) {
1868                         key = cfs_hash_key(hs, hnode);
1869                         LASSERT(key != NULL);
1870                         /* Validate hnode is in the correct bucket. */
1871                         cfs_hash_bucket_validate(hs, old, hnode);
1872                         /*
1873                          * Delete from old hash bucket; move to new bucket.
1874                          * ops->hs_key must be defined.
1875                          */
1876                         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1877                                              hs->hs_rehash_bits, key, &new);
1878                         cfs_hash_bd_move_locked(hs, old, &new, hnode);
1879                         c++;
1880                 }
1881         }
1882         return c;
1883 }
1884
1885 static int
1886 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1887 {
1888         cfs_hash_t         *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
1889         cfs_hash_bucket_t **bkts;
1890         cfs_hash_bd_t       bd;
1891         unsigned int        old_size;
1892         unsigned int        new_size;
1893         int                 bsize;
1894         int                 count = 0;
1895         int                 rc = 0;
1896         int                 i;
1897
1898         LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1899
1900         cfs_hash_lock(hs, 0);
1901         LASSERT(cfs_hash_is_rehashing(hs));
1902
1903         old_size = CFS_HASH_NBKT(hs);
1904         new_size = CFS_HASH_RH_NBKT(hs);
1905
1906         cfs_hash_unlock(hs, 0);
1907
1908         /*
1909          * don't need hs::hs_rwlock for hs::hs_buckets,
1910          * because nobody can change bkt-table except me.
1911          */
1912         bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1913                                         old_size, new_size);
1914         cfs_hash_lock(hs, 1);
1915         if (bkts == NULL) {
1916                 rc = -ENOMEM;
1917                 goto out;
1918         }
1919
1920         if (bkts == hs->hs_buckets) {
1921                 bkts = NULL; /* do nothing */
1922                 goto out;
1923         }
1924
1925         rc = __cfs_hash_theta(hs);
1926         if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1927                 /* free the newly allocated bkt-table */
1928                 old_size = new_size;
1929                 new_size = CFS_HASH_NBKT(hs);
1930                 rc = -EALREADY;
1931                 goto out;
1932         }
1933
1934         LASSERT(hs->hs_rehash_buckets == NULL);
1935         hs->hs_rehash_buckets = bkts;
1936
1937         rc = 0;
1938         cfs_hash_for_each_bucket(hs, &bd, i) {
1939                 if (cfs_hash_is_exiting(hs)) {
1940                         rc = -ESRCH;
1941                         /* someone wants to destroy the hash, abort now */
1942                         if (old_size < new_size) /* OK to free old bkt-table */
1943                                 break;
1944                         /* it's shrinking, need to free the new bkt-table */
1945                         hs->hs_rehash_buckets = NULL;
1946                         old_size = new_size;
1947                         new_size = CFS_HASH_NBKT(hs);
1948                         goto out;
1949                 }
1950
1951                 count += cfs_hash_rehash_bd(hs, &bd);
1952                 if (count < CFS_HASH_LOOP_HOG ||
1953                     cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1954                         continue;
1955                 }
1956
1957                 count = 0;
1958                 cfs_hash_unlock(hs, 1);
1959                 cond_resched();
1960                 cfs_hash_lock(hs, 1);
1961         }
1962
1963         hs->hs_rehash_count++;
1964
1965         bkts = hs->hs_buckets;
1966         hs->hs_buckets = hs->hs_rehash_buckets;
1967         hs->hs_rehash_buckets = NULL;
1968
1969         hs->hs_cur_bits = hs->hs_rehash_bits;
1970  out:
1971         hs->hs_rehash_bits = 0;
1972         if (rc == -ESRCH) /* never to be scheduled again */
1973                 cfs_wi_exit(cfs_sched_rehash, wi);
1974         bsize = cfs_hash_bkt_size(hs);
1975         cfs_hash_unlock(hs, 1);
1976         /* can't refer to @hs anymore because it could be destroyed */
1977         if (bkts != NULL)
1978                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1979         if (rc != 0)
1980                 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1981         /* return 1 only if cfs_wi_exit is called */
1982         return rc == -ESRCH;
1983 }
1984
1985 /**
1986  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1987  * @old_key must be provided to locate the object's previous location
1988  * in the hash, and the @new_key will be used to reinsert the object.
1989  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1990  * combo when it is critical that there is no window in time where the
1991  * object is missing from the hash.  When an object is being rehashed
1992  * the registered cfs_hash_get() and cfs_hash_put() functions will
1993  * not be called.
1994  */
1995 void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
1996                          void *new_key, struct hlist_node *hnode)
1997 {
1998         cfs_hash_bd_t        bds[3];
1999         cfs_hash_bd_t        old_bds[2];
2000         cfs_hash_bd_t        new_bd;
2001
2002         LASSERT(!hlist_unhashed(hnode));
2003
2004         cfs_hash_lock(hs, 0);
2005
2006         cfs_hash_dual_bd_get(hs, old_key, old_bds);
2007         cfs_hash_bd_get(hs, new_key, &new_bd);
2008
2009         bds[0] = old_bds[0];
2010         bds[1] = old_bds[1];
2011         bds[2] = new_bd;
2012
2013         /* NB: bds[0] and bds[1] are ordered already */
2014         cfs_hash_bd_order(&bds[1], &bds[2]);
2015         cfs_hash_bd_order(&bds[0], &bds[1]);
2016
2017         cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2018         if (likely(old_bds[1].bd_bucket == NULL)) {
2019                 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2020         } else {
2021                 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2022                 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2023         }
2024         /* overwrite the key inside the locks, otherwise it may race
2025          * with other operations, e.g. rehash */
2026         cfs_hash_keycpy(hs, hnode, new_key);
2027
2028         cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2029         cfs_hash_unlock(hs, 0);
2030 }
2031 EXPORT_SYMBOL(cfs_hash_rehash_key);
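
/*
 * Usage sketch (hypothetical field names): move an object to a new key
 * with no window in which a concurrent lookup can miss it.
 *
 *	old_key = obj->mo_key;
 *	new_key = my_obj_compute_key(obj);
 *	cfs_hash_rehash_key(my_hash, &old_key, &new_key, &obj->mo_hnode);
 */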
2032
2033 int cfs_hash_debug_header(struct seq_file *m)
2034 {
2035         return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2036                         CFS_HASH_BIGNAME_LEN,
2037                         "name", "cur", "min", "max", "theta", "t-min", "t-max",
2038                         "flags", "rehash", "count", "maxdep", "maxdepb",
2039                         " distribution");
2040 }
2041 EXPORT_SYMBOL(cfs_hash_debug_header);
2042
2043 static cfs_hash_bucket_t **
2044 cfs_hash_full_bkts(cfs_hash_t *hs)
2045 {
2046         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2047         if (hs->hs_rehash_buckets == NULL)
2048                 return hs->hs_buckets;
2049
2050         LASSERT(hs->hs_rehash_bits != 0);
2051         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2052                hs->hs_rehash_buckets : hs->hs_buckets;
2053 }
2054
2055 static unsigned int
2056 cfs_hash_full_nbkt(cfs_hash_t *hs)
2057 {
2058         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2059         if (hs->hs_rehash_buckets == NULL)
2060                 return CFS_HASH_NBKT(hs);
2061
2062         LASSERT(hs->hs_rehash_bits != 0);
2063         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2064                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2065 }
2066
2067 int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
2068 {
2069         int     dist[8] = { 0, };
2070         int     maxdep  = -1;
2071         int     maxdepb = -1;
2072         int     total   = 0;
2073         int     c       = 0;
2074         int     theta;
2075         int     i;
2076
2077         cfs_hash_lock(hs, 0);
2078         theta = __cfs_hash_theta(hs);
2079
2080         c += seq_printf(m, "%-*s ", CFS_HASH_BIGNAME_LEN, hs->hs_name);
2081         c += seq_printf(m, "%5d ",  1 << hs->hs_cur_bits);
2082         c += seq_printf(m, "%5d ",  1 << hs->hs_min_bits);
2083         c += seq_printf(m, "%5d ",  1 << hs->hs_max_bits);
2084         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(theta),
2085                         __cfs_hash_theta_frac(theta));
2086         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_min_theta),
2087                         __cfs_hash_theta_frac(hs->hs_min_theta));
2088         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_max_theta),
2089                         __cfs_hash_theta_frac(hs->hs_max_theta));
2090         c += seq_printf(m, " 0x%02x ", hs->hs_flags);
2091         c += seq_printf(m, "%6d ", hs->hs_rehash_count);
2092
2093         /*
2094          * The distribution is a summary of the chained hash depth in
2095  * each of the libcfs hash buckets.  Each bucket's hsb_count is
2096          * divided by the hash theta value and used to generate a
2097          * histogram of the hash distribution.  A uniform hash will
2098          * result in all hash buckets being close to the average thus
2099          * only the first few entries in the histogram will be non-zero.
2100  * If your hash function results in a non-uniform hash, this will
2101  * be observable as outlier buckets in the distribution histogram.
2102          *
2103          * Uniform hash distribution:           128/128/0/0/0/0/0/0
2104          * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
2105          */
2106         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2107                 cfs_hash_bd_t bd;
2108
2109                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2110                 cfs_hash_bd_lock(hs, &bd, 0);
2111                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2112                         maxdep  = bd.bd_bucket->hsb_depmax;
2113 #ifdef __KERNEL__
2114                         maxdepb = ffz(~maxdep);
2115 #endif
2116                 }
2117                 total += bd.bd_bucket->hsb_count;
2118                 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2119                 cfs_hash_bd_unlock(hs, &bd, 0);
2120         }
2121
2122         c += seq_printf(m, "%7d ", total);
2123         c += seq_printf(m, "%7d ", maxdep);
2124         c += seq_printf(m, "%7d ", maxdepb);
2125         for (i = 0; i < 8; i++)
2126                 c += seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2127
2128         cfs_hash_unlock(hs, 0);
2129         return c;
2130 }
2131 EXPORT_SYMBOL(cfs_hash_debug_str);
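
/*
 * Illustrative output: one header line from cfs_hash_debug_header() and
 * one per-hash line from cfs_hash_debug_str(); all values are made up.
 *
 *	name    cur   min   max theta t-min t-max flags rehash  count maxdep maxdepb distribution
 *	my_hash 128    32  1024 0.500 0.250 2.000  0x09      1   4096      4       2 120/8/0/0/0/0/0/0
 */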