1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2013, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * libcfs/libcfs/hash.c
37  *
38  * Implements a hash class for hash processing in the Lustre system.
39  *
40  * Author: YuZhangyong <yzy@clusterfs.com>
41  *
42  * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
43  * - Simplified API and improved documentation
44  * - Added per-hash feature flags:
45  *   * CFS_HASH_DEBUG additional validation
46  *   * CFS_HASH_REHASH dynamic rehashing
47  * - Added per-hash statistics
48  * - General performance enhancements
49  *
50  * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
51  * - moved all of this code to libcfs
52  * - don't allow cur_bits != max_bits unless CFS_HASH_REHASH is set
53  * - ignore hs_rwlock if CFS_HASH_REHASH is not set
54  * - buckets are allocated one by one (instead of as one contiguous
55  *   region), to avoid unnecessary cacheline conflicts
56  *
57  * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
58  * - "bucket" is a group of hlist_head now, user can speicify bucket size
59  *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
60  *   one lock for reducing memory overhead.
61  *
62  * - support lockless hash, caller will take care of locks:
63  *   avoid lock overhead for hash tables that are already protected
64  *   by locking in the caller for another reason
65  *
66  * - support both spin_lock/rwlock for bucket:
67  *   the overhead of spinlock contention is lower than the read/write
68  *   contention of a rwlock, so using a spinlock to serialize operations
69  *   on a bucket is more reasonable for frequently modified hash tables
70  *
71  * - support single-lock mode:
72  *   one lock protects all hash operations, to avoid the overhead of
73  *   multiple locks when the hash table is always small
74  *
75  * - removed a lot of unnecessary addref & decref on hash element:
76  *   addref & decref are expensive atomic operations in many
77  *   use-cases.
78  *
79  * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
80  *   some lustre use-cases require these functions to be strictly
81  *   non-blocking; in those cases we schedule any required rehash on
82  *   a different thread.
83  *
84  * - safer rehash on large hash tables
85  *   In the old implementation, the rehash function exclusively locked
86  *   the hash table and finished the rehash in one batch; that is
87  *   dangerous on SMP systems because rehashing millions of elements
88  *   can take a long time.  The new rehash can release the lock and
89  *   relax the CPU in the middle of a rehash, so it is safe for other
90  *   threads to search or modify the hash table while it is rehashing.
91  *
92  * - support two different refcount modes
93  *   . hash table has refcount on element
94  *   . hash table doesn't change refcount on adding/removing element
95  *
96  * - support long name hash table (for param-tree)
97  *
98  * - fix a bug for cfs_hash_rehash_key:
99  *   in the old implementation, cfs_hash_rehash_key could corrupt the
100  *   hash table because @key was overwritten without any protection.
101  *   The user must now define hs_keycpy for rehash-enabled hash
102  *   tables; cfs_hash_rehash_key overwrites the hash key under lock
103  *   by calling hs_keycpy.
104  *
105  * - better hash iteration:
106  *   Now we support both locked iteration & lockless iteration of hash
107  *   table. Also, the user can stop the iteration by returning 1 from the callback.
108  */
109
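/*
 * A minimal usage sketch.  The object type and the foo_* names below are
 * hypothetical, and CFS_HASH_MIN_THETA/CFS_HASH_MAX_THETA are assumed to
 * be available from libcfs; the remaining mandatory ops (hs_object,
 * hs_keycmp, and hs_get/hs_put_locked unless CFS_HASH_NO_ITEMREF is set)
 * are elided:
 *
 *	struct foo {
 *		__u64			foo_key;
 *		struct hlist_node	foo_hnode;
 *	};
 *
 *	static unsigned
 *	foo_hs_hash(cfs_hash_t *hs, const void *key, unsigned mask)
 *	{
 *		return cfs_hash_u64_hash(*(__u64 *)key, mask);
 *	}
 *
 *	static void *
 *	foo_hs_key(struct hlist_node *hnode)
 *	{
 *		return &container_of(hnode, struct foo, foo_hnode)->foo_key;
 *	}
 *
 *	static cfs_hash_ops_t foo_hash_ops = {
 *		.hs_hash	= foo_hs_hash,
 *		.hs_key		= foo_hs_key,
 *		...
 *	};
 *
 *	hash = cfs_hash_create("foo_hash", 5, 15, 3, 0,
 *			       CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			       &foo_hash_ops,
 *			       CFS_HASH_REHASH | CFS_HASH_COUNTER);
 */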
110 #include <libcfs/libcfs.h>
111
112 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
113 static unsigned int warn_on_depth = 8;
114 CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
115                 "warning when hash depth is high.");
116 #endif
117
118 struct cfs_wi_sched *cfs_sched_rehash;
119
120 static inline void
121 cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}
122
123 static inline void
124 cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}
125
126 static inline void
127 cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
128 {
129         spin_lock(&lock->spin);
130 }
131
132 static inline void
133 cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
134 {
135         spin_unlock(&lock->spin);
136 }
137
138 static inline void
139 cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
140 {
141         if (!exclusive)
142                 read_lock(&lock->rw);
143         else
144                 write_lock(&lock->rw);
145 }
146
147 static inline void
148 cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
149 {
150         if (!exclusive)
151                 read_unlock(&lock->rw);
152         else
153                 write_unlock(&lock->rw);
154 }
155
156 /** No lock hash */
157 static cfs_hash_lock_ops_t cfs_hash_nl_lops =
158 {
159         .hs_lock        = cfs_hash_nl_lock,
160         .hs_unlock      = cfs_hash_nl_unlock,
161         .hs_bkt_lock    = cfs_hash_nl_lock,
162         .hs_bkt_unlock  = cfs_hash_nl_unlock,
163 };
164
165 /** no bucket lock, one spinlock to protect everything */
166 static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
167 {
168         .hs_lock        = cfs_hash_spin_lock,
169         .hs_unlock      = cfs_hash_spin_unlock,
170         .hs_bkt_lock    = cfs_hash_nl_lock,
171         .hs_bkt_unlock  = cfs_hash_nl_unlock,
172 };
173
174 /** spin bucket lock, rehash is enabled */
175 static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
176 {
177         .hs_lock        = cfs_hash_rw_lock,
178         .hs_unlock      = cfs_hash_rw_unlock,
179         .hs_bkt_lock    = cfs_hash_spin_lock,
180         .hs_bkt_unlock  = cfs_hash_spin_unlock,
181 };
182
183 /** rw bucket lock, rehash is enabled */
184 static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
185 {
186         .hs_lock        = cfs_hash_rw_lock,
187         .hs_unlock      = cfs_hash_rw_unlock,
188         .hs_bkt_lock    = cfs_hash_rw_lock,
189         .hs_bkt_unlock  = cfs_hash_rw_unlock,
190 };
191
192 /** spin bucket lock, rehash is disabled */
193 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
194 {
195         .hs_lock        = cfs_hash_nl_lock,
196         .hs_unlock      = cfs_hash_nl_unlock,
197         .hs_bkt_lock    = cfs_hash_spin_lock,
198         .hs_bkt_unlock  = cfs_hash_spin_unlock,
199 };
200
201 /** rw bucket lock, rehash is disabled */
202 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
203 {
204         .hs_lock        = cfs_hash_nl_lock,
205         .hs_unlock      = cfs_hash_nl_unlock,
206         .hs_bkt_lock    = cfs_hash_rw_lock,
207         .hs_bkt_unlock  = cfs_hash_rw_unlock,
208 };
209
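/*
 * Choose lock operations from the creation flags: no locking at all
 * (the caller serializes externally), one table-wide spinlock with no
 * bucket locks, or per-bucket spin/rw locks, with an additional
 * table-wide rwlock when rehash is enabled.
 */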
210 static void
211 cfs_hash_lock_setup(cfs_hash_t *hs)
212 {
213         if (cfs_hash_with_no_lock(hs)) {
214                 hs->hs_lops = &cfs_hash_nl_lops;
215
216         } else if (cfs_hash_with_no_bktlock(hs)) {
217                 hs->hs_lops = &cfs_hash_nbl_lops;
218                 spin_lock_init(&hs->hs_lock.spin);
219
220         } else if (cfs_hash_with_rehash(hs)) {
221                 rwlock_init(&hs->hs_lock.rw);
222
223                 if (cfs_hash_with_rw_bktlock(hs))
224                         hs->hs_lops = &cfs_hash_bkt_rw_lops;
225                 else if (cfs_hash_with_spin_bktlock(hs))
226                         hs->hs_lops = &cfs_hash_bkt_spin_lops;
227                 else
228                         LBUG();
229         } else {
230                 if (cfs_hash_with_rw_bktlock(hs))
231                         hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
232                 else if (cfs_hash_with_spin_bktlock(hs))
233                         hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
234                 else
235                         LBUG();
236         }
237 }
238
239 /**
240  * Simple hash head without depth tracking
241  * new element is always added to head of hlist
242  */
243 typedef struct {
244         struct hlist_head       hh_head;        /**< entries list */
245 } cfs_hash_head_t;
246
247 static int
248 cfs_hash_hh_hhead_size(cfs_hash_t *hs)
249 {
250         return sizeof(cfs_hash_head_t);
251 }
252
253 static struct hlist_head *
254 cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
255 {
256         cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
257
258         return &head[bd->bd_offset].hh_head;
259 }
260
261 static int
262 cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
263                       struct hlist_node *hnode)
264 {
265         hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
266         return -1; /* unknown depth */
267 }
268
269 static int
270 cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
271                       struct hlist_node *hnode)
272 {
273         hlist_del_init(hnode);
274         return -1; /* unknown depth */
275 }
276
277 /**
278  * Simple hash head with depth tracking
279  * new element is always added to head of hlist
280  */
281 typedef struct {
282         struct hlist_head       hd_head;        /**< entries list */
283         unsigned int            hd_depth;       /**< list length */
284 } cfs_hash_head_dep_t;
285
286 static int
287 cfs_hash_hd_hhead_size(cfs_hash_t *hs)
288 {
289         return sizeof(cfs_hash_head_dep_t);
290 }
291
292 static struct hlist_head *
293 cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
294 {
295         cfs_hash_head_dep_t   *head;
296
297         head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
298         return &head[bd->bd_offset].hd_head;
299 }
300
301 static int
302 cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
303                       struct hlist_node *hnode)
304 {
305         cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
306                                                cfs_hash_head_dep_t, hd_head);
307         hlist_add_head(hnode, &hh->hd_head);
308         return ++hh->hd_depth;
309 }
310
311 static int
312 cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
313                       struct hlist_node *hnode)
314 {
315         cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
316                                                cfs_hash_head_dep_t, hd_head);
317         hlist_del_init(hnode);
318         return --hh->hd_depth;
319 }
320
321 /**
322  * double links hash head without depth tracking
323  * new element is always added to tail of hlist
324  */
325 typedef struct {
326         struct hlist_head       dh_head;        /**< entries list */
327         struct hlist_node       *dh_tail;       /**< the last entry */
328 } cfs_hash_dhead_t;
329
330 static int
331 cfs_hash_dh_hhead_size(cfs_hash_t *hs)
332 {
333         return sizeof(cfs_hash_dhead_t);
334 }
335
336 static struct hlist_head *
337 cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
338 {
339         cfs_hash_dhead_t *head;
340
341         head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
342         return &head[bd->bd_offset].dh_head;
343 }
344
345 static int
346 cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
347                       struct hlist_node *hnode)
348 {
349         cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
350                                             cfs_hash_dhead_t, dh_head);
351
352         if (dh->dh_tail != NULL) /* not empty */
353                 hlist_add_after(dh->dh_tail, hnode);
354         else /* empty list */
355                 hlist_add_head(hnode, &dh->dh_head);
356         dh->dh_tail = hnode;
357         return -1; /* unknown depth */
358 }
359
360 static int
361 cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
362                       struct hlist_node *hnd)
363 {
364         cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
365                                             cfs_hash_dhead_t, dh_head);
366
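        /*
         * If @hnd is the tail, move dh_tail back to the previous entry:
         * hnd->pprev points at the previous node's ->next pointer (or at
         * the list head's ->first), so container_of() recovers the
         * previous hlist_node unless @hnd is also the first entry.
         */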
367         if (hnd->next == NULL) { /* it's the tail */
368                 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
369                               container_of(hnd->pprev, struct hlist_node, next);
370         }
371         hlist_del_init(hnd);
372         return -1; /* unknown depth */
373 }
374
375 /**
376  * double links hash head with depth tracking
377  * new element is always added to tail of hlist
378  */
379 typedef struct {
380         struct hlist_head       dd_head;        /**< entries list */
381         struct hlist_node       *dd_tail;       /**< the last entry */
382         unsigned int            dd_depth;       /**< list length */
383 } cfs_hash_dhead_dep_t;
384
385 static int
386 cfs_hash_dd_hhead_size(cfs_hash_t *hs)
387 {
388         return sizeof(cfs_hash_dhead_dep_t);
389 }
390
391 static struct hlist_head *
392 cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
393 {
394         cfs_hash_dhead_dep_t *head;
395
396         head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
397         return &head[bd->bd_offset].dd_head;
398 }
399
400 static int
401 cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
402                       struct hlist_node *hnode)
403 {
404         cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
405                                                 cfs_hash_dhead_dep_t, dd_head);
406
407         if (dh->dd_tail != NULL) /* not empty */
408                 hlist_add_after(dh->dd_tail, hnode);
409         else /* empty list */
410                 hlist_add_head(hnode, &dh->dd_head);
411         dh->dd_tail = hnode;
412         return ++dh->dd_depth;
413 }
414
415 static int
416 cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
417                       struct hlist_node *hnd)
418 {
419         cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
420                                                 cfs_hash_dhead_dep_t, dd_head);
421
422         if (hnd->next == NULL) { /* it's the tail */
423                 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
424                               container_of(hnd->pprev, struct hlist_node, next);
425         }
426         hlist_del_init(hnd);
427         return --dh->dd_depth;
428 }
429
430 static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
431        .hop_hhead      = cfs_hash_hh_hhead,
432        .hop_hhead_size = cfs_hash_hh_hhead_size,
433        .hop_hnode_add  = cfs_hash_hh_hnode_add,
434        .hop_hnode_del  = cfs_hash_hh_hnode_del,
435 };
436
437 static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
438        .hop_hhead      = cfs_hash_hd_hhead,
439        .hop_hhead_size = cfs_hash_hd_hhead_size,
440        .hop_hnode_add  = cfs_hash_hd_hnode_add,
441        .hop_hnode_del  = cfs_hash_hd_hnode_del,
442 };
443
444 static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
445        .hop_hhead      = cfs_hash_dh_hhead,
446        .hop_hhead_size = cfs_hash_dh_hhead_size,
447        .hop_hnode_add  = cfs_hash_dh_hnode_add,
448        .hop_hnode_del  = cfs_hash_dh_hnode_del,
449 };
450
451 static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
452        .hop_hhead      = cfs_hash_dd_hhead,
453        .hop_hhead_size = cfs_hash_dd_hhead_size,
454        .hop_hnode_add  = cfs_hash_dd_hnode_add,
455        .hop_hnode_del  = cfs_hash_dd_hnode_del,
456 };
457
458 static void
459 cfs_hash_hlist_setup(cfs_hash_t *hs)
460 {
461         if (cfs_hash_with_add_tail(hs)) {
462                 hs->hs_hops = cfs_hash_with_depth(hs) ?
463                               &cfs_hash_dd_hops : &cfs_hash_dh_hops;
464         } else {
465                 hs->hs_hops = cfs_hash_with_depth(hs) ?
466                               &cfs_hash_hd_hops : &cfs_hash_hh_hops;
467         }
468 }
469
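/*
 * Split a hashed key into a bucket descriptor: the low (bits - bkt_bits)
 * bits of the hash index select the bucket and the high bits select the
 * hlist head inside it.  Illustrative example: with bits = 10 and
 * hs_bkt_bits = 3 there are 128 buckets of 8 hlist heads each, so
 * "index & 127" picks the bucket and "index >> 7" picks the offset.
 */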
470 static void
471 cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
472                      unsigned int bits, const void *key, cfs_hash_bd_t *bd)
473 {
474         unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
475
476         LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
477
478         bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
479         bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
480 }
481
482 void
483 cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
484 {
485         /* NB: caller should hold hs->hs_lock.rw if REHASH is set */
486         if (likely(hs->hs_rehash_buckets == NULL)) {
487                 cfs_hash_bd_from_key(hs, hs->hs_buckets,
488                                      hs->hs_cur_bits, key, bd);
489         } else {
490                 LASSERT(hs->hs_rehash_bits != 0);
491                 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
492                                      hs->hs_rehash_bits, key, bd);
493         }
494 }
495 EXPORT_SYMBOL(cfs_hash_bd_get);
496
497 static inline void
498 cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
499 {
500         if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
501                 return;
502
503         bd->bd_bucket->hsb_depmax = dep_cur;
504 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
505         if (likely(warn_on_depth == 0 ||
506                    max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
507                 return;
508
509         spin_lock(&hs->hs_dep_lock);
510         hs->hs_dep_max  = dep_cur;
511         hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
512         hs->hs_dep_off  = bd->bd_offset;
513         hs->hs_dep_bits = hs->hs_cur_bits;
514         spin_unlock(&hs->hs_dep_lock);
515
516         cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
517 # endif
518 }
519
520 void
521 cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
522                         struct hlist_node *hnode)
523 {
524         int rc;
525
526         rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
527         cfs_hash_bd_dep_record(hs, bd, rc);
528         bd->bd_bucket->hsb_version++;
529         if (unlikely(bd->bd_bucket->hsb_version == 0))
530                 bd->bd_bucket->hsb_version++;
531         bd->bd_bucket->hsb_count++;
532
533         if (cfs_hash_with_counter(hs))
534                 atomic_inc(&hs->hs_count);
535         if (!cfs_hash_with_no_itemref(hs))
536                 cfs_hash_get(hs, hnode);
537 }
538 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
539
540 void
541 cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
542                        struct hlist_node *hnode)
543 {
544         hs->hs_hops->hop_hnode_del(hs, bd, hnode);
545
546         LASSERT(bd->bd_bucket->hsb_count > 0);
547         bd->bd_bucket->hsb_count--;
548         bd->bd_bucket->hsb_version++;
549         if (unlikely(bd->bd_bucket->hsb_version == 0))
550                 bd->bd_bucket->hsb_version++;
551
552         if (cfs_hash_with_counter(hs)) {
553                 LASSERT(atomic_read(&hs->hs_count) > 0);
554                 atomic_dec(&hs->hs_count);
555         }
556         if (!cfs_hash_with_no_itemref(hs))
557                 cfs_hash_put_locked(hs, hnode);
558 }
559 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
560
561 void
562 cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
563                         cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
564 {
565         cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
566         cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
567         int                rc;
568
569         if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
570                 return;
571
572         /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
573          * in cfs_hash_bd_del/add_locked */
574         hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
575         rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
576         cfs_hash_bd_dep_record(hs, bd_new, rc);
577
578         LASSERT(obkt->hsb_count > 0);
579         obkt->hsb_count--;
580         obkt->hsb_version++;
581         if (unlikely(obkt->hsb_version == 0))
582                 obkt->hsb_version++;
583         nbkt->hsb_count++;
584         nbkt->hsb_version++;
585         if (unlikely(nbkt->hsb_version == 0))
586                 nbkt->hsb_version++;
587 }
588 EXPORT_SYMBOL(cfs_hash_bd_move_locked);
589
590 enum {
591         /** always set, for sanity (avoid ZERO intent) */
592         CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
593         /** return entry with a ref */
594         CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
595         /** add entry if not existing */
596         CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
597         /** delete entry, ignore other masks */
598         CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
599 };
600
601 typedef enum cfs_hash_lookup_intent {
602         /** return item w/o refcount */
603         CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
604         /** return item with refcount */
605         CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
606                                        CFS_HS_LOOKUP_MASK_REF),
607         /** return item w/o refcount if existed, otherwise add */
608         CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
609                                        CFS_HS_LOOKUP_MASK_ADD),
610         /** return item with refcount if existed, otherwise add */
611         CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
612                                        CFS_HS_LOOKUP_MASK_ADD),
613         /** delete if existed */
614         CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
615                                        CFS_HS_LOOKUP_MASK_DEL)
616 } cfs_hash_lookup_intent_t;
617
618 static struct hlist_node *
619 cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
620                           const void *key, struct hlist_node *hnode,
621                           cfs_hash_lookup_intent_t intent)
622
623 {
624         struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
625         struct hlist_node  *ehnode;
626         struct hlist_node  *match;
627         int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
628
629         /* with this function, we can avoid a lot of useless refcount ops,
630          * which are expensive atomic operations most of the time. */
631         match = intent_add ? NULL : hnode;
632         hlist_for_each(ehnode, hhead) {
633                 if (!cfs_hash_keycmp(hs, key, ehnode))
634                         continue;
635
636                 if (match != NULL && match != ehnode) /* can't match */
637                         continue;
638
639                 /* match and ... */
640                 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
641                         cfs_hash_bd_del_locked(hs, bd, ehnode);
642                         return ehnode;
643                 }
644
645                 /* caller wants refcount? */
646                 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
647                         cfs_hash_get(hs, ehnode);
648                 return ehnode;
649         }
650         /* no match item */
651         if (!intent_add)
652                 return NULL;
653
654         LASSERT(hnode != NULL);
655         cfs_hash_bd_add_locked(hs, bd, hnode);
656         return hnode;
657 }
658
659 struct hlist_node *
660 cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
661 {
662         return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
663                                         CFS_HS_LOOKUP_IT_FIND);
664 }
665 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
666
667 struct hlist_node *
668 cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
669 {
670         return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
671                                         CFS_HS_LOOKUP_IT_PEEK);
672 }
673 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
674
675 struct hlist_node *
676 cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
677                            const void *key, struct hlist_node *hnode,
678                            int noref)
679 {
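        /*
         * "!noref * CFS_HS_LOOKUP_MASK_REF" evaluates to the REF mask
         * when a reference is wanted (noref == 0) and to 0 otherwise,
         * so the refcount request is folded into the intent without a
         * branch.
         */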
680         return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
681                                         CFS_HS_LOOKUP_IT_ADD |
682                                         (!noref * CFS_HS_LOOKUP_MASK_REF));
683 }
684 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
685
686 struct hlist_node *
687 cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
688                            const void *key, struct hlist_node *hnode)
689 {
690         /* hnode can be NULL, we find the first item with @key */
691         return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
692                                         CFS_HS_LOOKUP_IT_FINDDEL);
693 }
694 EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
695
696 static void
697 cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
698                        unsigned n, int excl)
699 {
700         cfs_hash_bucket_t *prev = NULL;
701         int                i;
702
703         /**
704          * bds must be ordered by ascending bd->bd_bucket->hsb_index.
705          * NB: several bds may point to the same bucket but with different
706          * bd::bd_offset, so take care to lock each bucket only once.
707          */
708         cfs_hash_for_each_bd(bds, n, i) {
709                 if (prev == bds[i].bd_bucket)
710                         continue;
711
712                 LASSERT(prev == NULL ||
713                         prev->hsb_index < bds[i].bd_bucket->hsb_index);
714                 cfs_hash_bd_lock(hs, &bds[i], excl);
715                 prev = bds[i].bd_bucket;
716         }
717 }
718
719 static void
720 cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
721                          unsigned n, int excl)
722 {
723         cfs_hash_bucket_t *prev = NULL;
724         int                i;
725
726         cfs_hash_for_each_bd(bds, n, i) {
727                 if (prev != bds[i].bd_bucket) {
728                         cfs_hash_bd_unlock(hs, &bds[i], excl);
729                         prev = bds[i].bd_bucket;
730                 }
731         }
732 }
733
734 static struct hlist_node *
735 cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
736                                 unsigned n, const void *key)
737 {
738         struct hlist_node *ehnode;
739         unsigned          i;
740
741         cfs_hash_for_each_bd(bds, n, i) {
742                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
743                                                         CFS_HS_LOOKUP_IT_FIND);
744                 if (ehnode != NULL)
745                         return ehnode;
746         }
747         return NULL;
748 }
749
750 static struct hlist_node *
751 cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
752                                  cfs_hash_bd_t *bds, unsigned n, const void *key,
753                                  struct hlist_node *hnode, int noref)
754 {
755         struct hlist_node *ehnode;
756         int               intent;
757         unsigned          i;
758
759         LASSERT(hnode != NULL);
760         intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
761
762         cfs_hash_for_each_bd(bds, n, i) {
763                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
764                                                    NULL, intent);
765                 if (ehnode != NULL)
766                         return ehnode;
767         }
768
769         if (i == 1) { /* only one bucket */
770                 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
771         } else {
772                 cfs_hash_bd_t      mybd;
773
774                 cfs_hash_bd_get(hs, key, &mybd);
775                 cfs_hash_bd_add_locked(hs, &mybd, hnode);
776         }
777
778         return hnode;
779 }
780
781 static struct hlist_node *
782 cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
783                                  unsigned n, const void *key,
784                                  struct hlist_node *hnode)
785 {
786         struct hlist_node *ehnode;
787         unsigned           i;
788
789         cfs_hash_for_each_bd(bds, n, i) {
790                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
791                                                    CFS_HS_LOOKUP_IT_FINDDEL);
792                 if (ehnode != NULL)
793                         return ehnode;
794         }
795         return NULL;
796 }
797
798 static void
799 cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
800 {
801         int     rc;
802
803         if (bd2->bd_bucket == NULL)
804                 return;
805
806         if (bd1->bd_bucket == NULL) {
807                 *bd1 = *bd2;
808                 bd2->bd_bucket = NULL;
809                 return;
810         }
811
812         rc = cfs_hash_bd_compare(bd1, bd2);
813         if (rc == 0) {
814                 bd2->bd_bucket = NULL;
815
816         } else if (rc > 0) { /* swap bd1 and bd2 */
817                 cfs_hash_bd_t tmp;
818
819                 tmp = *bd2;
820                 *bd2 = *bd1;
821                 *bd1 = tmp;
822         }
823 }
824
825 void
826 cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
827 {
828         /* NB: caller should hold hs_lock.rw if REHASH is set */
829         cfs_hash_bd_from_key(hs, hs->hs_buckets,
830                              hs->hs_cur_bits, key, &bds[0]);
831         if (likely(hs->hs_rehash_buckets == NULL)) {
832                 /* no rehash or not rehashing */
833                 bds[1].bd_bucket = NULL;
834                 return;
835         }
836
837         LASSERT(hs->hs_rehash_bits != 0);
838         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
839                              hs->hs_rehash_bits, key, &bds[1]);
840
841         cfs_hash_bd_order(&bds[0], &bds[1]);
842 }
843 EXPORT_SYMBOL(cfs_hash_dual_bd_get);
844
845 void
846 cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
847 {
848         cfs_hash_multi_bd_lock(hs, bds, 2, excl);
849 }
850 EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
851
852 void
853 cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
854 {
855         cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
856 }
857 EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
858
859 struct hlist_node *
860 cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
861                                const void *key)
862 {
863         return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
864 }
865 EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
866
867 struct hlist_node *
868 cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
869                                 const void *key, struct hlist_node *hnode,
870                                 int noref)
871 {
872         return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
873                                                 hnode, noref);
874 }
875 EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
876
877 struct hlist_node *
878 cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
879                                 const void *key, struct hlist_node *hnode)
880 {
881         return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
882 }
883 EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
884
885 static void
886 cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
887                       int bkt_size, int prev_size, int size)
888 {
889         int     i;
890
891         for (i = prev_size; i < size; i++) {
892                 if (buckets[i] != NULL)
893                         LIBCFS_FREE(buckets[i], bkt_size);
894         }
895
896         LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
897 }
898
899 /*
900  * Create or grow bucket memory. Return old_buckets if no allocation was
901  * needed, the newly allocated buckets if allocation was needed and
902  * successful, and NULL on error.
903  */
904 static cfs_hash_bucket_t **
905 cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
906                          unsigned int old_size, unsigned int new_size)
907 {
908         cfs_hash_bucket_t **new_bkts;
909         int                 i;
910
911         LASSERT(old_size == 0 || old_bkts != NULL);
912
913         if (old_bkts != NULL && old_size == new_size)
914                 return old_bkts;
915
916         LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
917         if (new_bkts == NULL)
918                 return NULL;
919
920         if (old_bkts != NULL) {
921                 memcpy(new_bkts, old_bkts,
922                        min(old_size, new_size) * sizeof(*old_bkts));
923         }
924
925         for (i = old_size; i < new_size; i++) {
926                 struct hlist_head *hhead;
927                 cfs_hash_bd_t     bd;
928
929                 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
930                 if (new_bkts[i] == NULL) {
931                         cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
932                                               old_size, new_size);
933                         return NULL;
934                 }
935
936                 new_bkts[i]->hsb_index   = i;
937                 new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
938                 new_bkts[i]->hsb_depmax  = -1; /* unknown */
939                 bd.bd_bucket = new_bkts[i];
940                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
941                         INIT_HLIST_HEAD(hhead);
942
943                 if (cfs_hash_with_no_lock(hs) ||
944                     cfs_hash_with_no_bktlock(hs))
945                         continue;
946
947                 if (cfs_hash_with_rw_bktlock(hs))
948                         rwlock_init(&new_bkts[i]->hsb_lock.rw);
949                 else if (cfs_hash_with_spin_bktlock(hs))
950                         spin_lock_init(&new_bkts[i]->hsb_lock.spin);
951                 else
952                         LBUG(); /* invalid use-case */
953         }
954         return new_bkts;
955 }
956
957 /**
958  * Initialize new libcfs hash, where:
959  * @name     - Descriptive hash name
960  * @cur_bits - Initial hash table size, in bits
961  * @max_bits - Maximum allowed hash table resize, in bits
962  * @ops      - Registered hash table operations
963  * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
964  *           - CFS_HASH_SORT enable chained hash sort
965  */
966 static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
967
968 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
969 static int cfs_hash_dep_print(cfs_workitem_t *wi)
970 {
971         cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
972         int         dep;
973         int         bkt;
974         int         off;
975         int         bits;
976
977         spin_lock(&hs->hs_dep_lock);
978         dep  = hs->hs_dep_max;
979         bkt  = hs->hs_dep_bkt;
980         off  = hs->hs_dep_off;
981         bits = hs->hs_dep_bits;
982         spin_unlock(&hs->hs_dep_lock);
983
984         LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
985                       hs->hs_name, bits, dep, bkt, off);
986         spin_lock(&hs->hs_dep_lock);
987         hs->hs_dep_bits = 0; /* mark as workitem done */
988         spin_unlock(&hs->hs_dep_lock);
989         return 0;
990 }
991
992 static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
993 {
994         spin_lock_init(&hs->hs_dep_lock);
995         cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
996 }
997
998 static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
999 {
1000         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
1001                 return;
1002
1003         spin_lock(&hs->hs_dep_lock);
1004         while (hs->hs_dep_bits != 0) {
1005                 spin_unlock(&hs->hs_dep_lock);
1006                 cond_resched();
1007                 spin_lock(&hs->hs_dep_lock);
1008         }
1009         spin_unlock(&hs->hs_dep_lock);
1010 }
1011
1012 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1013
1014 static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
1015 static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}
1016
1017 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1018
1019 cfs_hash_t *
1020 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1021                 unsigned bkt_bits, unsigned extra_bytes,
1022                 unsigned min_theta, unsigned max_theta,
1023                 cfs_hash_ops_t *ops, unsigned flags)
1024 {
1025         cfs_hash_t *hs;
1026         int         len;
1027
1028         ENTRY;
1029
1030         CLASSERT(CFS_HASH_THETA_BITS < 15);
1031
1032         LASSERT(name != NULL);
1033         LASSERT(ops != NULL);
1034         LASSERT(ops->hs_key);
1035         LASSERT(ops->hs_hash);
1036         LASSERT(ops->hs_object);
1037         LASSERT(ops->hs_keycmp);
1038         if ((flags & CFS_HASH_NO_ITEMREF) == 0) {
1039                 LASSERT(ops->hs_get != NULL);
1040                 LASSERT(ops->hs_put_locked != NULL);
1041         }
1042
1043         if ((flags & CFS_HASH_REHASH) != 0)
1044                 flags |= CFS_HASH_COUNTER; /* must have counter */
1045
1046         LASSERT(cur_bits > 0);
1047         LASSERT(cur_bits >= bkt_bits);
1048         LASSERT(max_bits >= cur_bits && max_bits < 31);
1049         LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1050         LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1051                      (flags & CFS_HASH_NO_LOCK) == 0));
1052         LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1053                       ops->hs_keycpy != NULL));
1054
1055         len = (flags & CFS_HASH_BIGNAME) == 0 ?
1056               CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1057         LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
1058         if (hs == NULL)
1059                 RETURN(NULL);
1060
1061         strncpy(hs->hs_name, name, len);
1062         hs->hs_name[len - 1] = '\0';
1063         hs->hs_flags = flags;
1064
1065         atomic_set(&hs->hs_refcount, 1);
1066         atomic_set(&hs->hs_count, 0);
1067
1068         cfs_hash_lock_setup(hs);
1069         cfs_hash_hlist_setup(hs);
1070
1071         hs->hs_cur_bits = (__u8)cur_bits;
1072         hs->hs_min_bits = (__u8)cur_bits;
1073         hs->hs_max_bits = (__u8)max_bits;
1074         hs->hs_bkt_bits = (__u8)bkt_bits;
1075
1076         hs->hs_ops         = ops;
1077         hs->hs_extra_bytes = extra_bytes;
1078         hs->hs_rehash_bits = 0;
1079         cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1080         cfs_hash_depth_wi_init(hs);
1081
1082         if (cfs_hash_with_rehash(hs))
1083                 __cfs_hash_set_theta(hs, min_theta, max_theta);
1084
1085         hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1086                                                   CFS_HASH_NBKT(hs));
1087         if (hs->hs_buckets != NULL)
1088                 return hs;
1089
1090         LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
1091         RETURN(NULL);
1092 }
1093 EXPORT_SYMBOL(cfs_hash_create);
1094
1095 /**
1096  * Cleanup libcfs hash @hs.
1097  */
1098 static void
1099 cfs_hash_destroy(cfs_hash_t *hs)
1100 {
1101         struct hlist_node     *hnode;
1102         struct hlist_node     *pos;
1103         cfs_hash_bd_t         bd;
1104         int                   i;
1105         ENTRY;
1106
1107         LASSERT(hs != NULL);
1108         LASSERT(!cfs_hash_is_exiting(hs) &&
1109                 !cfs_hash_is_iterating(hs));
1110
1111         /**
1112          * prohibit further rehashes; no lock is needed because
1113          * we are the only (last) one who can change it.
1114          */
1115         hs->hs_exiting = 1;
1116         if (cfs_hash_with_rehash(hs))
1117                 cfs_hash_rehash_cancel(hs);
1118
1119         cfs_hash_depth_wi_cancel(hs);
1120         /* rehash should be done/canceled */
1121         LASSERT(hs->hs_buckets != NULL &&
1122                 hs->hs_rehash_buckets == NULL);
1123
1124         cfs_hash_for_each_bucket(hs, &bd, i) {
1125                 struct hlist_head *hhead;
1126
1127                 LASSERT(bd.bd_bucket != NULL);
1128                 /* this lock is not strictly needed; taken for consistency */
1129                 cfs_hash_bd_lock(hs, &bd, 1);
1130
1131                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1132                         hlist_for_each_safe(hnode, pos, hhead) {
1133                                 LASSERTF(!cfs_hash_with_assert_empty(hs),
1134                                          "hash %s bucket %u(%u) is not "
1135                                          "empty: %u items left\n",
1136                                          hs->hs_name, bd.bd_bucket->hsb_index,
1137                                          bd.bd_offset, bd.bd_bucket->hsb_count);
1138                                 /* can't validate the key here, because
1139                                  * the rehash may have been interrupted */
1140                                 cfs_hash_bd_del_locked(hs, &bd, hnode);
1141                                 cfs_hash_exit(hs, hnode);
1142                         }
1143                 }
1144                 LASSERT(bd.bd_bucket->hsb_count == 0);
1145                 cfs_hash_bd_unlock(hs, &bd, 1);
1146                 cond_resched();
1147         }
1148
1149         LASSERT(atomic_read(&hs->hs_count) == 0);
1150
1151         cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1152                               0, CFS_HASH_NBKT(hs));
1153         i = cfs_hash_with_bigname(hs) ?
1154             CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1155         LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
1156
1157         EXIT;
1158 }
1159
1160 cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
1161 {
1162         if (atomic_inc_not_zero(&hs->hs_refcount))
1163                 return hs;
1164         return NULL;
1165 }
1166 EXPORT_SYMBOL(cfs_hash_getref);
1167
1168 void cfs_hash_putref(cfs_hash_t *hs)
1169 {
1170         if (atomic_dec_and_test(&hs->hs_refcount))
1171                 cfs_hash_destroy(hs);
1172 }
1173 EXPORT_SYMBOL(cfs_hash_putref);
1174
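/*
 * Decide whether the table should grow or shrink: theta is the load
 * factor (average number of items per hash slot, kept in
 * CFS_HASH_THETA_BITS fixed point).  Grow by one bit when theta exceeds
 * hs_max_theta; shrink by one bit when it falls below hs_min_theta and
 * shrinking is enabled.
 */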
1175 static inline int
1176 cfs_hash_rehash_bits(cfs_hash_t *hs)
1177 {
1178         if (cfs_hash_with_no_lock(hs) ||
1179             !cfs_hash_with_rehash(hs))
1180                 return -EOPNOTSUPP;
1181
1182         if (unlikely(cfs_hash_is_exiting(hs)))
1183                 return -ESRCH;
1184
1185         if (unlikely(cfs_hash_is_rehashing(hs)))
1186                 return -EALREADY;
1187
1188         if (unlikely(cfs_hash_is_iterating(hs)))
1189                 return -EAGAIN;
1190
1191         /* XXX: need to handle case with max_theta != 2.0
1192          *      and the case with min_theta != 0.5 */
1193         if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1194             (__cfs_hash_theta(hs) > hs->hs_max_theta))
1195                 return hs->hs_cur_bits + 1;
1196
1197         if (!cfs_hash_with_shrink(hs))
1198                 return 0;
1199
1200         if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1201             (__cfs_hash_theta(hs) < hs->hs_min_theta))
1202                 return hs->hs_cur_bits - 1;
1203
1204         return 0;
1205 }
1206
1207 /**
1208  * don't allow inline rehash if:
1209  * - user wants non-blocking change (add/del) on hash table
1210  * - too many elements
1211  */
1212 static inline int
1213 cfs_hash_rehash_inline(cfs_hash_t *hs)
1214 {
1215         return !cfs_hash_with_nblk_change(hs) &&
1216                atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1217 }
1218
1219 /**
1220  * Add item @hnode to libcfs hash @hs using @key.  The registered
1221  * ops->hs_get function will be called when the item is added.
1222  */
1223 void
1224 cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
1225 {
1226         cfs_hash_bd_t   bd;
1227         int             bits;
1228
1229         LASSERT(hlist_unhashed(hnode));
1230
1231         cfs_hash_lock(hs, 0);
1232         cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1233
1234         cfs_hash_key_validate(hs, key, hnode);
1235         cfs_hash_bd_add_locked(hs, &bd, hnode);
1236
1237         cfs_hash_bd_unlock(hs, &bd, 1);
1238
1239         bits = cfs_hash_rehash_bits(hs);
1240         cfs_hash_unlock(hs, 0);
1241         if (bits > 0)
1242                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1243 }
1244 EXPORT_SYMBOL(cfs_hash_add);
1245
1246 static struct hlist_node *
1247 cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
1248                      struct hlist_node *hnode, int noref)
1249 {
1250         struct hlist_node *ehnode;
1251         cfs_hash_bd_t     bds[2];
1252         int               bits = 0;
1253
1254         LASSERT(hlist_unhashed(hnode));
1255
1256         cfs_hash_lock(hs, 0);
1257         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1258
1259         cfs_hash_key_validate(hs, key, hnode);
1260         ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1261                                                  hnode, noref);
1262         cfs_hash_dual_bd_unlock(hs, bds, 1);
1263
1264         if (ehnode == hnode) /* new item added */
1265                 bits = cfs_hash_rehash_bits(hs);
1266         cfs_hash_unlock(hs, 0);
1267         if (bits > 0)
1268                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1269
1270         return ehnode;
1271 }
1272
1273 /**
1274  * Add item @hnode to libcfs hash @hs using @key.  The registered
1275  * ops->hs_get function will be called if the item was added.
1276  * Returns 0 on success or -EALREADY on key collisions.
1277  */
1278 int
1279 cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
1280 {
1281         return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1282                -EALREADY : 0;
1283 }
1284 EXPORT_SYMBOL(cfs_hash_add_unique);
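
/*
 * Usage sketch (struct foo and foo_free() are hypothetical, as in the
 * sketch near the top of this file).  On -EALREADY no reference has
 * been taken on @obj, so the caller may free it:
 *
 *	rc = cfs_hash_add_unique(hs, &obj->foo_key, &obj->foo_hnode);
 *	if (rc == -EALREADY)
 *		foo_free(obj);
 */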
1285
1286 /**
1287  * Add item @hnode to libcfs hash @hs using @key.  If this @key
1288  * already exists in the hash then ops->hs_get will be called on the
1289  * conflicting entry and that entry will be returned to the caller.
1290  * Otherwise ops->hs_get is called on the item which was added.
1291  */
1292 void *
1293 cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
1294                         struct hlist_node *hnode)
1295 {
1296         hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1297
1298         return cfs_hash_object(hs, hnode);
1299 }
1300 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1301
1302 /**
1303  * Delete item @hnode from the libcfs hash @hs using @key.  The @key
1304  * is required to ensure the correct hash bucket is locked since there
1305  * is no direct linkage from the item to the bucket.  The object
1306  * removed from the hash will be returned and obs->hs_put is called
1307  * on the removed object.
1308  */
1309 void *
1310 cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
1311 {
1312         void           *obj  = NULL;
1313         int             bits = 0;
1314         cfs_hash_bd_t   bds[2];
1315
1316         cfs_hash_lock(hs, 0);
1317         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1318
1319         /* NB: do nothing if @hnode is not in hash table */
1320         if (hnode == NULL || !hlist_unhashed(hnode)) {
1321                 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1322                         cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1323                 } else {
1324                         hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1325                                                                 key, hnode);
1326                 }
1327         }
1328
1329         if (hnode != NULL) {
1330                 obj  = cfs_hash_object(hs, hnode);
1331                 bits = cfs_hash_rehash_bits(hs);
1332         }
1333
1334         cfs_hash_dual_bd_unlock(hs, bds, 1);
1335         cfs_hash_unlock(hs, 0);
1336         if (bits > 0)
1337                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1338
1339         return obj;
1340 }
1341 EXPORT_SYMBOL(cfs_hash_del);
1342
1343 /**
1344  * Delete item given @key in libcfs hash @hs.  The first @key found in
1345  * the hash will be removed, if the key exists multiple times in the hash
1346  * @hs this function must be called once per key.  The removed object
1347  * will be returned and ops->hs_put is called on the removed object.
1348  */
1349 void *
1350 cfs_hash_del_key(cfs_hash_t *hs, const void *key)
1351 {
1352         return cfs_hash_del(hs, key, NULL);
1353 }
1354 EXPORT_SYMBOL(cfs_hash_del_key);
1355
1356 /**
1357  * Lookup an item using @key in the libcfs hash @hs and return it.
1358  * If the @key is found in the hash, ops->hs_get() is called and the
1359  * matching object is returned.  It is the caller's responsibility
1360  * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1361  * when finished with the object.  If the @key is not found in the
1362  * hash @hs, NULL is returned.
1363  */
1364 void *
1365 cfs_hash_lookup(cfs_hash_t *hs, const void *key)
1366 {
1367         void                 *obj = NULL;
1368         struct hlist_node     *hnode;
1369         cfs_hash_bd_t         bds[2];
1370
1371         cfs_hash_lock(hs, 0);
1372         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1373
1374         hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1375         if (hnode != NULL)
1376                 obj = cfs_hash_object(hs, hnode);
1377
1378         cfs_hash_dual_bd_unlock(hs, bds, 0);
1379         cfs_hash_unlock(hs, 0);
1380
1381         return obj;
1382 }
1383 EXPORT_SYMBOL(cfs_hash_lookup);
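
/*
 * Sketch of the lookup/put pairing described above (struct foo is
 * hypothetical and embeds its hlist_node as foo_hnode):
 *
 *	struct foo *obj = cfs_hash_lookup(hs, &key);
 *
 *	if (obj != NULL) {
 *		do_something_with(obj);
 *		cfs_hash_put(hs, &obj->foo_hnode);
 *	}
 */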
1384
1385 static void
1386 cfs_hash_for_each_enter(cfs_hash_t *hs)
1387 {
1388         LASSERT(!cfs_hash_is_exiting(hs));
1389
1390         if (!cfs_hash_with_rehash(hs))
1391                 return;
1392         /*
1393          * NB: there is a race on cfs_hash_t::hs_iterating, but it doesn't
1394          * matter because it's just an unreliable signal to the rehash
1395          * thread, which will try to finish the rehash ASAP on seeing it.
1396          */
1397         hs->hs_iterating = 1;
1398
1399         cfs_hash_lock(hs, 1);
1400         hs->hs_iterators++;
1401
1402         /* NB: iteration is mostly called by service threads, so we
1403          * prefer to cancel a pending rehash request rather than block
1404          * a service thread; the rehash request is relaunched after
1405          * the iteration */
1406         if (cfs_hash_is_rehashing(hs))
1407                 cfs_hash_rehash_cancel_locked(hs);
1408         cfs_hash_unlock(hs, 1);
1409 }
1410
1411 static void
1412 cfs_hash_for_each_exit(cfs_hash_t *hs)
1413 {
1414         int remained;
1415         int bits;
1416
1417         if (!cfs_hash_with_rehash(hs))
1418                 return;
1419         cfs_hash_lock(hs, 1);
1420         remained = --hs->hs_iterators;
1421         bits = cfs_hash_rehash_bits(hs);
1422         cfs_hash_unlock(hs, 1);
1423         /* NB: this races on cfs_hash_t::hs_iterating, see above */
1424         if (remained == 0)
1425                 hs->hs_iterating = 0;
1426         if (bits > 0) {
1427                 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1428                                     CFS_HASH_LOOP_HOG);
1429         }
1430 }
1431
1432 /**
1433  * For each item in the libcfs hash @hs call the passed callback @func
1434  * and pass to it as an argument each hash item and the private @data.
1435  *
1436  * a) the function itself may sleep (it yields between buckets)!
1437  * b) during the callback:
1438  *    . the bucket lock is held, so the callback must never sleep.
1439  *    . if @remove_safe is true, the user can remove the current item
1440  *      with cfs_hash_bd_del_locked
1441  */
1442 static __u64
1443 cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
1444                         void *data, int remove_safe)
1445 {
1446         struct hlist_node       *hnode;
1447         struct hlist_node       *pos;
1448         cfs_hash_bd_t           bd;
1449         __u64                   count = 0;
1450         int                     excl  = !!remove_safe;
1451         int                     loop  = 0;
1452         int                     i;
1453         ENTRY;
1454
1455         cfs_hash_for_each_enter(hs);
1456
1457         cfs_hash_lock(hs, 0);
1458         LASSERT(!cfs_hash_is_rehashing(hs));
1459
1460         cfs_hash_for_each_bucket(hs, &bd, i) {
1461                 struct hlist_head *hhead;
1462
1463                 cfs_hash_bd_lock(hs, &bd, excl);
1464                 if (func == NULL) { /* only glimpse size */
1465                         count += bd.bd_bucket->hsb_count;
1466                         cfs_hash_bd_unlock(hs, &bd, excl);
1467                         continue;
1468                 }
1469
1470                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1471                         hlist_for_each_safe(hnode, pos, hhead) {
1472                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1473                                 count++;
1474                                 loop++;
1475                                 if (func(hs, &bd, hnode, data)) {
1476                                         cfs_hash_bd_unlock(hs, &bd, excl);
1477                                         goto out;
1478                                 }
1479                         }
1480                 }
1481                 cfs_hash_bd_unlock(hs, &bd, excl);
1482                 if (loop < CFS_HASH_LOOP_HOG)
1483                         continue;
1484                 loop = 0;
1485                 cfs_hash_unlock(hs, 0);
1486                 cond_resched();
1487                 cfs_hash_lock(hs, 0);
1488         }
1489  out:
1490         cfs_hash_unlock(hs, 0);
1491
1492         cfs_hash_for_each_exit(hs);
1493         RETURN(count);
1494 }
1495
1496 typedef struct {
1497         cfs_hash_cond_opt_cb_t  func;
1498         void                   *arg;
1499 } cfs_hash_cond_arg_t;
1500
1501 static int
1502 cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1503                          struct hlist_node *hnode, void *data)
1504 {
1505         cfs_hash_cond_arg_t *cond = data;
1506
1507         if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1508                 cfs_hash_bd_del_locked(hs, bd, hnode);
1509         return 0;
1510 }
1511
1512 /**
1513  * Delete items from the libcfs hash @hs for which @func returns true.
1514  * The write lock is held while walking each bucket, so no object can
1515  * gain a reference during the loop.
1516  */
1517 void
1518 cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
1519 {
1520         cfs_hash_cond_arg_t arg = {
1521                 .func   = func,
1522                 .arg    = data,
1523         };
1524
1525         cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1526 }
1527 EXPORT_SYMBOL(cfs_hash_cond_del);
1528
1529 void
1530 cfs_hash_for_each(cfs_hash_t *hs,
1531                   cfs_hash_for_each_cb_t func, void *data)
1532 {
1533         cfs_hash_for_each_tight(hs, func, data, 0);
1534 }
1535 EXPORT_SYMBOL(cfs_hash_for_each);
1536
1537 void
1538 cfs_hash_for_each_safe(cfs_hash_t *hs,
1539                        cfs_hash_for_each_cb_t func, void *data)
1540 {
1541         cfs_hash_for_each_tight(hs, func, data, 1);
1542 }
1543 EXPORT_SYMBOL(cfs_hash_for_each_safe);
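
/*
 * Callback sketch for the iterators above (struct foo and foo_is_stale()
 * are hypothetical); returning non-zero from the callback stops the
 * iteration, as noted in the header of this file:
 *
 *	static int
 *	foo_check_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *		     struct hlist_node *hnode, void *data)
 *	{
 *		struct foo *obj = cfs_hash_object(hs, hnode);
 *
 *		return foo_is_stale(obj);
 *	}
 *
 *	cfs_hash_for_each(hs, foo_check_cb, NULL);
 */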
1544
1545 static int
1546 cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1547               struct hlist_node *hnode, void *data)
1548 {
1549         *(int *)data = 0;
1550         return 1; /* return 1 to break the loop */
1551 }
1552
1553 int
1554 cfs_hash_is_empty(cfs_hash_t *hs)
1555 {
1556         int empty = 1;
1557
1558         cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1559         return empty;
1560 }
1561 EXPORT_SYMBOL(cfs_hash_is_empty);
1562
1563 __u64
1564 cfs_hash_size_get(cfs_hash_t *hs)
1565 {
1566         return cfs_hash_with_counter(hs) ?
1567                atomic_read(&hs->hs_count) :
1568                cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1569 }
1570 EXPORT_SYMBOL(cfs_hash_size_get);
1571
1572 /*
1573  * cfs_hash_for_each_relax:
1574  * Iterate the hash table and call @func on each item without
1575  * holding any lock. This function cannot guarantee to visit every
1576  * item if these features are enabled:
1577  *
1578  *  a. if rehash_key is enabled, an item can be moved from
1579  *     one bucket to another bucket
1580  *  b. the user can remove a non-zero-ref item from the hash-table,
1581  *     so the item can disappear from the hash-table; even worse,
1582  *     the user may have changed the key and reinserted the item
1583  *     into another hash bucket.
1584  * There is no way to iterate correctly in either of these cases,
1585  * so the walk of a chain is stopped as soon as a change is detected.
1586  */
1587 static int
1588 cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
1589 {
1590         struct hlist_node *hnode;
1591         struct hlist_node *tmp;
1592         cfs_hash_bd_t     bd;
1593         __u32             version;
1594         int               count = 0;
1595         int               stop_on_change;
1596         int               rc;
1597         int               i;
1598         ENTRY;
1599
1600         stop_on_change = cfs_hash_with_rehash_key(hs) ||
1601                          !cfs_hash_with_no_itemref(hs) ||
1602                          CFS_HOP(hs, put_locked) == NULL;
1603         cfs_hash_lock(hs, 0);
1604         LASSERT(!cfs_hash_is_rehashing(hs));
1605
1606         cfs_hash_for_each_bucket(hs, &bd, i) {
1607                 struct hlist_head *hhead;
1608
1609                 cfs_hash_bd_lock(hs, &bd, 0);
1610                 version = cfs_hash_bd_version_get(&bd);
1611
1612                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1613                         for (hnode = hhead->first; hnode != NULL;) {
1614                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1615                                 cfs_hash_get(hs, hnode);
1616                                 cfs_hash_bd_unlock(hs, &bd, 0);
1617                                 cfs_hash_unlock(hs, 0);
1618
1619                                 rc = func(hs, &bd, hnode, data);
1620                                 if (stop_on_change)
1621                                         cfs_hash_put(hs, hnode);
1622                                 cond_resched();
1623                                 count++;
1624
1625                                 cfs_hash_lock(hs, 0);
1626                                 cfs_hash_bd_lock(hs, &bd, 0);
1627                                 if (!stop_on_change) {
1628                                         tmp = hnode->next;
1629                                         cfs_hash_put_locked(hs, hnode);
1630                                         hnode = tmp;
1631                                 } else { /* bucket changed? */
1632                                         if (version !=
1633                                             cfs_hash_bd_version_get(&bd))
1634                                                 break;
1635                                         /* safe to continue because no change */
1636                                         hnode = hnode->next;
1637                                 }
1638                                 if (rc) /* callback wants to break iteration */
1639                                         break;
1640                         }
1641                 }
1642                 cfs_hash_bd_unlock(hs, &bd, 0);
1643         }
1644         cfs_hash_unlock(hs, 0);
1645
1646         return count;
1647 }
1648
1649 int
1650 cfs_hash_for_each_nolock(cfs_hash_t *hs,
1651                          cfs_hash_for_each_cb_t func, void *data)
1652 {
1653         ENTRY;
1654
1655         if (cfs_hash_with_no_lock(hs) ||
1656             cfs_hash_with_rehash_key(hs) ||
1657             !cfs_hash_with_no_itemref(hs))
1658                 RETURN(-EOPNOTSUPP);
1659
1660         if (CFS_HOP(hs, get) == NULL ||
1661             (CFS_HOP(hs, put) == NULL &&
1662              CFS_HOP(hs, put_locked) == NULL))
1663                 RETURN(-EOPNOTSUPP);
1664
1665         cfs_hash_for_each_enter(hs);
1666         cfs_hash_for_each_relax(hs, func, data);
1667         cfs_hash_for_each_exit(hs);
1668
1669         RETURN(0);
1670 }
1671 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
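
/*
 * Usage sketch (illustrative only, #if 0): a callback suitable for
 * cfs_hash_for_each_nolock() runs with neither the hash lock nor the
 * bucket lock held, so it may sleep.  The callback signature below is
 * the cfs_hash_for_each_cb_t inferred from the func(hs, &bd, hnode,
 * data) calls in this file; "my_obj_sync" is hypothetical.
 */
#if 0
static int my_sync_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode, void *data)
{
        my_obj_sync(cfs_hash_object(hs, hnode)); /* may block here */
        return 0; /* nonzero would stop walking the current chain */
}

static int my_sync_all(cfs_hash_t *hs)
{
        /* returns -EOPNOTSUPP unless the hash has get and put (or
         * put_locked) operations and is not NO_LOCK/REHASH_KEY */
        return cfs_hash_for_each_nolock(hs, my_sync_cb, NULL);
}
#endif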
1672
1673 /**
1674  * For each hash bucket in the libcfs hash @hs call the passed callback
1675  * @func until all the hash buckets are empty.  The passed callback @func
1676  * or the previously registered callback hs->hs_put must remove the item
1677  * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1678  * functions.  No rwlocks will be held during the callback @func, so it
1679  * is safe to sleep if needed.  This function will not terminate until
1680  * the hash is empty.  Note it is still possible to concurrently add new
1681  * items into the hash.  It is the caller's responsibility to ensure
1682  * the required locking is in place to prevent concurrent insertions.
1683  */
1684 int
1685 cfs_hash_for_each_empty(cfs_hash_t *hs,
1686                         cfs_hash_for_each_cb_t func, void *data)
1687 {
1688         unsigned  i = 0;
1689         ENTRY;
1690
1691         if (cfs_hash_with_no_lock(hs))
1692                 return -EOPNOTSUPP;
1693
1694         if (CFS_HOP(hs, get) == NULL ||
1695             (CFS_HOP(hs, put) == NULL &&
1696              CFS_HOP(hs, put_locked) == NULL))
1697                 return -EOPNOTSUPP;
1698
1699         cfs_hash_for_each_enter(hs);
1700         while (cfs_hash_for_each_relax(hs, func, data)) {
1701                 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1702                        hs->hs_name, i++);
1703         }
1704         cfs_hash_for_each_exit(hs);
1705         RETURN(0);
1706 }
1707 EXPORT_SYMBOL(cfs_hash_for_each_empty);
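
/*
 * Usage sketch (illustrative only, #if 0): drain a hash at shutdown.
 * As required above, the callback must unhash every item; this one
 * uses cfs_hash_del() with the item's key.  "my_obj", "mo_key" and
 * "my_obj_free" are hypothetical.
 */
#if 0
static int my_obj_release(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                          struct hlist_node *hnode, void *data)
{
        struct my_obj *mo = cfs_hash_object(hs, hnode);

        cfs_hash_del(hs, &mo->mo_key, hnode); /* remove from @hs */
        my_obj_free(mo);                      /* sleeping is safe here */
        return 0;
}

static void my_hash_fini(cfs_hash_t *hs)
{
        cfs_hash_for_each_empty(hs, my_obj_release, NULL);
        cfs_hash_putref(hs);
}
#endif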
1708
1709 void
1710 cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
1711                         cfs_hash_for_each_cb_t func, void *data)
1712 {
1713         struct hlist_head *hhead;
1714         struct hlist_node *hnode;
1715         cfs_hash_bd_t      bd;
1716
1717         cfs_hash_for_each_enter(hs);
1718         cfs_hash_lock(hs, 0);
1719         if (hindex >= CFS_HASH_NHLIST(hs))
1720                 goto out;
1721
1722         cfs_hash_bd_index_set(hs, hindex, &bd);
1723
1724         cfs_hash_bd_lock(hs, &bd, 0);
1725         hhead = cfs_hash_bd_hhead(hs, &bd);
1726         hlist_for_each(hnode, hhead) {
1727                 if (func(hs, &bd, hnode, data))
1728                         break;
1729         }
1730         cfs_hash_bd_unlock(hs, &bd, 0);
1731 out:
1732         cfs_hash_unlock(hs, 0);
1733         cfs_hash_for_each_exit(hs);
1734 }
1735 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1736 
1737
1738 /*
1739  * For each item in the libcfs hash @hs which matches the @key, call
1740  * the passed callback @func, passing it each matching hash item and
1741  * the private @data as arguments. The bucket lock is held during the
1742  * callback, so the callback must never sleep.
1743  */
1744 void
1745 cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
1746                         cfs_hash_for_each_cb_t func, void *data)
1747 {
1748         struct hlist_node *hnode;
1749         cfs_hash_bd_t      bds[2];
1750         unsigned           i;
1751
1752         cfs_hash_lock(hs, 0);
1753
1754         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1755
1756         cfs_hash_for_each_bd(bds, 2, i) {
1757                 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1758
1759                 hlist_for_each(hnode, hlist) {
1760                         cfs_hash_bucket_validate(hs, &bds[i], hnode);
1761
1762                         if (cfs_hash_keycmp(hs, key, hnode)) {
1763                                 if (func(hs, &bds[i], hnode, data))
1764                                         break;
1765                         }
1766                 }
1767         }
1768
1769         cfs_hash_dual_bd_unlock(hs, bds, 0);
1770         cfs_hash_unlock(hs, 0);
1771 }
1772 EXPORT_SYMBOL(cfs_hash_for_each_key);
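
/*
 * Usage sketch (illustrative only, #if 0): count the items that hash
 * to @key and compare equal with it.  Both candidate bucket locks are
 * held while the callback runs, so it must never sleep.
 */
#if 0
static int my_count_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       struct hlist_node *hnode, void *data)
{
        (*(int *)data)++; /* bucket lock is held: do not sleep */
        return 0;         /* nonzero would stop the walk early */
}

static int my_count_key(cfs_hash_t *hs, const void *key)
{
        int count = 0;

        cfs_hash_for_each_key(hs, key, my_count_cb, &count);
        return count;
}
#endif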
1773
1774 /**
1775  * Rehash the libcfs hash @hs to a newly computed number of bits.  This
1776  * can be used to grow the hash size when excessive chaining is detected,
1777  * or to shrink the hash when it is larger than needed.  When the
1778  * CFS_HASH_REHASH flag is set in @hs the libcfs hash may be dynamically
1779  * rehashed during addition or removal if the hash's theta value drops
1780  * below hs->hs_min_theta or exceeds hs->hs_max_theta.  By default
1781  * these values are tuned to keep the chained hash depth small, and
1782  * this approach assumes a reasonably uniform hashing function.  The
1783  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1784  */
1785 void
1786 cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
1787 {
1788         int     i;
1789
1790         /* the caller must hold cfs_hash_lock(hs, 1) */
1791         LASSERT(cfs_hash_with_rehash(hs) &&
1792                 !cfs_hash_with_no_lock(hs));
1793
1794         if (!cfs_hash_is_rehashing(hs))
1795                 return;
1796
1797         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1798                 hs->hs_rehash_bits = 0;
1799                 return;
1800         }
1801
1802         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1803                 cfs_hash_unlock(hs, 1);
1804                 /* escalate to a console warning if we wait too long */
1805                 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1806                        "hash %s is still rehashing, rescheduled %d\n",
1807                        hs->hs_name, i - 1);
1808                 cond_resched();
1809                 cfs_hash_lock(hs, 1);
1810         }
1811 }
1812 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1813
1814 void
1815 cfs_hash_rehash_cancel(cfs_hash_t *hs)
1816 {
1817         cfs_hash_lock(hs, 1);
1818         cfs_hash_rehash_cancel_locked(hs);
1819         cfs_hash_unlock(hs, 1);
1820 }
1821 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1822
1823 int
1824 cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
1825 {
1826         int     rc;
1827
1828         LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1829
1830         cfs_hash_lock(hs, 1);
1831
1832         rc = cfs_hash_rehash_bits(hs);
1833         if (rc <= 0) {
1834                 cfs_hash_unlock(hs, 1);
1835                 return rc;
1836         }
1837
1838         hs->hs_rehash_bits = rc;
1839         if (!do_rehash) {
1840                 /* launch and return */
1841                 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1842                 cfs_hash_unlock(hs, 1);
1843                 return 0;
1844         }
1845
1846         /* rehash right now */
1847         cfs_hash_unlock(hs, 1);
1848
1849         return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1850 }
1851 EXPORT_SYMBOL(cfs_hash_rehash);
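
/*
 * Usage sketch (illustrative only, #if 0): trigger a rehash check by
 * hand.  With do_rehash == 0 the work item is only scheduled on
 * cfs_sched_rehash and this returns immediately; with do_rehash != 0
 * the table is resized before returning.
 */
#if 0
static void my_resize_now(cfs_hash_t *hs)
{
        /* a no-op (rc <= 0) while theta stays inside [t-min, t-max] */
        int rc = cfs_hash_rehash(hs, 1);

        CDEBUG(D_INFO, "rehash of %s returned %d\n", hs->hs_name, rc);
}
#endif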
1852
1853 static int
1854 cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
1855 {
1856         cfs_hash_bd_t      new;
1857         struct hlist_head *hhead;
1858         struct hlist_node *hnode;
1859         struct hlist_node *pos;
1860         void              *key;
1861         int                c = 0;
1862
1863         /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1864         cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1865                 hlist_for_each_safe(hnode, pos, hhead) {
1866                         key = cfs_hash_key(hs, hnode);
1867                         LASSERT(key != NULL);
1868                         /* Validate hnode is in the correct bucket. */
1869                         cfs_hash_bucket_validate(hs, old, hnode);
1870                         /*
1871                          * Delete from old hash bucket; move to new bucket.
1872                          * ops->hs_key must be defined.
1873                          */
1874                         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1875                                              hs->hs_rehash_bits, key, &new);
1876                         cfs_hash_bd_move_locked(hs, old, &new, hnode);
1877                         c++;
1878                 }
1879         }
1880         return c;
1881 }
1882
1883 static int
1884 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1885 {
1886         cfs_hash_t         *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
1887         cfs_hash_bucket_t **bkts;
1888         cfs_hash_bd_t       bd;
1889         unsigned int        old_size;
1890         unsigned int        new_size;
1891         int                 bsize;
1892         int                 count = 0;
1893         int                 rc = 0;
1894         int                 i;
1895
1896         LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1897
1898         cfs_hash_lock(hs, 0);
1899         LASSERT(cfs_hash_is_rehashing(hs));
1900
1901         old_size = CFS_HASH_NBKT(hs);
1902         new_size = CFS_HASH_RH_NBKT(hs);
1903
1904         cfs_hash_unlock(hs, 0);
1905
1906         /*
1907          * don't need hs::hs_rwlock for hs::hs_buckets,
1908          * because nobody can change bkt-table except me.
1909          */
1910         bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1911                                         old_size, new_size);
1912         cfs_hash_lock(hs, 1);
1913         if (bkts == NULL) {
1914                 rc = -ENOMEM;
1915                 goto out;
1916         }
1917
1918         if (bkts == hs->hs_buckets) {
1919                 bkts = NULL; /* do nothing */
1920                 goto out;
1921         }
1922
1923         rc = __cfs_hash_theta(hs);
1924         if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1925                 /* free the new allocated bkt-table */
1926                 old_size = new_size;
1927                 new_size = CFS_HASH_NBKT(hs);
1928                 rc = -EALREADY;
1929                 goto out;
1930         }
1931
1932         LASSERT(hs->hs_rehash_buckets == NULL);
1933         hs->hs_rehash_buckets = bkts;
1934
1935         rc = 0;
1936         cfs_hash_for_each_bucket(hs, &bd, i) {
1937                 if (cfs_hash_is_exiting(hs)) {
1938                         rc = -ESRCH;
1939                         /* someone wants to destroy the hash, abort now */
1940                         if (old_size < new_size) /* OK to free old bkt-table */
1941                                 break;
1942                         /* it's shrinking, need to free the new bkt-table */
1943                         hs->hs_rehash_buckets = NULL;
1944                         old_size = new_size;
1945                         new_size = CFS_HASH_NBKT(hs);
1946                         goto out;
1947                 }
1948
1949                 count += cfs_hash_rehash_bd(hs, &bd);
1950                 if (count < CFS_HASH_LOOP_HOG ||
1951                     cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1952                         continue;
1953                 }
1954
1955                 count = 0;
1956                 cfs_hash_unlock(hs, 1);
1957                 cond_resched();
1958                 cfs_hash_lock(hs, 1);
1959         }
1960
1961         hs->hs_rehash_count++;
1962
1963         bkts = hs->hs_buckets;
1964         hs->hs_buckets = hs->hs_rehash_buckets;
1965         hs->hs_rehash_buckets = NULL;
1966
1967         hs->hs_cur_bits = hs->hs_rehash_bits;
1968  out:
1969         hs->hs_rehash_bits = 0;
1970         if (rc == -ESRCH) /* never be scheduled again */
1971                 cfs_wi_exit(cfs_sched_rehash, wi);
1972         bsize = cfs_hash_bkt_size(hs);
1973         cfs_hash_unlock(hs, 1);
1974         /* can't refer to @hs anymore because it could be destroyed */
1975         if (bkts != NULL)
1976                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1977         if (rc != 0)
1978                 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1979         /* return 1 only if cfs_wi_exit is called */
1980         return rc == -ESRCH;
1981 }
1982
1983 /**
1984  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1985  * @old_key must be provided to locate the objects previous location
1986  * in the hash, and the @new_key will be used to reinsert the object.
1987  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1988  * combo when it is critical that there is no window in time where the
1989  * object is missing from the hash.  When an object is being rehashed
1990  * the registered cfs_hash_get() and cfs_hash_put() functions will
1991  * not be called.
1992  */
1993 void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
1994                          void *new_key, struct hlist_node *hnode)
1995 {
1996         cfs_hash_bd_t        bds[3];
1997         cfs_hash_bd_t        old_bds[2];
1998         cfs_hash_bd_t        new_bd;
1999
2000         LASSERT(!hlist_unhashed(hnode));
2001
2002         cfs_hash_lock(hs, 0);
2003
2004         cfs_hash_dual_bd_get(hs, old_key, old_bds);
2005         cfs_hash_bd_get(hs, new_key, &new_bd);
2006
2007         bds[0] = old_bds[0];
2008         bds[1] = old_bds[1];
2009         bds[2] = new_bd;
2010
2011         /* NB: bds[0] and bds[1] are ordered already */
2012         cfs_hash_bd_order(&bds[1], &bds[2]);
2013         cfs_hash_bd_order(&bds[0], &bds[1]);
2014
2015         cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2016         if (likely(old_bds[1].bd_bucket == NULL)) {
2017                 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2018         } else {
2019                 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2020                 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2021         }
2022         /* overwrite the key while the locks are held, otherwise this
2023          * may race with other operations, e.g. rehash */
2024         cfs_hash_keycpy(hs, hnode, new_key);
2025
2026         cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2027         cfs_hash_unlock(hs, 0);
2028 }
2029 EXPORT_SYMBOL(cfs_hash_rehash_key);
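
/*
 * Usage sketch (illustrative only, #if 0): change an object's key with
 * no window in which it is missing from the hash, as described above.
 * "my_obj", "my_key", "mo_key" and "mo_hnode" are hypothetical; the
 * key bytes are copied via the hash's keycpy operation, if registered.
 */
#if 0
static void my_obj_change_key(cfs_hash_t *hs, struct my_obj *mo,
                              struct my_key *new_key)
{
        /* all (up to three) affected buckets are locked in order */
        cfs_hash_rehash_key(hs, &mo->mo_key, new_key, &mo->mo_hnode);
}
#endif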
2030
2031 #ifndef HAVE_ONLY_PROCFS_SEQ
2032 int cfs_hash_debug_header(char *str, int size)
2033 {
2034         return snprintf(str, size, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2035                  CFS_HASH_BIGNAME_LEN,
2036                  "name", "cur", "min", "max", "theta", "t-min", "t-max",
2037                  "flags", "rehash", "count", "maxdep", "maxdepb",
2038                  " distribution");
2039 }
2040 EXPORT_SYMBOL(cfs_hash_debug_header);
2041 #endif
2042
2043 int cfs_hash_debug_header_seq(struct seq_file *m)
2044 {
2045         return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2046                         CFS_HASH_BIGNAME_LEN,
2047                         "name", "cur", "min", "max", "theta", "t-min", "t-max",
2048                         "flags", "rehash", "count", "maxdep", "maxdepb",
2049                         " distribution");
2050 }
2051 EXPORT_SYMBOL(cfs_hash_debug_header_seq);
2052
2053 static cfs_hash_bucket_t **
2054 cfs_hash_full_bkts(cfs_hash_t *hs)
2055 {
2056         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2057         if (hs->hs_rehash_buckets == NULL)
2058                 return hs->hs_buckets;
2059
2060         LASSERT(hs->hs_rehash_bits != 0);
2061         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2062                hs->hs_rehash_buckets : hs->hs_buckets;
2063 }
2064
2065 static unsigned int
2066 cfs_hash_full_nbkt(cfs_hash_t *hs)
2067 {
2068         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2069         if (hs->hs_rehash_buckets == NULL)
2070                 return CFS_HASH_NBKT(hs);
2071
2072         LASSERT(hs->hs_rehash_bits != 0);
2073         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2074                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2075 }
2076
2077 #ifndef HAVE_ONLY_PROCFS_SEQ
2078 int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)
2079 {
2080         int                    dist[8] = { 0, };
2081         int                    maxdep  = -1;
2082         int                    maxdepb = -1;
2083         int                    total   = 0;
2084         int                    c       = 0;
2085         int                    theta;
2086         int                    i;
2087
2088         if (str == NULL || size == 0)
2089                 return 0;
2090
2091         cfs_hash_lock(hs, 0);
2092         theta = __cfs_hash_theta(hs);
2093
2094         c += snprintf(str + c, size - c, "%-*s ",
2095                       CFS_HASH_BIGNAME_LEN, hs->hs_name);
2096         c += snprintf(str + c, size - c, "%5d ",  1 << hs->hs_cur_bits);
2097         c += snprintf(str + c, size - c, "%5d ",  1 << hs->hs_min_bits);
2098         c += snprintf(str + c, size - c, "%5d ",  1 << hs->hs_max_bits);
2099         c += snprintf(str + c, size - c, "%d.%03d ",
2100                       __cfs_hash_theta_int(theta),
2101                       __cfs_hash_theta_frac(theta));
2102         c += snprintf(str + c, size - c, "%d.%03d ",
2103                       __cfs_hash_theta_int(hs->hs_min_theta),
2104                       __cfs_hash_theta_frac(hs->hs_min_theta));
2105         c += snprintf(str + c, size - c, "%d.%03d ",
2106                       __cfs_hash_theta_int(hs->hs_max_theta),
2107                       __cfs_hash_theta_frac(hs->hs_max_theta));
2108         c += snprintf(str + c, size - c, " 0x%02x ", hs->hs_flags);
2109         c += snprintf(str + c, size - c, "%6d ", hs->hs_rehash_count);
2110
2111         /*
2112          * The distribution is a summary of the chained hash depth in
2113          * each of the libcfs hash buckets.  Each bucket's hsb_count is
2114          * divided by the hash theta value and used to generate a
2115          * histogram of the hash distribution.  A uniform hash will
2116          * result in all hash buckets being close to the average, thus
2117          * only the first few entries in the histogram will be non-zero.
2118          * If your hash function results in a non-uniform hash, this will
2119          * be observable as outlier buckets in the distribution histogram.
2120          *
2121          * Uniform hash distribution:      128/128/0/0/0/0/0/0
2122          * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
2123          */
2124         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2125                 cfs_hash_bd_t  bd;
2126
2127                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2128                 cfs_hash_bd_lock(hs, &bd, 0);
2129                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2130                         maxdep  = bd.bd_bucket->hsb_depmax;
2131 #ifdef __KERNEL__
2132                         maxdepb = ffz(~maxdep);
2133 #endif
2134                 }
2135                 total += bd.bd_bucket->hsb_count;
2136                 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2137                 cfs_hash_bd_unlock(hs, &bd, 0);
2138         }
2139
2140         c += snprintf(str + c, size - c, "%7d ", total);
2141         c += snprintf(str + c, size - c, "%7d ", maxdep);
2142         c += snprintf(str + c, size - c, "%7d ", maxdepb);
2143         for (i = 0; i < 8; i++)
2144                 c += snprintf(str + c, size - c, "%d%c",  dist[i],
2145                               (i == 7) ? '\n' : '/');
2146
2147         cfs_hash_unlock(hs, 0);
2148
2149         return c;
2150 }
2151 EXPORT_SYMBOL(cfs_hash_debug_str);
2152 #endif
2153
2154 int cfs_hash_debug_str_seq(cfs_hash_t *hs, struct seq_file *m)
2155 {
2156         int     dist[8] = { 0, };
2157         int     maxdep  = -1;
2158         int     maxdepb = -1;
2159         int     total   = 0;
2160         int     c       = 0;
2161         int     theta;
2162         int     i;
2163
2164         cfs_hash_lock(hs, 0);
2165         theta = __cfs_hash_theta(hs);
2166
2167         c += seq_printf(m, "%-*s ", CFS_HASH_BIGNAME_LEN, hs->hs_name);
2168         c += seq_printf(m, "%5d ",  1 << hs->hs_cur_bits);
2169         c += seq_printf(m, "%5d ",  1 << hs->hs_min_bits);
2170         c += seq_printf(m, "%5d ",  1 << hs->hs_max_bits);
2171         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(theta),
2172                         __cfs_hash_theta_frac(theta));
2173         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_min_theta),
2174                         __cfs_hash_theta_frac(hs->hs_min_theta));
2175         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_max_theta),
2176                         __cfs_hash_theta_frac(hs->hs_max_theta));
2177         c += seq_printf(m, " 0x%02x ", hs->hs_flags);
2178         c += seq_printf(m, "%6d ", hs->hs_rehash_count);
2179
2180         /*
2181          * The distribution is a summary of the chained hash depth in
2182          * each of the libcfs hash buckets.  Each bucket's hsb_count is
2183          * divided by the hash theta value and used to generate a
2184          * histogram of the hash distribution.  A uniform hash will
2185          * result in all hash buckets being close to the average, thus
2186          * only the first few entries in the histogram will be non-zero.
2187          * If your hash function results in a non-uniform hash, this will
2188          * be observable as outlier buckets in the distribution histogram.
2189          *
2190          * Uniform hash distribution:           128/128/0/0/0/0/0/0
2191          * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
2192          */
2193         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2194                 cfs_hash_bd_t bd;
2195
2196                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2197                 cfs_hash_bd_lock(hs, &bd, 0);
2198                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2199                         maxdep  = bd.bd_bucket->hsb_depmax;
2200 #ifdef __KERNEL__
2201                         maxdepb = ffz(~maxdep);
2202 #endif
2203                 }
2204                 total += bd.bd_bucket->hsb_count;
2205                 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2206                 cfs_hash_bd_unlock(hs, &bd, 0);
2207         }
2208
2209         c += seq_printf(m, "%7d ", total);
2210         c += seq_printf(m, "%7d ", maxdep);
2211         c += seq_printf(m, "%7d ", maxdepb);
2212         for (i = 0; i < 8; i++)
2213                 c += seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2214
2215         cfs_hash_unlock(hs, 0);
2216         return c;
2217 }
2218 EXPORT_SYMBOL(cfs_hash_debug_str_seq);