1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * libcfs/libcfs/hash.c
37  *
38  * Implement a hash class for hash processing in the Lustre system.
39  *
40  * Author: YuZhangyong <yzy@clusterfs.com>
41  *
42  * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
43  * - Simplified API and improved documentation
44  * - Added per-hash feature flags:
45  *   * CFS_HASH_DEBUG additional validation
46  *   * CFS_HASH_REHASH dynamic rehashing
47  * - Added per-hash statistics
48  * - General performance enhancements
49  *
50  * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
51  * - move all stuff to libcfs
52  * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
53  * - ignore hs_rwlock if without CFS_HASH_REHASH setting
54  * - buckets are allocated one by one (instead of as contiguous memory),
55  *   to avoid unnecessary cacheline conflicts
56  *
57  * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
58  * - "bucket" is a group of hlist_head now, user can speicify bucket size
59  *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
60  *   one lock for reducing memory overhead.
61  *
62  * - support lockless hash, caller will take care of locks:
63  *   avoid lock overhead for hash tables that are already protected
64  *   by locking in the caller for another reason
65  *
66  * - support both spin_lock/rwlock for bucket:
67  *   overhead of spinlock contention is lower than read/write
68  *   contention of rwlock, so using spinlock to serialize operations on
69  *   bucket is more reasonable for frequently changed hash tables
70  *
71  * - support one-single lock mode:
72  *   one lock to protect all hash operations to avoid overhead of
73  *   multiple locks if hash table is always small
74  *
75  * - removed a lot of unnecessary addref & decref on hash element:
76  *   addref & decref are expensive atomic operations in many
77  *   use-cases.
78  *
79  * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
80  *   some lustre use-cases require these functions to be strictly
81  *   non-blocking, so we need to schedule any required rehash on a
82  *   different thread in those cases.
83  *
84  * - safer rehash on large hash table
85  *   In the old implementation, rehash would exclusively lock the
86  *   hash table and finish in one batch; that is dangerous on an SMP
87  *   system because rehashing millions of elements could take a long
88  *   time. The new rehash can release the lock and relax the CPU in
89  *   the middle of a rehash, so it is safe for another thread to
90  *   search/change the hash table even while it is rehashing.
91  *
92  * - support two different refcount modes
93  *   . hash table has refcount on element
94  *   . hash table doesn't change refcount on adding/removing element
95  *
96  * - support long name hash table (for param-tree)
97  *
98  * - fix a bug for cfs_hash_rehash_key:
99  *   in the old implementation, cfs_hash_rehash_key could corrupt the
100  *   hash-table because @key was overwritten without any protection.
101  *   Now we require the user to define hs_keycpy for rehash-enabled
102  *   hash tables; cfs_hash_rehash_key will overwrite the hash-key
103  *   inside the lock by calling hs_keycpy.
104  *
105  * - better hash iteration:
106  *   Now we support both locked iteration & lockless iteration of hash
107  *   table. Also, user can break the iteration by returning 1 from the callback.
108  */
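/*
 * Usage sketch (illustrative only; "struct my_obj" and its members are
 * hypothetical, not part of this file): a caller embeds a struct
 * hlist_node in its own object and hashes the object by an external key:
 *
 *      struct my_obj {
 *              __u64                   mo_key;
 *              struct hlist_node       mo_node;
 *              atomic_t                mo_ref;
 *      };
 *
 * Every cfs_hash entry point below operates on the embedded hlist_node;
 * ops->hs_object() maps it back to the owning object, typically via
 * container_of(hnode, struct my_obj, mo_node), and ops->hs_get/hs_put
 * manipulate mo_ref.
 */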
109
110 #include <libcfs/libcfs.h>
111
112 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
113 static unsigned int warn_on_depth = 8;
114 CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
115                 "warning when hash depth is high.");
116 #endif
117
118 struct cfs_wi_sched *cfs_sched_rehash;
119
120 static inline void
121 cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}
122
123 static inline void
124 cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}
125
126 static inline void
127 cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
128 __acquires(&lock->spin)
129 {
130         spin_lock(&lock->spin);
131 }
132
133 static inline void
134 cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
135 __releases(&lock->spin)
136 {
137         spin_unlock(&lock->spin);
138 }
139
140 static inline void
141 cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
142 __acquires(&lock->rw)
143 {
144         if (!exclusive)
145                 read_lock(&lock->rw);
146         else
147                 write_lock(&lock->rw);
148 }
149
150 static inline void
151 cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
152 __releases(&lock->rw)
153 {
154         if (!exclusive)
155                 read_unlock(&lock->rw);
156         else
157                 write_unlock(&lock->rw);
158 }
159
160 /** No lock hash */
161 static cfs_hash_lock_ops_t cfs_hash_nl_lops =
162 {
163         .hs_lock        = cfs_hash_nl_lock,
164         .hs_unlock      = cfs_hash_nl_unlock,
165         .hs_bkt_lock    = cfs_hash_nl_lock,
166         .hs_bkt_unlock  = cfs_hash_nl_unlock,
167 };
168
169 /** no bucket lock, one spinlock to protect everything */
170 static cfs_hash_lock_ops_t cfs_hash_nbl_lops =
171 {
172         .hs_lock        = cfs_hash_spin_lock,
173         .hs_unlock      = cfs_hash_spin_unlock,
174         .hs_bkt_lock    = cfs_hash_nl_lock,
175         .hs_bkt_unlock  = cfs_hash_nl_unlock,
176 };
177
178 /** spin bucket lock, rehash is enabled */
179 static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops =
180 {
181         .hs_lock        = cfs_hash_rw_lock,
182         .hs_unlock      = cfs_hash_rw_unlock,
183         .hs_bkt_lock    = cfs_hash_spin_lock,
184         .hs_bkt_unlock  = cfs_hash_spin_unlock,
185 };
186
187 /** rw bucket lock, rehash is enabled */
188 static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops =
189 {
190         .hs_lock        = cfs_hash_rw_lock,
191         .hs_unlock      = cfs_hash_rw_unlock,
192         .hs_bkt_lock    = cfs_hash_rw_lock,
193         .hs_bkt_unlock  = cfs_hash_rw_unlock,
194 };
195
196 /** spin bucket lock, rehash is disabled */
197 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops =
198 {
199         .hs_lock        = cfs_hash_nl_lock,
200         .hs_unlock      = cfs_hash_nl_unlock,
201         .hs_bkt_lock    = cfs_hash_spin_lock,
202         .hs_bkt_unlock  = cfs_hash_spin_unlock,
203 };
204
205 /** rw bucket lock, rehash is disabled */
206 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
207 {
208         .hs_lock        = cfs_hash_nl_lock,
209         .hs_unlock      = cfs_hash_nl_unlock,
210         .hs_bkt_lock    = cfs_hash_rw_lock,
211         .hs_bkt_unlock  = cfs_hash_rw_unlock,
212 };
213
214 static void
215 cfs_hash_lock_setup(cfs_hash_t *hs)
216 {
217         if (cfs_hash_with_no_lock(hs)) {
218                 hs->hs_lops = &cfs_hash_nl_lops;
219
220         } else if (cfs_hash_with_no_bktlock(hs)) {
221                 hs->hs_lops = &cfs_hash_nbl_lops;
222                 spin_lock_init(&hs->hs_lock.spin);
223
224         } else if (cfs_hash_with_rehash(hs)) {
225                 rwlock_init(&hs->hs_lock.rw);
226
227                 if (cfs_hash_with_rw_bktlock(hs))
228                         hs->hs_lops = &cfs_hash_bkt_rw_lops;
229                 else if (cfs_hash_with_spin_bktlock(hs))
230                         hs->hs_lops = &cfs_hash_bkt_spin_lops;
231                 else
232                         LBUG();
233         } else {
234                 if (cfs_hash_with_rw_bktlock(hs))
235                         hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
236                 else if (cfs_hash_with_spin_bktlock(hs))
237                         hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
238                 else
239                         LBUG();
240         }
241 }
242
243 /**
244  * Simple hash head without depth tracking
245  * new element is always added to head of hlist
246  */
247 typedef struct {
248         struct hlist_head       hh_head;        /**< entries list */
249 } cfs_hash_head_t;
250
251 static int
252 cfs_hash_hh_hhead_size(cfs_hash_t *hs)
253 {
254         return sizeof(cfs_hash_head_t);
255 }
256
257 static struct hlist_head *
258 cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
259 {
260         cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
261
262         return &head[bd->bd_offset].hh_head;
263 }
264
265 static int
266 cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
267                       struct hlist_node *hnode)
268 {
269         hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
270         return -1; /* unknown depth */
271 }
272
273 static int
274 cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
275                       struct hlist_node *hnode)
276 {
277         hlist_del_init(hnode);
278         return -1; /* unknown depth */
279 }
280
281 /**
282  * Simple hash head with depth tracking
283  * new element is always added to head of hlist
284  */
285 typedef struct {
286         struct hlist_head       hd_head;        /**< entries list */
287         unsigned int            hd_depth;       /**< list length */
288 } cfs_hash_head_dep_t;
289
290 static int
291 cfs_hash_hd_hhead_size(cfs_hash_t *hs)
292 {
293         return sizeof(cfs_hash_head_dep_t);
294 }
295
296 static struct hlist_head *
297 cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
298 {
299         cfs_hash_head_dep_t   *head;
300
301         head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
302         return &head[bd->bd_offset].hd_head;
303 }
304
305 static int
306 cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
307                       struct hlist_node *hnode)
308 {
309         cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
310                                                cfs_hash_head_dep_t, hd_head);
311         hlist_add_head(hnode, &hh->hd_head);
312         return ++hh->hd_depth;
313 }
314
315 static int
316 cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
317                       struct hlist_node *hnode)
318 {
319         cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
320                                                cfs_hash_head_dep_t, hd_head);
321         hlist_del_init(hnode);
322         return --hh->hd_depth;
323 }
324
325 /**
326  * double links hash head without depth tracking
327  * new element is always added to tail of hlist
328  */
329 typedef struct {
330         struct hlist_head       dh_head;        /**< entries list */
331         struct hlist_node       *dh_tail;       /**< the last entry */
332 } cfs_hash_dhead_t;
333
334 static int
335 cfs_hash_dh_hhead_size(cfs_hash_t *hs)
336 {
337         return sizeof(cfs_hash_dhead_t);
338 }
339
340 static struct hlist_head *
341 cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
342 {
343         cfs_hash_dhead_t *head;
344
345         head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
346         return &head[bd->bd_offset].dh_head;
347 }
348
349 static int
350 cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
351                       struct hlist_node *hnode)
352 {
353         cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
354                                             cfs_hash_dhead_t, dh_head);
355
356         if (dh->dh_tail != NULL) /* not empty */
357                 hlist_add_after(dh->dh_tail, hnode);
358         else /* empty list */
359                 hlist_add_head(hnode, &dh->dh_head);
360         dh->dh_tail = hnode;
361         return -1; /* unknown depth */
362 }
363
364 static int
365 cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
366                       struct hlist_node *hnd)
367 {
368         cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
369                                             cfs_hash_dhead_t, dh_head);
370
371         if (hnd->next == NULL) { /* it's the tail */
372                 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
373                               container_of(hnd->pprev, struct hlist_node, next);
374         }
375         hlist_del_init(hnd);
376         return -1; /* unknown depth */
377 }
378
379 /**
380  * double links hash head with depth tracking
381  * new element is always added to tail of hlist
382  */
383 typedef struct {
384         struct hlist_head       dd_head;        /**< entries list */
385         struct hlist_node       *dd_tail;       /**< the last entry */
386         unsigned int            dd_depth;       /**< list length */
387 } cfs_hash_dhead_dep_t;
388
389 static int
390 cfs_hash_dd_hhead_size(cfs_hash_t *hs)
391 {
392         return sizeof(cfs_hash_dhead_dep_t);
393 }
394
395 static struct hlist_head *
396 cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
397 {
398         cfs_hash_dhead_dep_t *head;
399
400         head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
401         return &head[bd->bd_offset].dd_head;
402 }
403
404 static int
405 cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
406                       struct hlist_node *hnode)
407 {
408         cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
409                                                 cfs_hash_dhead_dep_t, dd_head);
410
411         if (dh->dd_tail != NULL) /* not empty */
412                 hlist_add_after(dh->dd_tail, hnode);
413         else /* empty list */
414                 hlist_add_head(hnode, &dh->dd_head);
415         dh->dd_tail = hnode;
416         return ++dh->dd_depth;
417 }
418
419 static int
420 cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
421                       struct hlist_node *hnd)
422 {
423         cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
424                                                 cfs_hash_dhead_dep_t, dd_head);
425
426         if (hnd->next == NULL) { /* it's the tail */
427                 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
428                               container_of(hnd->pprev, struct hlist_node, next);
429         }
430         hlist_del_init(hnd);
431         return --dh->dd_depth;
432 }
433
434 static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
435        .hop_hhead      = cfs_hash_hh_hhead,
436        .hop_hhead_size = cfs_hash_hh_hhead_size,
437        .hop_hnode_add  = cfs_hash_hh_hnode_add,
438        .hop_hnode_del  = cfs_hash_hh_hnode_del,
439 };
440
441 static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
442        .hop_hhead      = cfs_hash_hd_hhead,
443        .hop_hhead_size = cfs_hash_hd_hhead_size,
444        .hop_hnode_add  = cfs_hash_hd_hnode_add,
445        .hop_hnode_del  = cfs_hash_hd_hnode_del,
446 };
447
448 static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
449        .hop_hhead      = cfs_hash_dh_hhead,
450        .hop_hhead_size = cfs_hash_dh_hhead_size,
451        .hop_hnode_add  = cfs_hash_dh_hnode_add,
452        .hop_hnode_del  = cfs_hash_dh_hnode_del,
453 };
454
455 static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
456        .hop_hhead      = cfs_hash_dd_hhead,
457        .hop_hhead_size = cfs_hash_dd_hhead_size,
458        .hop_hnode_add  = cfs_hash_dd_hnode_add,
459        .hop_hnode_del  = cfs_hash_dd_hnode_del,
460 };
461
462 static void
463 cfs_hash_hlist_setup(cfs_hash_t *hs)
464 {
465         if (cfs_hash_with_add_tail(hs)) {
466                 hs->hs_hops = cfs_hash_with_depth(hs) ?
467                               &cfs_hash_dd_hops : &cfs_hash_dh_hops;
468         } else {
469                 hs->hs_hops = cfs_hash_with_depth(hs) ?
470                               &cfs_hash_hd_hops : &cfs_hash_hh_hops;
471         }
472 }
473
474 static void
475 cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
476                      unsigned int bits, const void *key, cfs_hash_bd_t *bd)
477 {
478         unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
479
480         LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
481
482         bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
483         bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
484 }
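/*
 * Worked example of the decomposition above (numbers are illustrative):
 * with bits = 7 and hs_bkt_bits = 3, cfs_hash_id() yields a 7-bit index.
 * The low (bits - bkt_bits) = 4 bits select one of 16 buckets, and the
 * remaining high 3 bits become bd_offset, choosing one of the 8 hlist
 * heads inside that bucket.  E.g. index = 0x5b: bd_bucket = bkts[0xb],
 * bd_offset = 0x5.
 */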
485
486 void
487 cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
488 {
489         /* NB: caller should hold hs->hs_lock.rw if REHASH is set */
490         if (likely(hs->hs_rehash_buckets == NULL)) {
491                 cfs_hash_bd_from_key(hs, hs->hs_buckets,
492                                      hs->hs_cur_bits, key, bd);
493         } else {
494                 LASSERT(hs->hs_rehash_bits != 0);
495                 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
496                                      hs->hs_rehash_bits, key, bd);
497         }
498 }
499 EXPORT_SYMBOL(cfs_hash_bd_get);
500
501 static inline void
502 cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
503 {
504         if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
505                 return;
506
507         bd->bd_bucket->hsb_depmax = dep_cur;
508 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
509         if (likely(warn_on_depth == 0 ||
510                    max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
511                 return;
512
513         spin_lock(&hs->hs_dep_lock);
514         hs->hs_dep_max  = dep_cur;
515         hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
516         hs->hs_dep_off  = bd->bd_offset;
517         hs->hs_dep_bits = hs->hs_cur_bits;
518         spin_unlock(&hs->hs_dep_lock);
519
520         cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
521 # endif
522 }
523
524 void
525 cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
526                         struct hlist_node *hnode)
527 {
528         int rc;
529
530         rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
531         cfs_hash_bd_dep_record(hs, bd, rc);
532         bd->bd_bucket->hsb_version++;
533         if (unlikely(bd->bd_bucket->hsb_version == 0))
534                 bd->bd_bucket->hsb_version++;
535         bd->bd_bucket->hsb_count++;
536
537         if (cfs_hash_with_counter(hs))
538                 atomic_inc(&hs->hs_count);
539         if (!cfs_hash_with_no_itemref(hs))
540                 cfs_hash_get(hs, hnode);
541 }
542 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
543
544 void
545 cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
546                        struct hlist_node *hnode)
547 {
548         hs->hs_hops->hop_hnode_del(hs, bd, hnode);
549
550         LASSERT(bd->bd_bucket->hsb_count > 0);
551         bd->bd_bucket->hsb_count--;
552         bd->bd_bucket->hsb_version++;
553         if (unlikely(bd->bd_bucket->hsb_version == 0))
554                 bd->bd_bucket->hsb_version++;
555
556         if (cfs_hash_with_counter(hs)) {
557                 LASSERT(atomic_read(&hs->hs_count) > 0);
558                 atomic_dec(&hs->hs_count);
559         }
560         if (!cfs_hash_with_no_itemref(hs))
561                 cfs_hash_put_locked(hs, hnode);
562 }
563 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
564
565 void
566 cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
567                         cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
568 {
569         cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
570         cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
571         int                rc;
572
573         if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
574                 return;
575
576         /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
577          * in cfs_hash_bd_del/add_locked */
578         hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
579         rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
580         cfs_hash_bd_dep_record(hs, bd_new, rc);
581
582         LASSERT(obkt->hsb_count > 0);
583         obkt->hsb_count--;
584         obkt->hsb_version++;
585         if (unlikely(obkt->hsb_version == 0))
586                 obkt->hsb_version++;
587         nbkt->hsb_count++;
588         nbkt->hsb_version++;
589         if (unlikely(nbkt->hsb_version == 0))
590                 nbkt->hsb_version++;
591 }
592 EXPORT_SYMBOL(cfs_hash_bd_move_locked);
593
594 enum {
595         /** always set, for sanity (avoid ZERO intent) */
596         CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
597         /** return entry with a ref */
598         CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
599         /** add entry if not existing */
600         CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
601         /** delete entry, ignore other masks */
602         CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
603 };
604
605 typedef enum cfs_hash_lookup_intent {
606         /** return item w/o refcount */
607         CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
608         /** return item with refcount */
609         CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
610                                        CFS_HS_LOOKUP_MASK_REF),
611         /** return item w/o refcount if it exists, otherwise add */
612         CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
613                                        CFS_HS_LOOKUP_MASK_ADD),
614         /** return item with refcount if it exists, otherwise add */
615         CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
616                                        CFS_HS_LOOKUP_MASK_ADD),
617         /** delete if it exists */
618         CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
619                                        CFS_HS_LOOKUP_MASK_DEL)
620 } cfs_hash_lookup_intent_t;
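/*
 * For reference, the composed intents above work out to: PEEK = 0x1
 * (FIND), FIND = 0x3 (FIND|REF), ADD = 0x5 (FIND|ADD), FINDADD = 0x7
 * (FIND|REF|ADD) and FINDDEL = 0x9 (FIND|DEL).  DEL is checked first in
 * cfs_hash_bd_lookup_intent(), so it overrides the other masks.
 */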
621
622 static struct hlist_node *
623 cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
624                           const void *key, struct hlist_node *hnode,
625                           cfs_hash_lookup_intent_t intent)
626
627 {
628         struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
629         struct hlist_node  *ehnode;
630         struct hlist_node  *match;
631         int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
632
633         /* with this function, we can avoid a lot of useless refcount ops,
634          * which are expensive atomic operations most of the time. */
635         match = intent_add ? NULL : hnode;
636         hlist_for_each(ehnode, hhead) {
637                 if (!cfs_hash_keycmp(hs, key, ehnode))
638                         continue;
639
640                 if (match != NULL && match != ehnode) /* can't match */
641                         continue;
642
643                 /* match and ... */
644                 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
645                         cfs_hash_bd_del_locked(hs, bd, ehnode);
646                         return ehnode;
647                 }
648
649                 /* caller wants refcount? */
650                 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
651                         cfs_hash_get(hs, ehnode);
652                 return ehnode;
653         }
654         /* no matching item */
655         if (!intent_add)
656                 return NULL;
657
658         LASSERT(hnode != NULL);
659         cfs_hash_bd_add_locked(hs, bd, hnode);
660         return hnode;
661 }
662
663 struct hlist_node *
664 cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
665 {
666         return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
667                                         CFS_HS_LOOKUP_IT_FIND);
668 }
669 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
670
671 struct hlist_node *
672 cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
673 {
674         return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
675                                         CFS_HS_LOOKUP_IT_PEEK);
676 }
677 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
678
679 struct hlist_node *
680 cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
681                            const void *key, struct hlist_node *hnode,
682                            int noref)
683 {
684         return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
685                                         CFS_HS_LOOKUP_IT_ADD |
686                                         (!noref * CFS_HS_LOOKUP_MASK_REF));
687 }
688 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
689
690 struct hlist_node *
691 cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
692                            const void *key, struct hlist_node *hnode)
693 {
694         /* hnode can be NULL; in that case we find the first item with @key */
695         return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
696                                         CFS_HS_LOOKUP_IT_FINDDEL);
697 }
698 EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
699
700 static void
701 cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
702                        unsigned n, int excl)
703 {
704         cfs_hash_bucket_t *prev = NULL;
705         int                i;
706
707         /**
708          * bds must be sorted in ascending order of bd->bd_bucket->hsb_index.
709          * NB: it's possible that several bds point to the same bucket but
710          * have different bd::bd_offset, so we must take care to avoid deadlock.
711          */
712         cfs_hash_for_each_bd(bds, n, i) {
713                 if (prev == bds[i].bd_bucket)
714                         continue;
715
716                 LASSERT(prev == NULL ||
717                         prev->hsb_index < bds[i].bd_bucket->hsb_index);
718                 cfs_hash_bd_lock(hs, &bds[i], excl);
719                 prev = bds[i].bd_bucket;
720         }
721 }
722
723 static void
724 cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
725                          unsigned n, int excl)
726 {
727         cfs_hash_bucket_t *prev = NULL;
728         int                i;
729
730         cfs_hash_for_each_bd(bds, n, i) {
731                 if (prev != bds[i].bd_bucket) {
732                         cfs_hash_bd_unlock(hs, &bds[i], excl);
733                         prev = bds[i].bd_bucket;
734                 }
735         }
736 }
737
738 static struct hlist_node *
739 cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
740                                 unsigned n, const void *key)
741 {
742         struct hlist_node *ehnode;
743         unsigned          i;
744
745         cfs_hash_for_each_bd(bds, n, i) {
746                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
747                                                         CFS_HS_LOOKUP_IT_FIND);
748                 if (ehnode != NULL)
749                         return ehnode;
750         }
751         return NULL;
752 }
753
754 static struct hlist_node *
755 cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
756                                  cfs_hash_bd_t *bds, unsigned n, const void *key,
757                                  struct hlist_node *hnode, int noref)
758 {
759         struct hlist_node *ehnode;
760         int               intent;
761         unsigned          i;
762
763         LASSERT(hnode != NULL);
764         intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
765
766         cfs_hash_for_each_bd(bds, n, i) {
767                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
768                                                    NULL, intent);
769                 if (ehnode != NULL)
770                         return ehnode;
771         }
772
773         if (i == 1) { /* only one bucket */
774                 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
775         } else {
776                 cfs_hash_bd_t      mybd;
777
778                 cfs_hash_bd_get(hs, key, &mybd);
779                 cfs_hash_bd_add_locked(hs, &mybd, hnode);
780         }
781
782         return hnode;
783 }
784
785 static struct hlist_node *
786 cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
787                                  unsigned n, const void *key,
788                                  struct hlist_node *hnode)
789 {
790         struct hlist_node *ehnode;
791         unsigned           i;
792
793         cfs_hash_for_each_bd(bds, n, i) {
794                 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
795                                                    CFS_HS_LOOKUP_IT_FINDDEL);
796                 if (ehnode != NULL)
797                         return ehnode;
798         }
799         return NULL;
800 }
801
802 static void
803 cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
804 {
805         int     rc;
806
807         if (bd2->bd_bucket == NULL)
808                 return;
809
810         if (bd1->bd_bucket == NULL) {
811                 *bd1 = *bd2;
812                 bd2->bd_bucket = NULL;
813                 return;
814         }
815
816         rc = cfs_hash_bd_compare(bd1, bd2);
817         if (rc == 0) {
818                 bd2->bd_bucket = NULL;
819
820         } else if (rc > 0) { /* swap bd1 and bd2 */
821                 cfs_hash_bd_t tmp;
822
823                 tmp = *bd2;
824                 *bd2 = *bd1;
825                 *bd1 = tmp;
826         }
827 }
828
829 void
830 cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
831 {
832         /* NB: caller should hold hs_lock.rw if REHASH is set */
833         cfs_hash_bd_from_key(hs, hs->hs_buckets,
834                              hs->hs_cur_bits, key, &bds[0]);
835         if (likely(hs->hs_rehash_buckets == NULL)) {
836                 /* no rehash or not rehashing */
837                 bds[1].bd_bucket = NULL;
838                 return;
839         }
840
841         LASSERT(hs->hs_rehash_bits != 0);
842         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
843                              hs->hs_rehash_bits, key, &bds[1]);
844
845         cfs_hash_bd_order(&bds[0], &bds[1]);
846 }
847 EXPORT_SYMBOL(cfs_hash_dual_bd_get);
848
849 void
850 cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
851 {
852         cfs_hash_multi_bd_lock(hs, bds, 2, excl);
853 }
854 EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
855
856 void
857 cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
858 {
859         cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
860 }
861 EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
862
863 struct hlist_node *
864 cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
865                                const void *key)
866 {
867         return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
868 }
869 EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
870
871 struct hlist_node *
872 cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
873                                 const void *key, struct hlist_node *hnode,
874                                 int noref)
875 {
876         return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
877                                                 hnode, noref);
878 }
879 EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
880
881 struct hlist_node *
882 cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
883                                 const void *key, struct hlist_node *hnode)
884 {
885         return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
886 }
887 EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
888
889 static void
890 cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
891                       int bkt_size, int prev_size, int size)
892 {
893         int     i;
894
895         for (i = prev_size; i < size; i++) {
896                 if (buckets[i] != NULL)
897                         LIBCFS_FREE(buckets[i], bkt_size);
898         }
899
900         LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
901 }
902
903 /*
904  * Create or grow bucket memory. Return old_buckets if no allocation was
905  * needed, the newly allocated buckets if allocation was needed and
906  * successful, and NULL on error.
907  */
908 static cfs_hash_bucket_t **
909 cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
910                          unsigned int old_size, unsigned int new_size)
911 {
912         cfs_hash_bucket_t **new_bkts;
913         int                 i;
914
915         LASSERT(old_size == 0 || old_bkts != NULL);
916
917         if (old_bkts != NULL && old_size == new_size)
918                 return old_bkts;
919
920         LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
921         if (new_bkts == NULL)
922                 return NULL;
923
924         if (old_bkts != NULL) {
925                 memcpy(new_bkts, old_bkts,
926                        min(old_size, new_size) * sizeof(*old_bkts));
927         }
928
929         for (i = old_size; i < new_size; i++) {
930                 struct hlist_head *hhead;
931                 cfs_hash_bd_t     bd;
932
933                 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
934                 if (new_bkts[i] == NULL) {
935                         cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
936                                               old_size, new_size);
937                         return NULL;
938                 }
939
940                 new_bkts[i]->hsb_index   = i;
941                 new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
942                 new_bkts[i]->hsb_depmax  = -1; /* unknown */
943                 bd.bd_bucket = new_bkts[i];
944                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
945                         INIT_HLIST_HEAD(hhead);
946
947                 if (cfs_hash_with_no_lock(hs) ||
948                     cfs_hash_with_no_bktlock(hs))
949                         continue;
950
951                 if (cfs_hash_with_rw_bktlock(hs))
952                         rwlock_init(&new_bkts[i]->hsb_lock.rw);
953                 else if (cfs_hash_with_spin_bktlock(hs))
954                         spin_lock_init(&new_bkts[i]->hsb_lock.spin);
955                 else
956                         LBUG(); /* invalid use-case */
957         }
958         return new_bkts;
959 }
960
961 /**
962  * Initialize new libcfs hash, where:
963  * @name     - Descriptive hash name
964  * @cur_bits - Initial hash table size, in bits
965  * @max_bits - Maximum allowed hash table resize, in bits
966  * @ops      - Registered hash table operations
967  * @flags    - CFS_HASH_REHASH enables dynamic hash resizing
968  *           - CFS_HASH_SORT enables chained hash sort
969  */
970 static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
971
972 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
973 static int cfs_hash_dep_print(cfs_workitem_t *wi)
974 {
975         cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
976         int         dep;
977         int         bkt;
978         int         off;
979         int         bits;
980
981         spin_lock(&hs->hs_dep_lock);
982         dep  = hs->hs_dep_max;
983         bkt  = hs->hs_dep_bkt;
984         off  = hs->hs_dep_off;
985         bits = hs->hs_dep_bits;
986         spin_unlock(&hs->hs_dep_lock);
987
988         LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
989                       hs->hs_name, bits, dep, bkt, off);
990         spin_lock(&hs->hs_dep_lock);
991         hs->hs_dep_bits = 0; /* mark as workitem done */
992         spin_unlock(&hs->hs_dep_lock);
993         return 0;
994 }
995
996 static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
997 {
998         spin_lock_init(&hs->hs_dep_lock);
999         cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
1000 }
1001
1002 static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
1003 {
1004         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
1005                 return;
1006
1007         spin_lock(&hs->hs_dep_lock);
1008         while (hs->hs_dep_bits != 0) {
1009                 spin_unlock(&hs->hs_dep_lock);
1010                 cond_resched();
1011                 spin_lock(&hs->hs_dep_lock);
1012         }
1013         spin_unlock(&hs->hs_dep_lock);
1014 }
1015
1016 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1017
1018 static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
1019 static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}
1020
1021 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1022
1023 cfs_hash_t *
1024 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1025                 unsigned bkt_bits, unsigned extra_bytes,
1026                 unsigned min_theta, unsigned max_theta,
1027                 cfs_hash_ops_t *ops, unsigned flags)
1028 {
1029         cfs_hash_t *hs;
1030         int         len;
1031
1032         ENTRY;
1033
1034         CLASSERT(CFS_HASH_THETA_BITS < 15);
1035
1036         LASSERT(name != NULL);
1037         LASSERT(ops != NULL);
1038         LASSERT(ops->hs_key);
1039         LASSERT(ops->hs_hash);
1040         LASSERT(ops->hs_object);
1041         LASSERT(ops->hs_keycmp);
1042         LASSERT(ops->hs_get != NULL);
1043         LASSERT(ops->hs_put_locked != NULL);
1044
1045         if ((flags & CFS_HASH_REHASH) != 0)
1046                 flags |= CFS_HASH_COUNTER; /* must have counter */
1047
1048         LASSERT(cur_bits > 0);
1049         LASSERT(cur_bits >= bkt_bits);
1050         LASSERT(max_bits >= cur_bits && max_bits < 31);
1051         LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1052         LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1053                      (flags & CFS_HASH_NO_LOCK) == 0));
1054         LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1055                       ops->hs_keycpy != NULL));
1056
1057         len = (flags & CFS_HASH_BIGNAME) == 0 ?
1058               CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1059         LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
1060         if (hs == NULL)
1061                 RETURN(NULL);
1062
1063         strlcpy(hs->hs_name, name, len);
1064         hs->hs_flags = flags;
1065
1066         atomic_set(&hs->hs_refcount, 1);
1067         atomic_set(&hs->hs_count, 0);
1068
1069         cfs_hash_lock_setup(hs);
1070         cfs_hash_hlist_setup(hs);
1071
1072         hs->hs_cur_bits = (__u8)cur_bits;
1073         hs->hs_min_bits = (__u8)cur_bits;
1074         hs->hs_max_bits = (__u8)max_bits;
1075         hs->hs_bkt_bits = (__u8)bkt_bits;
1076
1077         hs->hs_ops         = ops;
1078         hs->hs_extra_bytes = extra_bytes;
1079         hs->hs_rehash_bits = 0;
1080         cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1081         cfs_hash_depth_wi_init(hs);
1082
1083         if (cfs_hash_with_rehash(hs))
1084                 __cfs_hash_set_theta(hs, min_theta, max_theta);
1085
1086         hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1087                                                   CFS_HASH_NBKT(hs));
1088         if (hs->hs_buckets != NULL)
1089                 return hs;
1090
1091         LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
1092         RETURN(NULL);
1093 }
1094 EXPORT_SYMBOL(cfs_hash_create);
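/*
 * Creation sketch (hypothetical caller: "my_obj" and the my_* callbacks
 * are illustrative, and their signatures are assumptions inferred from
 * how the cfs_hash_* wrappers in this file invoke ops):
 *
 *      static void *my_key(struct hlist_node *hnode)
 *      {
 *              return &container_of(hnode, struct my_obj, mo_node)->mo_key;
 *      }
 *
 *      static cfs_hash_ops_t my_ops = {
 *              .hs_hash        = my_hash,
 *              .hs_key         = my_key,
 *              .hs_keycmp      = my_keycmp,
 *              .hs_object      = my_object,
 *              .hs_get         = my_get,
 *              .hs_put_locked  = my_put_locked,
 *      };
 *
 *      hs = cfs_hash_create("my_hash", 5, 10, 3, 0,
 *                           CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *                           &my_ops, CFS_HASH_REHASH);
 *
 * my_get/my_put_locked take and drop a reference on the object
 * (put_locked must not sleep, it can be called under the bucket lock).
 * The hash is destroyed by cfs_hash_putref() when the last reference is
 * dropped.  CFS_HASH_MIN_THETA/CFS_HASH_MAX_THETA are assumed here to be
 * the default load-factor bounds exported by the libcfs hash header.
 */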
1095
1096 /**
1097  * Cleanup libcfs hash @hs.
1098  */
1099 static void
1100 cfs_hash_destroy(cfs_hash_t *hs)
1101 {
1102         struct hlist_node     *hnode;
1103         struct hlist_node     *pos;
1104         cfs_hash_bd_t         bd;
1105         int                   i;
1106         ENTRY;
1107
1108         LASSERT(hs != NULL);
1109         LASSERT(!cfs_hash_is_exiting(hs) &&
1110                 !cfs_hash_is_iterating(hs));
1111
1112         /**
1113          * prohibit further rehashes; no lock is needed because
1114          * I'm the only (last) one who can change it.
1115          */
1116         hs->hs_exiting = 1;
1117         if (cfs_hash_with_rehash(hs))
1118                 cfs_hash_rehash_cancel(hs);
1119
1120         cfs_hash_depth_wi_cancel(hs);
1121         /* rehash should be done/canceled */
1122         LASSERT(hs->hs_buckets != NULL &&
1123                 hs->hs_rehash_buckets == NULL);
1124
1125         cfs_hash_for_each_bucket(hs, &bd, i) {
1126                 struct hlist_head *hhead;
1127
1128                 LASSERT(bd.bd_bucket != NULL);
1129                 /* no need to take this lock, just for code consistency */
1130                 cfs_hash_bd_lock(hs, &bd, 1);
1131
1132                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1133                         hlist_for_each_safe(hnode, pos, hhead) {
1134                                 LASSERTF(!cfs_hash_with_assert_empty(hs),
1135                                          "hash %s bucket %u(%u) is not "
1136                                          "empty: %u items left\n",
1137                                          hs->hs_name, bd.bd_bucket->hsb_index,
1138                                          bd.bd_offset, bd.bd_bucket->hsb_count);
1139                                 /* can't assert key validity, because we
1140                                  * can interrupt rehash */
1141                                 cfs_hash_bd_del_locked(hs, &bd, hnode);
1142                                 cfs_hash_exit(hs, hnode);
1143                         }
1144                 }
1145                 LASSERT(bd.bd_bucket->hsb_count == 0);
1146                 cfs_hash_bd_unlock(hs, &bd, 1);
1147                 cond_resched();
1148         }
1149
1150         LASSERT(atomic_read(&hs->hs_count) == 0);
1151
1152         cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1153                               0, CFS_HASH_NBKT(hs));
1154         i = cfs_hash_with_bigname(hs) ?
1155             CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1156         LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
1157
1158         EXIT;
1159 }
1160
1161 cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
1162 {
1163         if (atomic_inc_not_zero(&hs->hs_refcount))
1164                 return hs;
1165         return NULL;
1166 }
1167 EXPORT_SYMBOL(cfs_hash_getref);
1168
1169 void cfs_hash_putref(cfs_hash_t *hs)
1170 {
1171         if (atomic_dec_and_test(&hs->hs_refcount))
1172                 cfs_hash_destroy(hs);
1173 }
1174 EXPORT_SYMBOL(cfs_hash_putref);
1175
1176 static inline int
1177 cfs_hash_rehash_bits(cfs_hash_t *hs)
1178 {
1179         if (cfs_hash_with_no_lock(hs) ||
1180             !cfs_hash_with_rehash(hs))
1181                 return -EOPNOTSUPP;
1182
1183         if (unlikely(cfs_hash_is_exiting(hs)))
1184                 return -ESRCH;
1185
1186         if (unlikely(cfs_hash_is_rehashing(hs)))
1187                 return -EALREADY;
1188
1189         if (unlikely(cfs_hash_is_iterating(hs)))
1190                 return -EAGAIN;
1191
1192         /* XXX: need to handle case with max_theta != 2.0
1193          *      and the case with min_theta != 0.5 */
1194         if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1195             (__cfs_hash_theta(hs) > hs->hs_max_theta))
1196                 return hs->hs_cur_bits + 1;
1197
1198         if (!cfs_hash_with_shrink(hs))
1199                 return 0;
1200
1201         if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1202             (__cfs_hash_theta(hs) < hs->hs_min_theta))
1203                 return hs->hs_cur_bits - 1;
1204
1205         return 0;
1206 }
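/*
 * Illustration (assuming __cfs_hash_theta() computes the load factor in
 * fixed point, roughly (hs_count << CFS_HASH_THETA_BITS) >> hs_cur_bits):
 * a table with cur_bits = 10 (1024 slots) holding 4096 items has
 * theta = 4.0; with a max_theta of 2.0 the function above returns
 * cur_bits + 1 = 11, asking for the table to be doubled.
 */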
1207
1208 /**
1209  * don't allow inline rehash if:
1210  * - user wants non-blocking change (add/del) on hash table
1211  * - too many elements
1212  */
1213 static inline int
1214 cfs_hash_rehash_inline(cfs_hash_t *hs)
1215 {
1216         return !cfs_hash_with_nblk_change(hs) &&
1217                atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1218 }
1219
1220 /**
1221  * Add item @hnode to libcfs hash @hs using @key.  The registered
1222  * ops->hs_get function will be called when the item is added.
1223  */
1224 void
1225 cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
1226 {
1227         cfs_hash_bd_t   bd;
1228         int             bits;
1229
1230         LASSERT(hlist_unhashed(hnode));
1231
1232         cfs_hash_lock(hs, 0);
1233         cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1234
1235         cfs_hash_key_validate(hs, key, hnode);
1236         cfs_hash_bd_add_locked(hs, &bd, hnode);
1237
1238         cfs_hash_bd_unlock(hs, &bd, 1);
1239
1240         bits = cfs_hash_rehash_bits(hs);
1241         cfs_hash_unlock(hs, 0);
1242         if (bits > 0)
1243                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1244 }
1245 EXPORT_SYMBOL(cfs_hash_add);
1246
1247 static struct hlist_node *
1248 cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
1249                      struct hlist_node *hnode, int noref)
1250 {
1251         struct hlist_node *ehnode;
1252         cfs_hash_bd_t     bds[2];
1253         int               bits = 0;
1254
1255         LASSERT(hlist_unhashed(hnode));
1256
1257         cfs_hash_lock(hs, 0);
1258         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1259
1260         cfs_hash_key_validate(hs, key, hnode);
1261         ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1262                                                  hnode, noref);
1263         cfs_hash_dual_bd_unlock(hs, bds, 1);
1264
1265         if (ehnode == hnode) /* new item added */
1266                 bits = cfs_hash_rehash_bits(hs);
1267         cfs_hash_unlock(hs, 0);
1268         if (bits > 0)
1269                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1270
1271         return ehnode;
1272 }
1273
1274 /**
1275  * Add item @hnode to libcfs hash @hs using @key.  The registered
1276  * ops->hs_get function will be called if the item was added.
1277  * Returns 0 on success or -EALREADY on key collisions.
1278  */
1279 int
1280 cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
1281 {
1282         return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1283                -EALREADY : 0;
1284 }
1285 EXPORT_SYMBOL(cfs_hash_add_unique);
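/*
 * Usage sketch (hypothetical caller, continuing the my_obj example): on
 * a successful add, the hash takes its own reference via ops->hs_get.
 *
 *      cfs_hash_add(hs, &obj->mo_key, &obj->mo_node);
 *
 * or, when duplicate keys must be rejected (my_obj_free() is assumed):
 *
 *      if (cfs_hash_add_unique(hs, &obj->mo_key, &obj->mo_node) == -EALREADY)
 *              my_obj_free(obj);
 */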
1286
1287 /**
1288  * Add item @hnode to libcfs hash @hs using @key.  If this @key
1289  * already exists in the hash then ops->hs_get will be called on the
1290  * conflicting entry and that entry will be returned to the caller.
1291  * Otherwise ops->hs_get is called on the item which was added.
1292  */
1293 void *
1294 cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
1295                         struct hlist_node *hnode)
1296 {
1297         hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1298
1299         return cfs_hash_object(hs, hnode);
1300 }
1301 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1302
1303 /**
1304  * Delete item @hnode from the libcfs hash @hs using @key.  The @key
1305  * is required to ensure the correct hash bucket is locked since there
1306  * is no direct linkage from the item to the bucket.  The object
1307  * removed from the hash will be returned and obs->hs_put is called
1308  * on the removed object.
1309  */
1310 void *
1311 cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
1312 {
1313         void           *obj  = NULL;
1314         int             bits = 0;
1315         cfs_hash_bd_t   bds[2];
1316
1317         cfs_hash_lock(hs, 0);
1318         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1319
1320         /* NB: do nothing if @hnode is not in hash table */
1321         if (hnode == NULL || !hlist_unhashed(hnode)) {
1322                 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1323                         cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1324                 } else {
1325                         hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1326                                                                 key, hnode);
1327                 }
1328         }
1329
1330         if (hnode != NULL) {
1331                 obj  = cfs_hash_object(hs, hnode);
1332                 bits = cfs_hash_rehash_bits(hs);
1333         }
1334
1335         cfs_hash_dual_bd_unlock(hs, bds, 1);
1336         cfs_hash_unlock(hs, 0);
1337         if (bits > 0)
1338                 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1339
1340         return obj;
1341 }
1342 EXPORT_SYMBOL(cfs_hash_del);
1343
1344 /**
1345  * Delete item given @key in libcfs hash @hs.  The first @key found in
1346  * the hash will be removed; if the key exists multiple times in the hash
1347  * @hs, this function must be called once per key.  The removed object
1348  * will be returned and ops->hs_put is called on the removed object.
1349  */
1350 void *
1351 cfs_hash_del_key(cfs_hash_t *hs, const void *key)
1352 {
1353         return cfs_hash_del(hs, key, NULL);
1354 }
1355 EXPORT_SYMBOL(cfs_hash_del_key);
1356
1357 /**
1358  * Lookup an item using @key in the libcfs hash @hs and return it.
1359  * If the @key is found in the hash, hs->hs_get() is called and the
1360  * matching object is returned.  It is the caller's responsibility
1361  * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1362  * when finished with the object.  If the @key was not found
1363  * in the hash @hs, NULL is returned.
1364  */
1365 void *
1366 cfs_hash_lookup(cfs_hash_t *hs, const void *key)
1367 {
1368         void                 *obj = NULL;
1369         struct hlist_node     *hnode;
1370         cfs_hash_bd_t         bds[2];
1371
1372         cfs_hash_lock(hs, 0);
1373         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1374
1375         hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1376         if (hnode != NULL)
1377                 obj = cfs_hash_object(hs, hnode);
1378
1379         cfs_hash_dual_bd_unlock(hs, bds, 0);
1380         cfs_hash_unlock(hs, 0);
1381
1382         return obj;
1383 }
1384 EXPORT_SYMBOL(cfs_hash_lookup);
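/*
 * Lookup sketch (hypothetical caller): the returned object carries the
 * reference taken by ops->hs_get, so it must be released with the
 * cfs_hash_put() macro when done, per the comment above:
 *
 *      struct my_obj *obj = cfs_hash_lookup(hs, &key);
 *
 *      if (obj != NULL) {
 *              ... use obj ...
 *              cfs_hash_put(hs, &obj->mo_node);
 *      }
 */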
1385
1386 static void
1387 cfs_hash_for_each_enter(cfs_hash_t *hs)
1388 {
1389         LASSERT(!cfs_hash_is_exiting(hs));
1390
1391         if (!cfs_hash_with_rehash(hs))
1392                 return;
1393         /*
1394          * NB: there is a race on cfs_hash_t::hs_iterating, but it doesn't
1395          * matter because it's just an unreliable signal to the rehash-thread;
1396          * the rehash-thread will try to finish the rehash ASAP upon seeing it.
1397          */
1398         hs->hs_iterating = 1;
1399
1400         cfs_hash_lock(hs, 1);
1401         hs->hs_iterators++;
1402
1403         /* NB: iteration is mostly called by service threads;
1404          * we tend to cancel a pending rehash-request instead of
1405          * blocking the service thread, and will relaunch the rehash
1406          * request after iteration */
1407         if (cfs_hash_is_rehashing(hs))
1408                 cfs_hash_rehash_cancel_locked(hs);
1409         cfs_hash_unlock(hs, 1);
1410 }
1411
1412 static void
1413 cfs_hash_for_each_exit(cfs_hash_t *hs)
1414 {
1415         int remained;
1416         int bits;
1417
1418         if (!cfs_hash_with_rehash(hs))
1419                 return;
1420         cfs_hash_lock(hs, 1);
1421         remained = --hs->hs_iterators;
1422         bits = cfs_hash_rehash_bits(hs);
1423         cfs_hash_unlock(hs, 1);
1424         /* NB: there is a race on cfs_hash_t::hs_iterating, see above */
1425         if (remained == 0)
1426                 hs->hs_iterating = 0;
1427         if (bits > 0) {
1428                 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1429                                     CFS_HASH_LOOP_HOG);
1430         }
1431 }
1432
1433 /**
1434  * For each item in the libcfs hash @hs, call the passed callback @func,
1435  * passing it the hash item and the private @data as arguments.
1436  *
1437  * a) the function may sleep!
1438  * b) during the callback:
1439  *    . the bucket lock is held so the callback must never sleep.
1440  *    . if @removal_safe is true, use can remove current item by
1441  *      cfs_hash_bd_del_locked
1442  */
1443 static __u64
1444 cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
1445                         void *data, int remove_safe)
1446 {
1447         struct hlist_node       *hnode;
1448         struct hlist_node       *pos;
1449         cfs_hash_bd_t           bd;
1450         __u64                   count = 0;
1451         int                     excl  = !!remove_safe;
1452         int                     loop  = 0;
1453         int                     i;
1454         ENTRY;
1455
1456         cfs_hash_for_each_enter(hs);
1457
1458         cfs_hash_lock(hs, 0);
1459         LASSERT(!cfs_hash_is_rehashing(hs));
1460
1461         cfs_hash_for_each_bucket(hs, &bd, i) {
1462                 struct hlist_head *hhead;
1463
1464                 cfs_hash_bd_lock(hs, &bd, excl);
1465                 if (func == NULL) { /* only glimpse size */
1466                         count += bd.bd_bucket->hsb_count;
1467                         cfs_hash_bd_unlock(hs, &bd, excl);
1468                         continue;
1469                 }
1470
1471                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1472                         hlist_for_each_safe(hnode, pos, hhead) {
1473                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1474                                 count++;
1475                                 loop++;
1476                                 if (func(hs, &bd, hnode, data)) {
1477                                         cfs_hash_bd_unlock(hs, &bd, excl);
1478                                         goto out;
1479                                 }
1480                         }
1481                 }
1482                 cfs_hash_bd_unlock(hs, &bd, excl);
1483                 if (loop < CFS_HASH_LOOP_HOG)
1484                         continue;
1485                 loop = 0;
1486                 cfs_hash_unlock(hs, 0);
1487                 cond_resched();
1488                 cfs_hash_lock(hs, 0);
1489         }
1490  out:
1491         cfs_hash_unlock(hs, 0);
1492
1493         cfs_hash_for_each_exit(hs);
1494         RETURN(count);
1495 }
1496
1497 typedef struct {
1498         cfs_hash_cond_opt_cb_t  func;
1499         void                   *arg;
1500 } cfs_hash_cond_arg_t;
1501
1502 static int
1503 cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1504                          struct hlist_node *hnode, void *data)
1505 {
1506         cfs_hash_cond_arg_t *cond = data;
1507
1508         if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1509                 cfs_hash_bd_del_locked(hs, bd, hnode);
1510         return 0;
1511 }
1512
1513 /**
1514  * Delete items from the libcfs hash @hs when @func returns true.
1515  * The write lock is held during the loop over each bucket to prevent
1516  * any object from being referenced.
1517  */
1518 void
1519 cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
1520 {
1521         cfs_hash_cond_arg_t arg = {
1522                 .func   = func,
1523                 .arg    = data,
1524         };
1525
1526         cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1527 }
1528 EXPORT_SYMBOL(cfs_hash_cond_del);
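/*
 * Predicate sketch (hypothetical; cfs_hash_cond_del_locked() above shows
 * that @func receives the object, not the hlist_node, plus the opaque
 * argument): drop every object whose generation is older than a cutoff;
 * "mo_gen" is an assumed field of the illustrative my_obj.
 *
 *      static int my_is_stale(void *obj, void *arg)
 *      {
 *              return ((struct my_obj *)obj)->mo_gen < *(__u64 *)arg;
 *      }
 *
 *      cfs_hash_cond_del(hs, my_is_stale, &cutoff);
 */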
1529
1530 void
1531 cfs_hash_for_each(cfs_hash_t *hs,
1532                   cfs_hash_for_each_cb_t func, void *data)
1533 {
1534         cfs_hash_for_each_tight(hs, func, data, 0);
1535 }
1536 EXPORT_SYMBOL(cfs_hash_for_each);
1537
1538 void
1539 cfs_hash_for_each_safe(cfs_hash_t *hs,
1540                        cfs_hash_for_each_cb_t func, void *data)
1541 {
1542         cfs_hash_for_each_tight(hs, func, data, 1);
1543 }
1544 EXPORT_SYMBOL(cfs_hash_for_each_safe);
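/*
 * Iteration callback sketch (the signature matches how
 * cfs_hash_for_each_tight() invokes @func, see cfs_hash_peek() below;
 * returning 1 from the callback stops the iteration):
 *
 *      static int my_count_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *                             struct hlist_node *hnode, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *      cfs_hash_for_each(hs, my_count_cb, &count);
 */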
1545
1546 static int
1547 cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
1548               struct hlist_node *hnode, void *data)
1549 {
1550         *(int *)data = 0;
1551         return 1; /* return 1 to break the loop */
1552 }
1553
1554 int
1555 cfs_hash_is_empty(cfs_hash_t *hs)
1556 {
1557         int empty = 1;
1558
1559         cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1560         return empty;
1561 }
1562 EXPORT_SYMBOL(cfs_hash_is_empty);
1563
1564 __u64
1565 cfs_hash_size_get(cfs_hash_t *hs)
1566 {
1567         return cfs_hash_with_counter(hs) ?
1568                atomic_read(&hs->hs_count) :
1569                cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1570 }
1571 EXPORT_SYMBOL(cfs_hash_size_get);
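/*
 * Editor's note: if the table was created with the CFS_HASH_COUNTER
 * flag, cfs_hash_size_get() is a single atomic_read(); otherwise it
 * falls back to a locked walk of every bucket.  A minimal sketch:
 */
#if 0
        if (!cfs_hash_is_empty(hs))
                CDEBUG(D_INFO, "%s holds %llu items\n", hs->hs_name,
                       (unsigned long long)cfs_hash_size_get(hs));
#endif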
1572
1573 /*
1574  * cfs_hash_for_each_relax:
1575  * Iterate the hash table and call @func on each item without
1576  * holding any lock.  This function cannot guarantee to finish the
1577  * iteration if either of these features is enabled:
1578  *
1579  *  a. if rehash_key is enabled, an item can be moved from
1580  *     one bucket to another bucket
1581  *  b. the user can remove a non-zero-ref item from the hash-table,
1582  *     so the item can disappear during iteration; even worse, the
1583  *     user may change its key and insert it into another
1584  *     hash bucket.
1585  * There is no way to finish the iteration correctly in those two
1586  * cases, so the iteration has to stop whenever a change is detected.
1587  */
1588 static int
1589 cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
1590 {
1591         struct hlist_node *hnode;
1592         struct hlist_node *tmp;
1593         cfs_hash_bd_t     bd;
1594         __u32             version;
1595         int               count = 0;
1596         int               stop_on_change;
1597         int               rc;
1598         int               i;
1599         ENTRY;
1600
1601         stop_on_change = cfs_hash_with_rehash_key(hs) ||
1602                          !cfs_hash_with_no_itemref(hs) ||
1603                          CFS_HOP(hs, put_locked) == NULL;
1604         cfs_hash_lock(hs, 0);
1605         LASSERT(!cfs_hash_is_rehashing(hs));
1606
1607         cfs_hash_for_each_bucket(hs, &bd, i) {
1608                 struct hlist_head *hhead;
1609
1610                 cfs_hash_bd_lock(hs, &bd, 0);
1611                 version = cfs_hash_bd_version_get(&bd);
1612
1613                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1614                         for (hnode = hhead->first; hnode != NULL;) {
1615                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1616                                 cfs_hash_get(hs, hnode);
1617                                 cfs_hash_bd_unlock(hs, &bd, 0);
1618                                 cfs_hash_unlock(hs, 0);
1619
1620                                 rc = func(hs, &bd, hnode, data);
1621                                 if (stop_on_change)
1622                                         cfs_hash_put(hs, hnode);
1623                                 cond_resched();
1624                                 count++;
1625
1626                                 cfs_hash_lock(hs, 0);
1627                                 cfs_hash_bd_lock(hs, &bd, 0);
1628                                 if (!stop_on_change) {
1629                                         tmp = hnode->next;
1630                                         cfs_hash_put_locked(hs, hnode);
1631                                         hnode = tmp;
1632                                 } else { /* bucket changed? */
1633                                         if (version !=
1634                                             cfs_hash_bd_version_get(&bd))
1635                                                 break;
1636                                         /* safe to continue because no change */
1637                                         hnode = hnode->next;
1638                                 }
1639                                 if (rc) /* callback wants to break iteration */
1640                                         break;
1641                         }
1642                 }
1643                 cfs_hash_bd_unlock(hs, &bd, 0);
1644         }
1645         cfs_hash_unlock(hs, 0);
1646
1647         return count;
1648 }
1649
1650 int
1651 cfs_hash_for_each_nolock(cfs_hash_t *hs,
1652                          cfs_hash_for_each_cb_t func, void *data)
1653 {
1654         ENTRY;
1655
1656         if (cfs_hash_with_no_lock(hs) ||
1657             cfs_hash_with_rehash_key(hs) ||
1658             !cfs_hash_with_no_itemref(hs))
1659                 RETURN(-EOPNOTSUPP);
1660
1661         if (CFS_HOP(hs, get) == NULL ||
1662             (CFS_HOP(hs, put) == NULL &&
1663              CFS_HOP(hs, put_locked) == NULL))
1664                 RETURN(-EOPNOTSUPP);
1665
1666         cfs_hash_for_each_enter(hs);
1667         cfs_hash_for_each_relax(hs, func, data);
1668         cfs_hash_for_each_exit(hs);
1669
1670         RETURN(0);
1671 }
1672 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
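/*
 * Editor's note: a hypothetical usage sketch, reusing my_obj_count_cb
 * from the sketch further above.  The lockless iterator refuses
 * (-EOPNOTSUPP) tables created with CFS_HASH_NO_LOCK or
 * CFS_HASH_REHASH_KEY, tables lacking CFS_HASH_NO_ITEMREF, and tables
 * whose hs_ops do not provide get plus put/put_locked, because the
 * callback runs, and may sleep, with no hash locks held.
 */
#if 0
        int nr = 0;

        if (cfs_hash_for_each_nolock(hs, my_obj_count_cb, &nr) == -EOPNOTSUPP)
                CERROR("%s: not configured for lockless iteration\n",
                       hs->hs_name);
#endif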
1673
1674 /**
1675  * For each hash bucket in the libcfs hash @hs call the passed callback
1676  * @func until all the hash buckets are empty.  The passed callback @func
1677  * or the previously registered callback hs->hs_put must remove the item
1678  * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1679  * functions.  No rwlocks will be held during the callback @func, so
1680  * it is safe to sleep if needed.  This function will not terminate
1681  * until the hash is empty.  Note it is still possible to concurrently
1682  * add new items into the hash.  It is the caller's responsibility to
1683  * ensure the required locking is in place to prevent concurrent insertions.
1684  */
1685 int
1686 cfs_hash_for_each_empty(cfs_hash_t *hs,
1687                         cfs_hash_for_each_cb_t func, void *data)
1688 {
1689         unsigned  i = 0;
1690         ENTRY;
1691
1692         if (cfs_hash_with_no_lock(hs))
1693                 return -EOPNOTSUPP;
1694
1695         if (CFS_HOP(hs, get) == NULL ||
1696             (CFS_HOP(hs, put) == NULL &&
1697              CFS_HOP(hs, put_locked) == NULL))
1698                 return -EOPNOTSUPP;
1699
1700         cfs_hash_for_each_enter(hs);
1701         while (cfs_hash_for_each_relax(hs, func, data)) {
1702                 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1703                        hs->hs_name, i++);
1704         }
1705         cfs_hash_for_each_exit(hs);
1706         RETURN(0);
1707 }
1708 EXPORT_SYMBOL(cfs_hash_for_each_empty);
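/*
 * Illustrative sketch (editor's addition, hypothetical object type,
 * key field and destructor): the usual teardown pattern built on
 * cfs_hash_for_each_empty().  The callback runs without hash locks,
 * so it may sleep, but it must unlink the item or the loop above
 * would never terminate.
 */
#if 0
static int my_obj_release_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                             struct hlist_node *hnode, void *data)
{
        struct my_obj *mo = cfs_hash_object(hs, hnode);

        cfs_hash_del(hs, &mo->mo_key, hnode);  /* unlink from the hash */
        my_obj_put(mo);                        /* drop the hash's reference */
        return 0;
}

/* usage: cfs_hash_for_each_empty(hs, my_obj_release_cb, NULL); */
#endif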
1709
1710 void
1711 cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
1712                         cfs_hash_for_each_cb_t func, void *data)
1713 {
1714         struct hlist_head *hhead;
1715         struct hlist_node *hnode;
1716         cfs_hash_bd_t      bd;
1717
1718         cfs_hash_for_each_enter(hs);
1719         cfs_hash_lock(hs, 0);
1720         if (hindex >= CFS_HASH_NHLIST(hs))
1721                 goto out;
1722
1723         cfs_hash_bd_index_set(hs, hindex, &bd);
1724
1725         cfs_hash_bd_lock(hs, &bd, 0);
1726         hhead = cfs_hash_bd_hhead(hs, &bd);
1727         hlist_for_each(hnode, hhead) {
1728                 if (func(hs, &bd, hnode, data))
1729                         break;
1730         }
1731         cfs_hash_bd_unlock(hs, &bd, 0);
1732 out:
1733         cfs_hash_unlock(hs, 0);
1734         cfs_hash_for_each_exit(hs);
1735 }
1736 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1738
1739 /*
1740  * For each item in the libcfs hash @hs which matches the @key call
1741  * the passed callback @func and pass to it as an argument each hash
1742  * item and the private @data. During the callback the bucket lock
1743  * is held so the callback must never sleep.
1744  */
1745 void
1746 cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
1747                         cfs_hash_for_each_cb_t func, void *data)
1748 {
1749         struct hlist_node *hnode;
1750         cfs_hash_bd_t      bds[2];
1751         unsigned           i;
1752
1753         cfs_hash_lock(hs, 0);
1754
1755         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1756
1757         cfs_hash_for_each_bd(bds, 2, i) {
1758                 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1759
1760                 hlist_for_each(hnode, hlist) {
1761                         cfs_hash_bucket_validate(hs, &bds[i], hnode);
1762
1763                         if (cfs_hash_keycmp(hs, key, hnode)) {
1764                                 if (func(hs, &bds[i], hnode, data))
1765                                         break;
1766                         }
1767                 }
1768         }
1769
1770         cfs_hash_dual_bd_unlock(hs, bds, 0);
1771         cfs_hash_unlock(hs, 0);
1772 }
1773 EXPORT_SYMBOL(cfs_hash_for_each_key);
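/*
 * Illustrative sketch (editor's addition, hypothetical object type):
 * grabbing the first item that matches @key.  The bucket lock is held
 * across the callback, so the callback must stay atomic.
 */
#if 0
static int my_obj_find_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                          struct hlist_node *hnode, void *data)
{
        *(struct my_obj **)data = cfs_hash_object(hs, hnode);
        return 1;       /* non-zero: stop after the first match */
}

/* usage: struct my_obj *mo = NULL;
 *        cfs_hash_for_each_key(hs, key, my_obj_find_cb, &mo); */
#endif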
1774
1775 /**
1776  * Rehash the libcfs hash @hs to the given @bits.  This can be used
1777  * to grow the hash size when excessive chaining is detected, or to
1778  * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1779  * flag is set in @hs the libcfs hash may be dynamically rehashed
1780  * during addition or removal if the hash's theta value exceeds
1781  * either the hs->hs_min_theta or hs->hs_max_theta values.  By default
1782  * these values are tuned to keep the chained hash depth small, and
1783  * this approach assumes a reasonably uniform hashing function.  The
1784  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1785  */
1786 void
1787 cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
1788 {
1789         int     i;
1790
1791         /* the caller must hold cfs_hash_lock(hs, 1) */
1792         LASSERT(cfs_hash_with_rehash(hs) &&
1793                 !cfs_hash_with_no_lock(hs));
1794
1795         if (!cfs_hash_is_rehashing(hs))
1796                 return;
1797
1798         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1799                 hs->hs_rehash_bits = 0;
1800                 return;
1801         }
1802
1803         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1804                 cfs_hash_unlock(hs, 1);
1805                 /* raise a console warning if we have been waiting too long */
1806                 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1807                        "hash %s is still rehashing, rescheduled %d\n",
1808                        hs->hs_name, i - 1);
1809                 cond_resched();
1810                 cfs_hash_lock(hs, 1);
1811         }
1812 }
1813 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1814
1815 void
1816 cfs_hash_rehash_cancel(cfs_hash_t *hs)
1817 {
1818         cfs_hash_lock(hs, 1);
1819         cfs_hash_rehash_cancel_locked(hs);
1820         cfs_hash_unlock(hs, 1);
1821 }
1822 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1823
1824 int
1825 cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
1826 {
1827         int     rc;
1828
1829         LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1830
1831         cfs_hash_lock(hs, 1);
1832
1833         rc = cfs_hash_rehash_bits(hs);
1834         if (rc <= 0) {
1835                 cfs_hash_unlock(hs, 1);
1836                 return rc;
1837         }
1838
1839         hs->hs_rehash_bits = rc;
1840         if (!do_rehash) {
1841                 /* schedule the rehash workitem and return */
1842                 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1843                 cfs_hash_unlock(hs, 1);
1844                 return 0;
1845         }
1846
1847         /* rehash right now */
1848         cfs_hash_unlock(hs, 1);
1849
1850         return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1851 }
1852 EXPORT_SYMBOL(cfs_hash_rehash);
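/*
 * Editor's note: the @do_rehash argument selects between resizing
 * synchronously in the caller's context and handing the work to the
 * rehash workitem; a minimal sketch of both calls:
 */
#if 0
        int rc;

        rc = cfs_hash_rehash(hs, 1);    /* resize right now, synchronously */
        rc = cfs_hash_rehash(hs, 0);    /* or: schedule the worker and return */
#endif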
1853
1854 static int
1855 cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
1856 {
1857         cfs_hash_bd_t      new;
1858         struct hlist_head *hhead;
1859         struct hlist_node *hnode;
1860         struct hlist_node *pos;
1861         void              *key;
1862         int                c = 0;
1863
1864         /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1865         cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1866                 hlist_for_each_safe(hnode, pos, hhead) {
1867                         key = cfs_hash_key(hs, hnode);
1868                         LASSERT(key != NULL);
1869                         /* Validate hnode is in the correct bucket. */
1870                         cfs_hash_bucket_validate(hs, old, hnode);
1871                         /*
1872                          * Delete from old hash bucket; move to new bucket.
1873                          * ops->hs_key must be defined.
1874                          */
1875                         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1876                                              hs->hs_rehash_bits, key, &new);
1877                         cfs_hash_bd_move_locked(hs, old, &new, hnode);
1878                         c++;
1879                 }
1880         }
1881         return c;
1882 }
1883
1884 static int
1885 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1886 {
1887         cfs_hash_t         *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
1888         cfs_hash_bucket_t **bkts;
1889         cfs_hash_bd_t       bd;
1890         unsigned int        old_size;
1891         unsigned int        new_size;
1892         int                 bsize;
1893         int                 count = 0;
1894         int                 rc = 0;
1895         int                 i;
1896
1897         LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1898
1899         cfs_hash_lock(hs, 0);
1900         LASSERT(cfs_hash_is_rehashing(hs));
1901
1902         old_size = CFS_HASH_NBKT(hs);
1903         new_size = CFS_HASH_RH_NBKT(hs);
1904
1905         cfs_hash_unlock(hs, 0);
1906
1907         /*
1908          * no need to hold hs::hs_rwlock over hs::hs_buckets,
1909          * because nobody but this worker can change the bucket table.
1910          */
1911         bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1912                                         old_size, new_size);
1913         cfs_hash_lock(hs, 1);
1914         if (bkts == NULL) {
1915                 rc = -ENOMEM;
1916                 goto out;
1917         }
1918
1919         if (bkts == hs->hs_buckets) {
1920                 bkts = NULL; /* do nothing */
1921                 goto out;
1922         }
1923
1924         rc = __cfs_hash_theta(hs);
1925         if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1926                 /* free the newly allocated bkt-table */
1927                 old_size = new_size;
1928                 new_size = CFS_HASH_NBKT(hs);
1929                 rc = -EALREADY;
1930                 goto out;
1931         }
1932
1933         LASSERT(hs->hs_rehash_buckets == NULL);
1934         hs->hs_rehash_buckets = bkts;
1935
1936         rc = 0;
1937         cfs_hash_for_each_bucket(hs, &bd, i) {
1938                 if (cfs_hash_is_exiting(hs)) {
1939                         rc = -ESRCH;
1940                         /* someone wants to destroy the hash, abort now */
1941                         if (old_size < new_size) /* OK to free old bkt-table */
1942                                 break;
1943                         /* it's shrinking, need to free the new bkt-table */
1944                         hs->hs_rehash_buckets = NULL;
1945                         old_size = new_size;
1946                         new_size = CFS_HASH_NBKT(hs);
1947                         goto out;
1948                 }
1949
1950                 count += cfs_hash_rehash_bd(hs, &bd);
1951                 if (count < CFS_HASH_LOOP_HOG ||
1952                     cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1953                         continue;
1954                 }
1955
1956                 count = 0;
1957                 cfs_hash_unlock(hs, 1);
1958                 cond_resched();
1959                 cfs_hash_lock(hs, 1);
1960         }
1961
1962         hs->hs_rehash_count++;
1963
1964         bkts = hs->hs_buckets;
1965         hs->hs_buckets = hs->hs_rehash_buckets;
1966         hs->hs_rehash_buckets = NULL;
1967
1968         hs->hs_cur_bits = hs->hs_rehash_bits;
1969  out:
1970         hs->hs_rehash_bits = 0;
1971         if (rc == -ESRCH) /* never to be scheduled again */
1972                 cfs_wi_exit(cfs_sched_rehash, wi);
1973         bsize = cfs_hash_bkt_size(hs);
1974         cfs_hash_unlock(hs, 1);
1975         /* can't refer to @hs anymore because it could be destroyed */
1976         if (bkts != NULL)
1977                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1978         if (rc != 0)
1979                 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1980         /* return 1 only if cfs_wi_exit is called */
1981         return rc == -ESRCH;
1982 }
1983
1984 /**
1985  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1986  * @old_key must be provided to locate the objects previous location
1987  * in the hash, and the @new_key will be used to reinsert the object.
1988  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1989  * combo when it is critical that there is no window in time where the
1990  * object is missing from the hash.  When an object is being rehashed
1991  * the registered cfs_hash_get() and cfs_hash_put() functions will
1992  * not be called.
1993  */
1994 void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
1995                          void *new_key, struct hlist_node *hnode)
1996 {
1997         cfs_hash_bd_t        bds[3];
1998         cfs_hash_bd_t        old_bds[2];
1999         cfs_hash_bd_t        new_bd;
2000
2001         LASSERT(!hlist_unhashed(hnode));
2002
2003         cfs_hash_lock(hs, 0);
2004
2005         cfs_hash_dual_bd_get(hs, old_key, old_bds);
2006         cfs_hash_bd_get(hs, new_key, &new_bd);
2007
2008         bds[0] = old_bds[0];
2009         bds[1] = old_bds[1];
2010         bds[2] = new_bd;
2011
2012         /* NB: bds[0] and bds[1] are ordered already */
2013         cfs_hash_bd_order(&bds[1], &bds[2]);
2014         cfs_hash_bd_order(&bds[0], &bds[1]);
2015
2016         cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2017         if (likely(old_bds[1].bd_bucket == NULL)) {
2018                 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2019         } else {
2020                 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2021                 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2022         }
2023         /* overwrite the key while holding the locks, otherwise this
2024          * may race with other operations, e.g. rehash */
2025         cfs_hash_keycpy(hs, hnode, new_key);
2026
2027         cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2028         cfs_hash_unlock(hs, 0);
2029 }
2030 EXPORT_SYMBOL(cfs_hash_rehash_key);
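/*
 * Illustrative sketch (editor's addition, hypothetical keys and
 * object): re-keying an object in place.  Unlike a cfs_hash_del()
 * followed by cfs_hash_add(), the object never disappears from the
 * table.
 */
#if 0
        cfs_hash_rehash_key(hs, &old_id, &new_id, &mo->mo_hnode);
#endif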
2031
2032 int cfs_hash_debug_header(struct seq_file *m)
2033 {
2034         return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2035                         CFS_HASH_BIGNAME_LEN,
2036                         "name", "cur", "min", "max", "theta", "t-min", "t-max",
2037                         "flags", "rehash", "count", "maxdep", "maxdepb",
2038                         " distribution");
2039 }
2040 EXPORT_SYMBOL(cfs_hash_debug_header);
2041
2042 static cfs_hash_bucket_t **
2043 cfs_hash_full_bkts(cfs_hash_t *hs)
2044 {
2045         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2046         if (hs->hs_rehash_buckets == NULL)
2047                 return hs->hs_buckets;
2048
2049         LASSERT(hs->hs_rehash_bits != 0);
2050         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2051                hs->hs_rehash_buckets : hs->hs_buckets;
2052 }
2053
2054 static unsigned int
2055 cfs_hash_full_nbkt(cfs_hash_t *hs)
2056 {
2057         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2058         if (hs->hs_rehash_buckets == NULL)
2059                 return CFS_HASH_NBKT(hs);
2060
2061         LASSERT(hs->hs_rehash_bits != 0);
2062         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2063                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2064 }
2065
2066 int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
2067 {
2068         int     dist[8] = { 0, };
2069         int     maxdep  = -1;
2070         int     maxdepb = -1;
2071         int     total   = 0;
2072         int     c       = 0;
2073         int     theta;
2074         int     i;
2075
2076         cfs_hash_lock(hs, 0);
2077         theta = __cfs_hash_theta(hs);
2078
2079         c += seq_printf(m, "%-*s ", CFS_HASH_BIGNAME_LEN, hs->hs_name);
2080         c += seq_printf(m, "%5d ",  1 << hs->hs_cur_bits);
2081         c += seq_printf(m, "%5d ",  1 << hs->hs_min_bits);
2082         c += seq_printf(m, "%5d ",  1 << hs->hs_max_bits);
2083         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(theta),
2084                         __cfs_hash_theta_frac(theta));
2085         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_min_theta),
2086                         __cfs_hash_theta_frac(hs->hs_min_theta));
2087         c += seq_printf(m, "%d.%03d ", __cfs_hash_theta_int(hs->hs_max_theta),
2088                         __cfs_hash_theta_frac(hs->hs_max_theta));
2089         c += seq_printf(m, " 0x%02x ", hs->hs_flags);
2090         c += seq_printf(m, "%6d ", hs->hs_rehash_count);
2091
2092         /*
2093          * The distribution is a summary of the chained hash depth in
2094          * each of the libcfs hash buckets.  Each bucket's hsb_count is
2095          * divided by the hash theta value and used to generate a
2096          * histogram of the hash distribution.  A uniform hash will
2097          * result in all hash buckets being close to the average thus
2098          * only the first few entries in the histogram will be non-zero.
2099          * If your hash function results in a non-uniform hash, this will
2100          * be observable as outlier buckets in the distribution histogram.
2101          *
2102          * Uniform hash distribution:           128/128/0/0/0/0/0/0
2103          * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
2104          */
2105         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2106                 cfs_hash_bd_t bd;
2107
2108                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2109                 cfs_hash_bd_lock(hs, &bd, 0);
2110                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2111                         maxdep  = bd.bd_bucket->hsb_depmax;
2112 #ifdef __KERNEL__
2113                         maxdepb = ffz(~maxdep);
2114 #endif
2115                 }
2116                 total += bd.bd_bucket->hsb_count;
2117                 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2118                 cfs_hash_bd_unlock(hs, &bd, 0);
2119         }
2120
2121         c += seq_printf(m, "%7d ", total);
2122         c += seq_printf(m, "%7d ", maxdep);
2123         c += seq_printf(m, "%7d ", maxdepb);
2124         for (i = 0; i < 8; i++)
2125                 c += seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2126
2127         cfs_hash_unlock(hs, 0);
2128         return c;
2129 }
2130 EXPORT_SYMBOL(cfs_hash_debug_str);
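/*
 * Illustrative sketch (editor's addition): wiring the two debug
 * helpers into a seq_file show routine, assuming the hash table was
 * stashed in the seq_file's private pointer when the file was opened.
 */
#if 0
static int my_hash_seq_show(struct seq_file *m, void *v)
{
        cfs_hash_debug_header(m);
        cfs_hash_debug_str((cfs_hash_t *)m->private, m);
        return 0;
}
#endif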