/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for hash tables in the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without CFS_HASH_REHASH set
 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
 * - buckets are allocated one by one (instead of as contiguous memory),
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - "bucket" is now a group of hlist_heads; the user can specify bucket
 *   size via bkt_bits of cfs_hash_create(), and all hlist_heads in a
 *   bucket share one lock to reduce memory overhead.
 *
 * - support lockless hash, caller will take care of locks:
 *   avoids lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock/rwlock for bucket:
 *   the overhead of spinlock contention is lower than the read/write
 *   contention of an rwlock, so using a spinlock to serialize operations
 *   on a bucket is more reasonable for frequently changed hash tables
 *
 * - support one-single lock mode:
 *   one lock to protect all hash operations, to avoid the overhead of
 *   multiple locks if the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are expensive atomic operations in many use-cases.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking, so we need to schedule the required rehash on a
 *   different thread in those cases.
 *
 * - safer rehash on large hash tables:
 *   in the old implementation, the rehash function would exclusively lock
 *   the hash table and finish the rehash in one batch; that is dangerous
 *   on an SMP system because rehashing millions of elements could take a
 *   long time.  The new rehash implementation can release the lock and
 *   relax the CPU in the middle of a rehash, so it is safe for another
 *   thread to search/change the hash table even while it is rehashing.
 *
 * - support two different refcount modes:
 *   . the hash table holds a refcount on each element
 *   . the hash table doesn't change the refcount on adding/removing
 *     elements
 *
 * - support long-name hash tables (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now we require the user to define hs_keycpy for rehash-enabled
 *   hash tables; cfs_hash_rehash_key will overwrite the hash key
 *   inside the lock by calling hs_keycpy.
 *
 * - better hash iteration:
 *   now we support both locked iteration & lockless iteration of the
 *   hash table.  Also, the user can break the iteration by returning 1
 *   from the callback.
 */
#include <linux/seq_file.h>
#include <linux/log2.h>

#include <libcfs/linux/linux-list.h>
#include <libcfs/libcfs.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif

struct cfs_wi_sched *cfs_sched_rehash;

static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->spin)
{
        spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->spin)
{
        spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
        __acquires(&lock->rw)
{
        if (!exclusive)
                read_lock(&lock->rw);
        else
                write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
        __releases(&lock->rw)
{
        if (!exclusive)
                read_unlock(&lock->rw);
        else
                write_unlock(&lock->rw);
}

/** No lock hash */
static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                rwlock_init(&hs->hs_lock.rw);

                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_spin_lops;
                else
                        LBUG();
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else
                        LBUG();
        }
}

/**
 * Simple hash head without depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head {
        struct hlist_head       hh_head;        /**< entries list */
};

static int
cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head);
}

static struct hlist_head *
cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head *head;

        head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head_dep {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
};

static int
cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_head_dep);
}

static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_head_dep   *head;

        head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_head_dep *hh;

        hh = container_of(cfs_hash_hd_hhead(hs, bd),
                          struct cfs_hash_head_dep, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * doubly linked hash head without depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
};

static int
cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead);
}

static struct hlist_head *
cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead *head;

        head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (dh->dh_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dh_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead *dh;

        dh = container_of(cfs_hash_dh_hhead(hs, bd),
                          struct cfs_hash_dhead, dh_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return -1; /* unknown depth */
}

/**
 * doubly linked hash head with depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead_dep {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
};

static int
cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
        return sizeof(struct cfs_hash_dhead_dep);
}

static struct hlist_head *
cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        struct cfs_hash_dhead_dep *head;

        head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (dh->dd_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dd_tail);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
{
        struct cfs_hash_dhead_dep *dh;

        dh = container_of(cfs_hash_dd_hhead(hs, bd),
                          struct cfs_hash_dhead_dep, dd_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return --dh->dd_depth;
}

static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
       .hop_hhead      = cfs_hash_hh_hhead,
       .hop_hhead_size = cfs_hash_hh_hhead_size,
       .hop_hnode_add  = cfs_hash_hh_hnode_add,
       .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
       .hop_hhead      = cfs_hash_hd_hhead,
       .hop_hhead_size = cfs_hash_hd_hhead_size,
       .hop_hnode_add  = cfs_hash_hd_hnode_add,
       .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
       .hop_hhead      = cfs_hash_dh_hhead,
       .hop_hhead_size = cfs_hash_dh_hhead_size,
       .hop_hnode_add  = cfs_hash_dh_hnode_add,
       .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
       .hop_hhead      = cfs_hash_dd_hhead,
       .hop_hhead_size = cfs_hash_dd_hhead_size,
       .hop_hnode_add  = cfs_hash_dd_hnode_add,
       .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(struct cfs_hash *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}

static void
cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
                     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}

void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
EXPORT_SYMBOL(cfs_hash_bd_get);

static inline void
cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        spin_unlock(&hs->hs_dep_lock);

        cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}

void
cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        struct hlist_node *hnode)
{
        int rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                       struct hlist_node *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(atomic_read(&hs->hs_count) > 0);
                atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
                        struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
        struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
        struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
        int                rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
         * in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
};

enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if it exists, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if it exists, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete if it exists */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
};

static struct hlist_node *
cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key, struct hlist_node *hnode,
                          enum cfs_hash_lookup_intent intent)
{
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
        struct hlist_node  *ehnode;
        struct hlist_node  *match;
        int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function, we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time. */
        match = intent_add ? NULL : hnode;
        hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                        CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);

static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                       unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        /**
         * bds must be ordered in ascending order of
         * bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket but
         * have different bd::bd_offset, so we need to take care to avoid
         * deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}

static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                         unsigned n, int excl)
{
        struct cfs_hash_bucket *prev = NULL;
        int                i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                unsigned n, const void *key)
{
        struct hlist_node *ehnode;
        unsigned          i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                        CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        int               intent;
        unsigned          i;

        LASSERT(hnode != NULL);
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                struct cfs_hash_bd      mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
{
        struct hlist_node *ehnode;
        unsigned           i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
        int     rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) {
                swap(*bd1, *bd2); /* swap bd1 and bd2 */
        }
}

void
cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
                     struct cfs_hash_bd *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}

void
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}

void
cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}

static void
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
                      int bkt_size, int prev_size, int size)
{
        int     i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        struct cfs_hash_bucket **new_bkts;
        int                 i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
                struct cfs_hash_bd     bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
 *           - CFS_HASH_SORT enable chained hash sort
 */
static int cfs_hash_rehash_worker(struct cfs_workitem *wi);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(struct cfs_workitem *wi)
{
        struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
        int         dep;
        int         bkt;
        int         off;
        int         bits;

        spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark as workitem done */
        spin_unlock(&hs->hs_dep_lock);
        return 0;
}

static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
        spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
                return;

        spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                spin_unlock(&hs->hs_dep_lock);
                cond_resched();
                spin_lock(&hs->hs_dep_lock);
        }
        spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                struct cfs_hash_ops *ops, unsigned flags)
{
        struct cfs_hash *hs;
        int         len;

        ENTRY;

        CLASSERT(CFS_HASH_THETA_BITS < 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put != NULL || ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                      ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
        if (hs == NULL)
                RETURN(NULL);

        strlcpy(hs->hs_name, name, len);
        hs->hs_flags = flags;

        atomic_set(&hs->hs_refcount, 1);
        atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
        RETURN(NULL);
}
EXPORT_SYMBOL(cfs_hash_create);
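
/*
 * Usage sketch (illustrative only; "my_obj" and its callbacks are
 * hypothetical, not part of this file): a caller embeds a hlist_node in
 * its object, registers a cfs_hash_ops table wiring key extraction,
 * hashing, comparison and refcounting to that object layout, and then
 * creates the table.  The theta/flag macros come from libcfs_hash.h:
 *
 *      struct my_obj {
 *              __u64                   mo_key;
 *              atomic_t                mo_ref;
 *              struct hlist_node       mo_hnode;
 *      };
 *
 *      static struct cfs_hash_ops my_obj_hash_ops = {
 *              .hs_hash        = my_obj_hop_hash,
 *              .hs_key         = my_obj_hop_key,
 *              .hs_keycmp      = my_obj_hop_keycmp,
 *              .hs_object      = my_obj_hop_object,
 *              .hs_get         = my_obj_hop_get,
 *              .hs_put_locked  = my_obj_hop_put_locked,
 *      };
 *
 *      hash = cfs_hash_create("my_objs", 5, 15, 5, 0,
 *                             CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *                             &my_obj_hash_ops, CFS_HASH_DEFAULT);
 *      if (hash == NULL)
 *              return -ENOMEM;
 */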

/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(struct cfs_hash *hs)
{
        struct hlist_node     *hnode;
        struct hlist_node     *pos;
        struct cfs_hash_bd         bd;
        int                   i;
        ENTRY;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * prohibit further rehashes, don't need any lock because
         * I'm the only (last) one that can change it.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for consistent code */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't validate the key here, because we
                                 * may have interrupted a rehash */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cond_resched();
        }

        LASSERT(atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));

        EXIT;
}

struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(struct cfs_hash *hs)
{
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);
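
/*
 * Lifetime sketch (illustrative only): cfs_hash_create() returns the
 * table with one reference held.  Additional users pin the table with
 * cfs_hash_getref(), which fails once the refcount has dropped to zero,
 * and the final cfs_hash_putref() triggers cfs_hash_destroy():
 *
 *      hs = cfs_hash_getref(hs);
 *      if (hs != NULL) {
 *              ... use the table ...
 *              cfs_hash_putref(hs);
 *      }
 */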

static inline int
cfs_hash_rehash_bits(struct cfs_hash *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}

/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(struct cfs_hash *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        struct cfs_hash_bd   bd;
        int             bits;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);

static struct hlist_node *
cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
                     struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        struct cfs_hash_bd     bds[2];
        int               bits = 0;

        LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
                    struct hlist_node *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);
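
/*
 * Illustrative use (hypothetical my_obj from the cfs_hash_create() sketch
 * above): insert an object only if its key is not yet present; on a key
 * collision the object is left unhashed and -EALREADY is returned:
 *
 *      rc = cfs_hash_add_unique(hash, &obj->mo_key, &obj->mo_hnode);
 *      if (rc == -EALREADY)
 *              my_obj_free(obj);
 */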

/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        void           *obj  = NULL;
        int             bits = 0;
        struct cfs_hash_bd   bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        /* NB: do nothing if @hnode is not in hash table */
        if (hnode == NULL || !hlist_unhashed(hnode)) {
                if (bds[1].bd_bucket == NULL && hnode != NULL) {
                        cfs_hash_bd_del_locked(hs, &bds[0], hnode);
                } else {
                        hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
                                                                key, hnode);
                }
        }

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs this function must be called once per key.  The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash hs->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs NULL is returned.
 */
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
        void                 *obj = NULL;
        struct hlist_node     *hnode;
        struct cfs_hash_bd         bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);
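
/*
 * Illustrative use (hypothetical my_obj again): look up an object by key.
 * ops->hs_get has already taken a reference on the returned object, so
 * the caller must drop it with cfs_hash_put() when done:
 *
 *      struct my_obj *obj = cfs_hash_lookup(hash, &key);
 *
 *      if (obj != NULL) {
 *              ... use obj ...
 *              cfs_hash_put(hash, &obj->mo_hnode);
 *      }
 */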

static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there's a race on struct cfs_hash::hs_iterating, but it
         * doesn't matter because it's just an unreliable signal to the
         * rehash-thread; the rehash-thread will try to finish the rehash
         * ASAP when it sees this.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;

        /* NB: iteration is mostly called by service threads; we tend to
         * cancel a pending rehash request instead of blocking the service
         * thread, and will relaunch the rehash request after iteration */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: there's a race on struct cfs_hash::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item by
 *      calling cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        struct hlist_node       *hnode;
        struct hlist_node       *pos;
        struct cfs_hash_bd      bd;
        __u64                   count = 0;
        int                     excl  = !!remove_safe;
        int                     loop  = 0;
        int                     i;
        ENTRY;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        RETURN(count);
}

struct cfs_hash_cond_arg {
        cfs_hash_cond_opt_cb_t  func;
        void                   *arg;
};

static int
cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
{
        struct cfs_hash_cond_arg *cond = data;

        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}

/**
 * Delete items from the libcfs hash @hs when @func returns true.
 * The write lock is held while looping over each bucket, to avoid
 * any object being referenced during removal.
 */
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
        struct cfs_hash_cond_arg arg = {
                .func   = func,
                .arg    = data,
        };

        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);
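
/*
 * Illustrative use (hypothetical my_obj with an mo_stale flag): drop every
 * stale object in one pass; the callback returns true for items that
 * should be deleted:
 *
 *      static int my_obj_is_stale(void *obj, void *data)
 *      {
 *              return ((struct my_obj *)obj)->mo_stale;
 *      }
 *
 *      cfs_hash_cond_del(hash, my_obj_is_stale, NULL);
 */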

void
cfs_hash_for_each(struct cfs_hash *hs,
                  cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);

void
cfs_hash_for_each_safe(struct cfs_hash *hs,
                       cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
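
/*
 * Illustrative callback for the iterators above: a cfs_hash_for_each_cb_t
 * receives the table, bucket descriptor, hash node and private @data, and
 * returns nonzero to break the iteration (counting example, hypothetical):
 *
 *      static int my_obj_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                                 struct hlist_node *hnode, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *      cfs_hash_for_each(hash, my_obj_count_cb, &count);
 */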

static int
cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
              struct hlist_node *hnode, void *data)
{
        *(int *)data = 0;
        return 1; /* return 1 to break the loop */
}

int
cfs_hash_is_empty(struct cfs_hash *hs)
{
        int empty = 1;

        cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
        return empty;
}
EXPORT_SYMBOL(cfs_hash_is_empty);

__u64
cfs_hash_size_get(struct cfs_hash *hs)
{
        return cfs_hash_with_counter(hs) ?
               atomic_read(&hs->hs_count) :
               cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
1546
1547 /*
1548  * cfs_hash_for_each_relax:
1549  * Iterate over the hash table and call @func on each item without
1550  * holding any lock.  This function cannot guarantee that iteration
1551  * will complete if either of these features is enabled:
1552  *
1553  *  a. if rehash_key is enabled, an item can be moved from
1554  *     one bucket to another bucket
1555  *  b. the user can remove a non-zero-ref item from the hash table,
1556  *     so the item can disappear from the table; even worse, the
1557  *     user may change the item's key and re-insert it into another
1558  *     hash bucket.
1559  * There is no way to complete the iteration correctly in either
1560  * case, so the iteration has to be stopped on change.
1561  */
1562 static int
1563 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1564                         void *data, int start)
1565 {
1566         struct hlist_node       *hnode;
1567         struct hlist_node       *next = NULL;
1568         struct cfs_hash_bd      bd;
1569         __u32                   version;
1570         int                     count = 0;
1571         int                     stop_on_change;
1572         int                     has_put_locked;
1573         int                     rc = 0;
1574         int                     i, end = -1;
1575         ENTRY;
1576
1577         stop_on_change = cfs_hash_with_rehash_key(hs) ||
1578                          !cfs_hash_with_no_itemref(hs);
1579         has_put_locked = hs->hs_ops->hs_put_locked != NULL;
1580         cfs_hash_lock(hs, 0);
1581 again:
1582         LASSERT(!cfs_hash_is_rehashing(hs));
1583
1584         cfs_hash_for_each_bucket(hs, &bd, i) {
1585                 struct hlist_head *hhead;
1586
1587                 if (i < start)
1588                         continue;
1589                 else if (end > 0 && i >= end)
1590                         break;
1591
1592                 cfs_hash_bd_lock(hs, &bd, 0);
1593                 version = cfs_hash_bd_version_get(&bd);
1594
1595                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1596                         hnode = hhead->first;
1597                         if (hnode == NULL)
1598                                 continue;
1599                         cfs_hash_get(hs, hnode);
1600                         for (; hnode != NULL; hnode = next) {
1601                                 cfs_hash_bucket_validate(hs, &bd, hnode);
1602                                 next = hnode->next;
1603                                 if (next != NULL)
1604                                         cfs_hash_get(hs, next);
1605                                 cfs_hash_bd_unlock(hs, &bd, 0);
1606                                 cfs_hash_unlock(hs, 0);
1607
1608                                 rc = func(hs, &bd, hnode, data);
1609                                 if (stop_on_change || !has_put_locked)
1610                                         cfs_hash_put(hs, hnode);
1611
1612                                 cond_resched();
1613                                 count++;
1614
1615                                 cfs_hash_lock(hs, 0);
1616                                 cfs_hash_bd_lock(hs, &bd, 0);
1617                                 if (stop_on_change) {
1618                                         if (version !=
1619                                             cfs_hash_bd_version_get(&bd))
1620                                                 rc = -EINTR;
1621                                 } else if (has_put_locked) {
1622                                         cfs_hash_put_locked(hs, hnode);
1623                                 }
1624                                 if (rc) /* callback wants to break iteration */
1625                                         break;
1626                         }
1627                         if (next != NULL) {
1628                                 if (has_put_locked) {
1629                                         cfs_hash_put_locked(hs, next);
1630                                         next = NULL;
1631                                 }
1632                                 break;
1633                         } else if (rc != 0) {
1634                                 break;
1635                         }
1636                 }
1637                 cfs_hash_bd_unlock(hs, &bd, 0);
1638                 if (next != NULL && !has_put_locked) {
1639                         cfs_hash_put(hs, next);
1640                         next = NULL;
1641                 }
1642                 if (rc) /* callback wants to break iteration */
1643                         break;
1644         }
1645
1646         if (start > 0 && rc == 0) {
1647                 end = start;
1648                 start = 0;
1649                 goto again;
1650         }
1651
1652         cfs_hash_unlock(hs, 0);
1653         return count;
1654 }
1655
1656 int
1657 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1658                          cfs_hash_for_each_cb_t func, void *data, int start)
1659 {
1660         ENTRY;
1661
1662         if (cfs_hash_with_no_lock(hs) ||
1663             cfs_hash_with_rehash_key(hs) ||
1664             !cfs_hash_with_no_itemref(hs))
1665                 RETURN(-EOPNOTSUPP);
1666
1667         if (hs->hs_ops->hs_get == NULL ||
1668            (hs->hs_ops->hs_put == NULL &&
1669             hs->hs_ops->hs_put_locked == NULL))
1670                 RETURN(-EOPNOTSUPP);
1671
1672         cfs_hash_for_each_enter(hs);
1673         cfs_hash_for_each_relax(hs, func, data, start);
1674         cfs_hash_for_each_exit(hs);
1675
1676         RETURN(0);
1677 }
1678 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
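
/*
 * Usage sketch (illustrative only): because no lock is held while the
 * callback runs, the callback may block.  This requires a table created
 * with CFS_HASH_NO_ITEMREF, without CFS_HASH_NO_LOCK or
 * CFS_HASH_REHASH_KEY, and with hs_get plus hs_put (or hs_put_locked)
 * ops registered, per the checks above.  my_obj_writeback() is a
 * hypothetical blocking operation.
 *
 *	static int my_flush_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			       struct hlist_node *hnode, void *data)
 *	{
 *		my_obj_writeback(cfs_hash_object(hs, hnode));	(may sleep)
 *		return 0;
 *	}
 *
 *	int rc = cfs_hash_for_each_nolock(hs, my_flush_cb, NULL, 0);
 */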
1679
1680 /**
1681  * For each hash bucket in the libcfs hash @hs call the passed callback
1682  * @func until all the hash buckets are empty.  The passed callback @func
1683  * or the previously registered callback hs->hs_put must remove the item
1684  * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1685  * functions.  No rwlocks will be held during the callback @func, so it
1686  * is safe to sleep if needed.  This function will not terminate until
1687  * the hash is empty.  Note it is still possible to concurrently add new
1688  * items into the hash.  It is the caller's responsibility to ensure
1689  * the required locking is in place to prevent concurrent insertions.
1690  */
1691 int
1692 cfs_hash_for_each_empty(struct cfs_hash *hs,
1693                         cfs_hash_for_each_cb_t func, void *data)
1694 {
1695         unsigned  i = 0;
1696         ENTRY;
1697
1698         if (cfs_hash_with_no_lock(hs))
1699                 RETURN(-EOPNOTSUPP);
1700
1701         if (hs->hs_ops->hs_get == NULL ||
1702            (hs->hs_ops->hs_put == NULL &&
1703             hs->hs_ops->hs_put_locked == NULL))
1704                 RETURN(-EOPNOTSUPP);
1705
1706         cfs_hash_for_each_enter(hs);
1707         while (cfs_hash_for_each_relax(hs, func, data, 0)) {
1708                 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1709                        hs->hs_name, i++);
1710         }
1711         cfs_hash_for_each_exit(hs);
1712         RETURN(0);
1713 }
1714 EXPORT_SYMBOL(cfs_hash_for_each_empty);
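
/*
 * Usage sketch (illustrative only): drain the table at shutdown.  As
 * documented above, the callback must remove each item, e.g. with
 * cfs_hash_del().  "struct my_obj" and its mo_key field are
 * hypothetical; the registered hs_put op is assumed to release the
 * object once it drops out of the table.
 *
 *	static int my_drop_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			      struct hlist_node *hnode, void *data)
 *	{
 *		struct my_obj *o = cfs_hash_object(hs, hnode);
 *
 *		cfs_hash_del(hs, &o->mo_key, hnode);
 *		return 0;
 *	}
 *
 *	cfs_hash_for_each_empty(hs, my_drop_cb, NULL);
 */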
1715
1716 void
1717 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1718                         cfs_hash_for_each_cb_t func, void *data)
1719 {
1720         struct hlist_head *hhead;
1721         struct hlist_node *hnode;
1722         struct cfs_hash_bd bd;
1723
1724         cfs_hash_for_each_enter(hs);
1725         cfs_hash_lock(hs, 0);
1726         if (hindex >= CFS_HASH_NHLIST(hs))
1727                 goto out;
1728
1729         cfs_hash_bd_index_set(hs, hindex, &bd);
1730
1731         cfs_hash_bd_lock(hs, &bd, 0);
1732         hhead = cfs_hash_bd_hhead(hs, &bd);
1733         hlist_for_each(hnode, hhead) {
1734                 if (func(hs, &bd, hnode, data))
1735                         break;
1736         }
1737         cfs_hash_bd_unlock(hs, &bd, 0);
1738 out:
1739         cfs_hash_unlock(hs, 0);
1740         cfs_hash_for_each_exit(hs);
1741 }
1743 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1744
1745 /*
1746  * For each item in the libcfs hash @hs which matches @key, call the
1747  * passed callback @func with the matching hash item and the private
1748  * @data as arguments.  The bucket lock is held during the callback,
1749  * so the callback must never sleep.
1750  */
1751 void
1752 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1753                         cfs_hash_for_each_cb_t func, void *data)
1754 {
1755         struct hlist_node *hnode;
1756         struct cfs_hash_bd bds[2];
1757         unsigned int i;
1758
1759         cfs_hash_lock(hs, 0);
1760
1761         cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1762
1763         cfs_hash_for_each_bd(bds, 2, i) {
1764                 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1765
1766                 hlist_for_each(hnode, hlist) {
1767                         cfs_hash_bucket_validate(hs, &bds[i], hnode);
1768
1769                         if (cfs_hash_keycmp(hs, key, hnode)) {
1770                                 if (func(hs, &bds[i], hnode, data))
1771                                         break;
1772                         }
1773                 }
1774         }
1775
1776         cfs_hash_dual_bd_unlock(hs, bds, 0);
1777         cfs_hash_unlock(hs, 0);
1778 }
1779 EXPORT_SYMBOL(cfs_hash_for_each_key);
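
/*
 * Usage sketch (illustrative only): visit every entry whose key matches
 * the given key, e.g. to count duplicates on one chain.  The bucket
 * lock is held while the callback runs, so the callback must not
 * sleep.  "key" here stands for a value of the table's key type.
 *
 *	static int my_match_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			       struct hlist_node *hnode, void *data)
 *	{
 *		int *nfound = data;
 *
 *		(*nfound)++;
 *		return 0;	(non-zero stops the scan of that hlist)
 *	}
 *
 *	int nfound = 0;
 *
 *	cfs_hash_for_each_key(hs, &key, my_match_cb, &nfound);
 */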
1780
1781 /**
1782  * Rehash the libcfs hash @hs to the given @bits.  This can be used
1783  * to grow the hash size when excessive chaining is detected, or to
1784  * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1785  * flag is set in @hs the libcfs hash may be dynamically rehashed
1786  * during addition or removal if the hash's theta value exceeds
1787  * either the hs->hs_min_theta or hs->hs_max_theta values.  By default
1788  * these values are tuned to keep the chained hash depth small, and
1789  * this approach assumes a reasonably uniform hashing function.  The
1790  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1791  */
1792 void
1793 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1794 {
1795         int     i;
1796
1797         /* caller must hold cfs_hash_lock(hs, 1) */
1798         LASSERT(cfs_hash_with_rehash(hs) &&
1799                 !cfs_hash_with_no_lock(hs));
1800
1801         if (!cfs_hash_is_rehashing(hs))
1802                 return;
1803
1804         if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1805                 hs->hs_rehash_bits = 0;
1806                 return;
1807         }
1808
1809         for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1810                 cfs_hash_unlock(hs, 1);
1811                 /* raise a console warning when waiting too long */
1812                 CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
1813                        "hash %s is still rehashing, rescheduled %d\n",
1814                        hs->hs_name, i - 1);
1815                 cond_resched();
1816                 cfs_hash_lock(hs, 1);
1817         }
1818 }
1819
1820 void
1821 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1822 {
1823         cfs_hash_lock(hs, 1);
1824         cfs_hash_rehash_cancel_locked(hs);
1825         cfs_hash_unlock(hs, 1);
1826 }
1827
1828 int
1829 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1830 {
1831         int     rc;
1832
1833         LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1834
1835         cfs_hash_lock(hs, 1);
1836
1837         rc = cfs_hash_rehash_bits(hs);
1838         if (rc <= 0) {
1839                 cfs_hash_unlock(hs, 1);
1840                 return rc;
1841         }
1842
1843         hs->hs_rehash_bits = rc;
1844         if (!do_rehash) {
1845                 /* launch and return */
1846                 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1847                 cfs_hash_unlock(hs, 1);
1848                 return 0;
1849         }
1850
1851         /* rehash right now */
1852         cfs_hash_unlock(hs, 1);
1853
1854         return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1855 }
1856
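/*
 * Usage sketch (illustrative only): force a synchronous rehash check
 * after a bulk load instead of waiting for theta-triggered background
 * scheduling.  The table must have been created with CFS_HASH_REHASH
 * and without CFS_HASH_NO_LOCK, per the LASSERT above.
 *
 *	int rc = cfs_hash_rehash(hs, 1);	(1 = rehash immediately)
 *
 * rc <= 0 from cfs_hash_rehash_bits() means no resize was needed (0)
 * or it could not be started (< 0).
 */
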
1857 static int
1858 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1859 {
1860         struct cfs_hash_bd      new;
1861         struct hlist_head *hhead;
1862         struct hlist_node *hnode;
1863         struct hlist_node *pos;
1864         void              *key;
1865         int                c = 0;
1866
1867         /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1868         cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1869                 hlist_for_each_safe(hnode, pos, hhead) {
1870                         key = cfs_hash_key(hs, hnode);
1871                         LASSERT(key != NULL);
1872                         /* Validate hnode is in the correct bucket. */
1873                         cfs_hash_bucket_validate(hs, old, hnode);
1874                         /*
1875                          * Delete from old hash bucket; move to new bucket.
1876                          * ops->hs_key must be defined.
1877                          */
1878                         cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1879                                              hs->hs_rehash_bits, key, &new);
1880                         cfs_hash_bd_move_locked(hs, old, &new, hnode);
1881                         c++;
1882                 }
1883         }
1884         return c;
1885 }
1886
1887 static int
1888 cfs_hash_rehash_worker(struct cfs_workitem *wi)
1889 {
1890         struct cfs_hash         *hs =
1891                 container_of(wi, struct cfs_hash, hs_rehash_wi);
1892         struct cfs_hash_bucket **bkts;
1893         struct cfs_hash_bd      bd;
1894         unsigned int            old_size;
1895         unsigned int            new_size;
1896         int                     bsize;
1897         int                     count = 0;
1898         int                     rc = 0;
1899         int                     i;
1900
1901         LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1902
1903         cfs_hash_lock(hs, 0);
1904         LASSERT(cfs_hash_is_rehashing(hs));
1905
1906         old_size = CFS_HASH_NBKT(hs);
1907         new_size = CFS_HASH_RH_NBKT(hs);
1908
1909         cfs_hash_unlock(hs, 0);
1910
1911         /*
1912          * hs::hs_rwlock is not needed for hs::hs_buckets because
1913          * nobody else can change the bucket table during rehash.
1914          */
1915         bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1916                                         old_size, new_size);
1917         cfs_hash_lock(hs, 1);
1918         if (bkts == NULL) {
1919                 rc = -ENOMEM;
1920                 goto out;
1921         }
1922
1923         if (bkts == hs->hs_buckets) {
1924                 bkts = NULL; /* do nothing */
1925                 goto out;
1926         }
1927
1928         rc = __cfs_hash_theta(hs);
1929         if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1930                 /* free the newly allocated bucket table */
1931                 old_size = new_size;
1932                 new_size = CFS_HASH_NBKT(hs);
1933                 rc = -EALREADY;
1934                 goto out;
1935         }
1936
1937         LASSERT(hs->hs_rehash_buckets == NULL);
1938         hs->hs_rehash_buckets = bkts;
1939
1940         rc = 0;
1941         cfs_hash_for_each_bucket(hs, &bd, i) {
1942                 if (cfs_hash_is_exiting(hs)) {
1943                         rc = -ESRCH;
1944                         /* someone wants to destroy the hash, abort now */
1945                         if (old_size < new_size) /* OK to free old bkt-table */
1946                                 break;
1947                         /* the table is shrinking, so free the new bucket table */
1948                         hs->hs_rehash_buckets = NULL;
1949                         old_size = new_size;
1950                         new_size = CFS_HASH_NBKT(hs);
1951                         goto out;
1952                 }
1953
1954                 count += cfs_hash_rehash_bd(hs, &bd);
1955                 if (count < CFS_HASH_LOOP_HOG ||
1956                     cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1957                         continue;
1958                 }
1959
1960                 count = 0;
1961                 cfs_hash_unlock(hs, 1);
1962                 cond_resched();
1963                 cfs_hash_lock(hs, 1);
1964         }
1965
1966         hs->hs_rehash_count++;
1967
1968         bkts = hs->hs_buckets;
1969         hs->hs_buckets = hs->hs_rehash_buckets;
1970         hs->hs_rehash_buckets = NULL;
1971
1972         hs->hs_cur_bits = hs->hs_rehash_bits;
1973  out:
1974         hs->hs_rehash_bits = 0;
1975         if (rc == -ESRCH) /* will never be scheduled again */
1976                 cfs_wi_exit(cfs_sched_rehash, wi);
1977         bsize = cfs_hash_bkt_size(hs);
1978         cfs_hash_unlock(hs, 1);
1979         /* can't refer to @hs anymore because it could be destroyed */
1980         if (bkts != NULL)
1981                 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1982         if (rc != 0)
1983                 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1984         /* return 1 only if cfs_wi_exit is called */
1985         return rc == -ESRCH;
1986 }
1987
1988 /**
1989  * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1990  * @old_key must be provided to locate the object's previous location
1991  * in the hash, and the @new_key will be used to reinsert the object.
1992  * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1993  * combo when it is critical that there is no window in time where the
1994  * object is missing from the hash.  When an object is being rehashed
1995  * the registered cfs_hash_get() and cfs_hash_put() functions will
1996  * not be called.
1997  */
1998 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
1999                          void *new_key, struct hlist_node *hnode)
2000 {
2001         struct cfs_hash_bd        bds[3];
2002         struct cfs_hash_bd        old_bds[2];
2003         struct cfs_hash_bd        new_bd;
2004
2005         LASSERT(!hlist_unhashed(hnode));
2006
2007         cfs_hash_lock(hs, 0);
2008
2009         cfs_hash_dual_bd_get(hs, old_key, old_bds);
2010         cfs_hash_bd_get(hs, new_key, &new_bd);
2011
2012         bds[0] = old_bds[0];
2013         bds[1] = old_bds[1];
2014         bds[2] = new_bd;
2015
2016         /* NB: bds[0] and bds[1] are ordered already */
2017         cfs_hash_bd_order(&bds[1], &bds[2]);
2018         cfs_hash_bd_order(&bds[0], &bds[1]);
2019
2020         cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2021         if (likely(old_bds[1].bd_bucket == NULL)) {
2022                 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2023         } else {
2024                 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2025                 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2026         }
2027         /* overwrite the key while holding the locks; otherwise this
2028          * may race with other operations, e.g. rehash */
2029         cfs_hash_keycpy(hs, hnode, new_key);
2030
2031         cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2032         cfs_hash_unlock(hs, 0);
2033 }
2034 EXPORT_SYMBOL(cfs_hash_rehash_key);
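
/*
 * Usage sketch (illustrative only): move an object to a new key with no
 * window where a concurrent lookup could miss it.  "struct my_obj",
 * mo_hnode, my_obj_lookup() and my_obj_make_key() are hypothetical;
 * the registered hs_keycpy op is what actually stores the new key into
 * the object, inside the locks.
 *
 *	struct my_obj *o = my_obj_lookup(hs, &old_key);
 *	__u64 new_key = my_obj_make_key(o);
 *
 *	cfs_hash_rehash_key(hs, &old_key, &new_key, &o->mo_hnode);
 */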
2035
2036 void cfs_hash_debug_header(struct seq_file *m)
2037 {
2038         seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
2039                    CFS_HASH_BIGNAME_LEN, "name");
2040 }
2041 EXPORT_SYMBOL(cfs_hash_debug_header);
2042
2043 static struct cfs_hash_bucket **
2044 cfs_hash_full_bkts(struct cfs_hash *hs)
2045 {
2046         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2047         if (hs->hs_rehash_buckets == NULL)
2048                 return hs->hs_buckets;
2049
2050         LASSERT(hs->hs_rehash_bits != 0);
2051         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2052                hs->hs_rehash_buckets : hs->hs_buckets;
2053 }
2054
2055 static unsigned int
2056 cfs_hash_full_nbkt(struct cfs_hash *hs)
2057 {
2058         /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2059         if (hs->hs_rehash_buckets == NULL)
2060                 return CFS_HASH_NBKT(hs);
2061
2062         LASSERT(hs->hs_rehash_bits != 0);
2063         return hs->hs_rehash_bits > hs->hs_cur_bits ?
2064                CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2065 }
2066
2067 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2068 {
2069         int dist[8] = { 0, };
2070         int maxdep = -1;
2071         int maxdepb = -1;
2072         int total = 0;
2073         int theta;
2074         int i;
2075
2076         cfs_hash_lock(hs, 0);
2077         theta = __cfs_hash_theta(hs);
2078
2079         seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
2080                    CFS_HASH_BIGNAME_LEN, hs->hs_name,
2081                    1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2082                    1 << hs->hs_max_bits,
2083                    __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2084                    __cfs_hash_theta_int(hs->hs_min_theta),
2085                    __cfs_hash_theta_frac(hs->hs_min_theta),
2086                    __cfs_hash_theta_int(hs->hs_max_theta),
2087                    __cfs_hash_theta_frac(hs->hs_max_theta),
2088                    hs->hs_flags, hs->hs_rehash_count);
2089
2090         /*
2091          * The distribution is a summary of the chained hash depth in
2092          * each of the libcfs hash buckets.  Each bucket's hsb_count is
2093          * divided by the hash theta value and used to generate a
2094          * histogram of the hash distribution.  A uniform hash will
2095          * result in all hash buckets being close to the average, thus
2096          * only the first few entries in the histogram will be non-zero.
2097          * If your hash function results in a non-uniform hash, this will
2098          * be observable as outlier buckets in the distribution histogram.
2099          *
2100          * Uniform hash distribution:           128/128/0/0/0/0/0/0
2101          * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
2102          */
2103         for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2104                 struct cfs_hash_bd bd;
2105
2106                 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2107                 cfs_hash_bd_lock(hs, &bd, 0);
2108                 if (maxdep < bd.bd_bucket->hsb_depmax) {
2109                         maxdep  = bd.bd_bucket->hsb_depmax;
2110                         maxdepb = ffz(~maxdep);
2111                 }
2112                 total += bd.bd_bucket->hsb_count;
2113                 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2114                 cfs_hash_bd_unlock(hs, &bd, 0);
2115         }
2116
2117         seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2118         for (i = 0; i < 8; i++)
2119                 seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2120
2121         cfs_hash_unlock(hs, 0);
2122 }
2123 EXPORT_SYMBOL(cfs_hash_debug_str);